Dataset columns: python_code (string, lengths 0 to 1.8M), repo_name (string, 7 distinct values), file_path (string, lengths 5 to 99).
/* * cxgb3i_offload.c: Chelsio S3xx iscsi offloaded tcp connection management * * Copyright (C) 2003-2015 Chelsio Communications. All rights reserved. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this * release for licensing terms and conditions. * * Written by: Dimitris Michailidis ([email protected]) * Karen Xie ([email protected]) */ #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ #include <linux/module.h> #include <linux/moduleparam.h> #include <scsi/scsi_host.h> #include "common.h" #include "t3_cpl.h" #include "t3cdev.h" #include "cxgb3_defs.h" #include "cxgb3_ctl_defs.h" #include "cxgb3_offload.h" #include "firmware_exports.h" #include "cxgb3i.h" static unsigned int dbg_level; #include "../libcxgbi.h" #define DRV_MODULE_NAME "cxgb3i" #define DRV_MODULE_DESC "Chelsio T3 iSCSI Driver" #define DRV_MODULE_VERSION "2.0.1-ko" #define DRV_MODULE_RELDATE "Apr. 2015" static char version[] = DRV_MODULE_DESC " " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; MODULE_AUTHOR("Chelsio Communications, Inc."); MODULE_DESCRIPTION(DRV_MODULE_DESC); MODULE_VERSION(DRV_MODULE_VERSION); MODULE_LICENSE("GPL"); module_param(dbg_level, uint, 0644); MODULE_PARM_DESC(dbg_level, "debug flag (default=0)"); static int cxgb3i_rcv_win = 256 * 1024; module_param(cxgb3i_rcv_win, int, 0644); MODULE_PARM_DESC(cxgb3i_rcv_win, "TCP receive window in bytes (default=256KB)"); static int cxgb3i_snd_win = 128 * 1024; module_param(cxgb3i_snd_win, int, 0644); MODULE_PARM_DESC(cxgb3i_snd_win, "TCP send window in bytes (default=128KB)"); static int cxgb3i_rx_credit_thres = 10 * 1024; module_param(cxgb3i_rx_credit_thres, int, 0644); MODULE_PARM_DESC(cxgb3i_rx_credit_thres, "RX credits return threshold in bytes (default=10KB)"); static unsigned int cxgb3i_max_connect = 8 * 1024; module_param(cxgb3i_max_connect, uint, 0644); MODULE_PARM_DESC(cxgb3i_max_connect, "Max. 
# of connections (default=8092)"); static unsigned int cxgb3i_sport_base = 20000; module_param(cxgb3i_sport_base, uint, 0644); MODULE_PARM_DESC(cxgb3i_sport_base, "starting port number (default=20000)"); static void cxgb3i_dev_open(struct t3cdev *); static void cxgb3i_dev_close(struct t3cdev *); static void cxgb3i_dev_event_handler(struct t3cdev *, u32, u32); static struct cxgb3_client t3_client = { .name = DRV_MODULE_NAME, .handlers = cxgb3i_cpl_handlers, .add = cxgb3i_dev_open, .remove = cxgb3i_dev_close, .event_handler = cxgb3i_dev_event_handler, }; static const struct scsi_host_template cxgb3i_host_template = { .module = THIS_MODULE, .name = DRV_MODULE_NAME, .proc_name = DRV_MODULE_NAME, .can_queue = CXGB3I_SCSI_HOST_QDEPTH, .queuecommand = iscsi_queuecommand, .change_queue_depth = scsi_change_queue_depth, .sg_tablesize = SG_ALL, .max_sectors = 0xFFFF, .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN, .eh_timed_out = iscsi_eh_cmd_timed_out, .eh_abort_handler = iscsi_eh_abort, .eh_device_reset_handler = iscsi_eh_device_reset, .eh_target_reset_handler = iscsi_eh_recover_target, .target_alloc = iscsi_target_alloc, .dma_boundary = PAGE_SIZE - 1, .this_id = -1, .track_queue_depth = 1, .cmd_size = sizeof(struct iscsi_cmd), }; static struct iscsi_transport cxgb3i_iscsi_transport = { .owner = THIS_MODULE, .name = DRV_MODULE_NAME, /* owner and name should be set already */ .caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST | CAP_DATADGST | CAP_DIGEST_OFFLOAD | CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO, .attr_is_visible = cxgbi_attr_is_visible, .get_host_param = cxgbi_get_host_param, .set_host_param = cxgbi_set_host_param, /* session management */ .create_session = cxgbi_create_session, .destroy_session = cxgbi_destroy_session, .get_session_param = iscsi_session_get_param, /* connection management */ .create_conn = cxgbi_create_conn, .bind_conn = cxgbi_bind_conn, .unbind_conn = iscsi_conn_unbind, .destroy_conn = iscsi_tcp_conn_teardown, .start_conn = iscsi_conn_start, .stop_conn = iscsi_conn_stop, .get_conn_param = iscsi_conn_get_param, .set_param = cxgbi_set_conn_param, .get_stats = cxgbi_get_conn_stats, /* pdu xmit req from user space */ .send_pdu = iscsi_conn_send_pdu, /* task */ .init_task = iscsi_tcp_task_init, .xmit_task = iscsi_tcp_task_xmit, .cleanup_task = cxgbi_cleanup_task, /* pdu */ .alloc_pdu = cxgbi_conn_alloc_pdu, .init_pdu = cxgbi_conn_init_pdu, .xmit_pdu = cxgbi_conn_xmit_pdu, .parse_pdu_itt = cxgbi_parse_pdu_itt, /* TCP connect/disconnect */ .get_ep_param = cxgbi_get_ep_param, .ep_connect = cxgbi_ep_connect, .ep_poll = cxgbi_ep_poll, .ep_disconnect = cxgbi_ep_disconnect, /* Error recovery timeout call */ .session_recovery_timedout = iscsi_session_recovery_timedout, }; static struct scsi_transport_template *cxgb3i_stt; /* * CPL (Chelsio Protocol Language) defines a message passing interface between * the host driver and Chelsio asic. * The section below implments CPLs that related to iscsi tcp connection * open/close/abort and data send/receive. 
*/ static int push_tx_frames(struct cxgbi_sock *csk, int req_completion); static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb, const struct l2t_entry *e) { unsigned int wscale = cxgbi_sock_compute_wscale(csk->rcv_win); struct cpl_act_open_req *req = (struct cpl_act_open_req *)skb->head; skb->priority = CPL_PRIORITY_SETUP; req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, csk->atid)); req->local_port = csk->saddr.sin_port; req->peer_port = csk->daddr.sin_port; req->local_ip = csk->saddr.sin_addr.s_addr; req->peer_ip = csk->daddr.sin_addr.s_addr; req->opt0h = htonl(V_KEEP_ALIVE(1) | F_TCAM_BYPASS | V_WND_SCALE(wscale) | V_MSS_IDX(csk->mss_idx) | V_L2T_IDX(e->idx) | V_TX_CHANNEL(e->smt_idx)); req->opt0l = htonl(V_ULP_MODE(ULP2_MODE_ISCSI) | V_RCV_BUFSIZ(csk->rcv_win >> 10)); log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u, %pI4:%u-%pI4:%u, %u,%u,%u.\n", csk, csk->state, csk->flags, csk->atid, &req->local_ip, ntohs(req->local_port), &req->peer_ip, ntohs(req->peer_port), csk->mss_idx, e->idx, e->smt_idx); l2t_send(csk->cdev->lldev, skb, csk->l2t); } static inline void act_open_arp_failure(struct t3cdev *dev, struct sk_buff *skb) { cxgbi_sock_act_open_req_arp_failure(NULL, skb); } /* * CPL connection close request: host -> * * Close a connection by sending a CPL_CLOSE_CON_REQ message and queue it to * the write queue (i.e., after any unsent txt data). */ static void send_close_req(struct cxgbi_sock *csk) { struct sk_buff *skb = csk->cpl_close; struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head; unsigned int tid = csk->tid; log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", csk, csk->state, csk->flags, csk->tid); csk->cpl_close = NULL; req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON)); req->wr.wr_lo = htonl(V_WR_TID(tid)); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid)); req->rsvd = htonl(csk->write_seq); cxgbi_sock_skb_entail(csk, skb); if (csk->state >= CTP_ESTABLISHED) push_tx_frames(csk, 1); } /* * CPL connection abort request: host -> * * Send an ABORT_REQ message. Makes sure we do not send multiple ABORT_REQs * for the same connection and also that we do not try to send a message * after the connection has closed. */ static void abort_arp_failure(struct t3cdev *tdev, struct sk_buff *skb) { struct cpl_abort_req *req = cplhdr(skb); log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, "t3dev 0x%p, tid %u, skb 0x%p.\n", tdev, GET_TID(req), skb); req->cmd = CPL_ABORT_NO_RST; cxgb3_ofld_send(tdev, skb); } static void send_abort_req(struct cxgbi_sock *csk) { struct sk_buff *skb = csk->cpl_abort_req; struct cpl_abort_req *req; if (unlikely(csk->state == CTP_ABORTING || !skb)) return; cxgbi_sock_set_state(csk, CTP_ABORTING); cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING); /* Purge the send queue so we don't send anything after an abort. 
*/ cxgbi_sock_purge_write_queue(csk); csk->cpl_abort_req = NULL; req = (struct cpl_abort_req *)skb->head; skb->priority = CPL_PRIORITY_DATA; set_arp_failure_handler(skb, abort_arp_failure); req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ)); req->wr.wr_lo = htonl(V_WR_TID(csk->tid)); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid)); req->rsvd0 = htonl(csk->snd_nxt); req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT); req->cmd = CPL_ABORT_SEND_RST; log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u, snd_nxt %u, 0x%x.\n", csk, csk->state, csk->flags, csk->tid, csk->snd_nxt, req->rsvd1); l2t_send(csk->cdev->lldev, skb, csk->l2t); } /* * CPL connection abort reply: host -> * * Send an ABORT_RPL message in response of the ABORT_REQ received. */ static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status) { struct sk_buff *skb = csk->cpl_abort_rpl; struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head; log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u, status %d.\n", csk, csk->state, csk->flags, csk->tid, rst_status); csk->cpl_abort_rpl = NULL; skb->priority = CPL_PRIORITY_DATA; rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL)); rpl->wr.wr_lo = htonl(V_WR_TID(csk->tid)); OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid)); rpl->cmd = rst_status; cxgb3_ofld_send(csk->cdev->lldev, skb); } /* * CPL connection rx data ack: host -> * Send RX credits through an RX_DATA_ACK CPL message. Returns the number of * credits sent. */ static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits) { struct sk_buff *skb; struct cpl_rx_data_ack *req; u32 dack = F_RX_DACK_CHANGE | V_RX_DACK_MODE(1); log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, "csk 0x%p,%u,0x%lx,%u, credit %u, dack %u.\n", csk, csk->state, csk->flags, csk->tid, credits, dack); skb = alloc_wr(sizeof(*req), 0, GFP_ATOMIC); if (!skb) { pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits); return 0; } req = (struct cpl_rx_data_ack *)skb->head; req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, csk->tid)); req->credit_dack = htonl(F_RX_DACK_CHANGE | V_RX_DACK_MODE(1) | V_RX_CREDITS(credits)); skb->priority = CPL_PRIORITY_ACK; cxgb3_ofld_send(csk->cdev->lldev, skb); return credits; } /* * CPL connection tx data: host -> * * Send iscsi PDU via TX_DATA CPL message. Returns the number of * credits sent. * Each TX_DATA consumes work request credit (wrs), so we need to keep track of * how many we've used so far and how many are pending (i.e., yet ack'ed by T3). */ static unsigned int wrlen __read_mostly; static unsigned int skb_wrs[SKB_WR_LIST_SIZE] __read_mostly; static void init_wr_tab(unsigned int wr_len) { int i; if (skb_wrs[1]) /* already initialized */ return; for (i = 1; i < SKB_WR_LIST_SIZE; i++) { int sgl_len = (3 * i) / 2 + (i & 1); sgl_len += 3; skb_wrs[i] = (sgl_len <= wr_len ? 1 : 1 + (sgl_len - 2) / (wr_len - 1)); } wrlen = wr_len * 8; } static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb, int len, int req_completion) { struct tx_data_wr *req; struct l2t_entry *l2t = csk->l2t; skb_reset_transport_header(skb); req = __skb_push(skb, sizeof(*req)); req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA) | (req_completion ? 
F_WR_COMPL : 0)); req->wr_lo = htonl(V_WR_TID(csk->tid)); /* len includes the length of any HW ULP additions */ req->len = htonl(len); /* V_TX_ULP_SUBMODE sets both the mode and submode */ req->flags = htonl(V_TX_ULP_SUBMODE(cxgbi_skcb_tx_ulp_mode(skb)) | V_TX_SHOVE((skb_peek(&csk->write_queue) ? 0 : 1))); req->sndseq = htonl(csk->snd_nxt); req->param = htonl(V_TX_PORT(l2t->smt_idx)); if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) { req->flags |= htonl(V_TX_ACK_PAGES(2) | F_TX_INIT | V_TX_CPU_IDX(csk->rss_qid)); /* sendbuffer is in units of 32KB. */ req->param |= htonl(V_TX_SNDBUF(csk->snd_win >> 15)); cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT); } } /* * push_tx_frames -- start transmit * * Prepends TX_DATA_WR or CPL_CLOSE_CON_REQ headers to buffers waiting in a * connection's send queue and sends them on to T3. Must be called with the * connection's lock held. Returns the amount of send buffer space that was * freed as a result of sending queued data to T3. */ static void arp_failure_skb_discard(struct t3cdev *dev, struct sk_buff *skb) { kfree_skb(skb); } static int push_tx_frames(struct cxgbi_sock *csk, int req_completion) { int total_size = 0; struct sk_buff *skb; if (unlikely(csk->state < CTP_ESTABLISHED || csk->state == CTP_CLOSE_WAIT_1 || csk->state >= CTP_ABORTING)) { log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX, "csk 0x%p,%u,0x%lx,%u, in closing state.\n", csk, csk->state, csk->flags, csk->tid); return 0; } while (csk->wr_cred && (skb = skb_peek(&csk->write_queue)) != NULL) { int len = skb->len; /* length before skb_push */ int frags = skb_shinfo(skb)->nr_frags + (len != skb->data_len); int wrs_needed = skb_wrs[frags]; if (wrs_needed > 1 && len + sizeof(struct tx_data_wr) <= wrlen) wrs_needed = 1; WARN_ON(frags >= SKB_WR_LIST_SIZE || wrs_needed < 1); if (csk->wr_cred < wrs_needed) { log_debug(1 << CXGBI_DBG_PDU_TX, "csk 0x%p, skb len %u/%u, frag %u, wr %d<%u.\n", csk, skb->len, skb->data_len, frags, wrs_needed, csk->wr_cred); break; } __skb_unlink(skb, &csk->write_queue); skb->priority = CPL_PRIORITY_DATA; skb->csum = wrs_needed; /* remember this until the WR_ACK */ csk->wr_cred -= wrs_needed; csk->wr_una_cred += wrs_needed; cxgbi_sock_enqueue_wr(csk, skb); log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX, "csk 0x%p, enqueue, skb len %u/%u, frag %u, wr %d, " "left %u, unack %u.\n", csk, skb->len, skb->data_len, frags, skb->csum, csk->wr_cred, csk->wr_una_cred); if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) { if ((req_completion && csk->wr_una_cred == wrs_needed) || csk->wr_una_cred >= csk->wr_max_cred / 2) { req_completion = 1; csk->wr_una_cred = 0; } len += cxgbi_ulp_extra_len(cxgbi_skcb_tx_ulp_mode(skb)); make_tx_data_wr(csk, skb, len, req_completion); csk->snd_nxt += len; cxgbi_skcb_clear_flag(skb, SKCBF_TX_NEED_HDR); } total_size += skb->truesize; log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX, "csk 0x%p, tid 0x%x, send skb 0x%p.\n", csk, csk->tid, skb); set_arp_failure_handler(skb, arp_failure_skb_discard); l2t_send(csk->cdev->lldev, skb, csk->l2t); } return total_size; } /* * Process a CPL_ACT_ESTABLISH message: -> host * Updates connection state from an active establish CPL message. Runs with * the connection lock held. 
*/ static inline void free_atid(struct cxgbi_sock *csk) { if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) { cxgb3_free_atid(csk->cdev->lldev, csk->atid); cxgbi_sock_clear_flag(csk, CTPF_HAS_ATID); cxgbi_sock_put(csk); } } static int do_act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) { struct cxgbi_sock *csk = ctx; struct cpl_act_establish *req = cplhdr(skb); unsigned int tid = GET_TID(req); unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid)); u32 rcv_isn = ntohl(req->rcv_isn); /* real RCV_ISN + 1 */ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, "atid 0x%x,tid 0x%x, csk 0x%p,%u,0x%lx, isn %u.\n", atid, atid, csk, csk->state, csk->flags, rcv_isn); cxgbi_sock_get(csk); cxgbi_sock_set_flag(csk, CTPF_HAS_TID); csk->tid = tid; cxgb3_insert_tid(csk->cdev->lldev, &t3_client, csk, tid); free_atid(csk); csk->rss_qid = G_QNUM(ntohs(skb->csum)); spin_lock_bh(&csk->lock); if (csk->retry_timer.function) { del_timer(&csk->retry_timer); csk->retry_timer.function = NULL; } if (unlikely(csk->state != CTP_ACTIVE_OPEN)) pr_info("csk 0x%p,%u,0x%lx,%u, got EST.\n", csk, csk->state, csk->flags, csk->tid); csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn; if (csk->rcv_win > (M_RCV_BUFSIZ << 10)) csk->rcv_wup -= csk->rcv_win - (M_RCV_BUFSIZ << 10); cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt)); if (unlikely(cxgbi_sock_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED))) /* upper layer has requested closing */ send_abort_req(csk); else { if (skb_queue_len(&csk->write_queue)) push_tx_frames(csk, 1); cxgbi_conn_tx_open(csk); } spin_unlock_bh(&csk->lock); __kfree_skb(skb); return 0; } /* * Process a CPL_ACT_OPEN_RPL message: -> host * Handle active open failures. */ static int act_open_rpl_status_to_errno(int status) { switch (status) { case CPL_ERR_CONN_RESET: return -ECONNREFUSED; case CPL_ERR_ARP_MISS: return -EHOSTUNREACH; case CPL_ERR_CONN_TIMEDOUT: return -ETIMEDOUT; case CPL_ERR_TCAM_FULL: return -ENOMEM; case CPL_ERR_CONN_EXIST: return -EADDRINUSE; default: return -EIO; } } static void act_open_retry_timer(struct timer_list *t) { struct cxgbi_sock *csk = from_timer(csk, t, retry_timer); struct sk_buff *skb; log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", csk, csk->state, csk->flags, csk->tid); cxgbi_sock_get(csk); spin_lock_bh(&csk->lock); skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_ATOMIC); if (!skb) cxgbi_sock_fail_act_open(csk, -ENOMEM); else { skb->sk = (struct sock *)csk; set_arp_failure_handler(skb, act_open_arp_failure); send_act_open_req(csk, skb, csk->l2t); } spin_unlock_bh(&csk->lock); cxgbi_sock_put(csk); } static int do_act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) { struct cxgbi_sock *csk = ctx; struct cpl_act_open_rpl *rpl = cplhdr(skb); pr_info("csk 0x%p,%u,0x%lx,%u, status %u, %pI4:%u-%pI4:%u.\n", csk, csk->state, csk->flags, csk->atid, rpl->status, &csk->saddr.sin_addr.s_addr, ntohs(csk->saddr.sin_port), &csk->daddr.sin_addr.s_addr, ntohs(csk->daddr.sin_port)); if (rpl->status != CPL_ERR_TCAM_FULL && rpl->status != CPL_ERR_CONN_EXIST && rpl->status != CPL_ERR_ARP_MISS) cxgb3_queue_tid_release(tdev, GET_TID(rpl)); cxgbi_sock_get(csk); spin_lock_bh(&csk->lock); if (rpl->status == CPL_ERR_CONN_EXIST && csk->retry_timer.function != act_open_retry_timer) { csk->retry_timer.function = act_open_retry_timer; mod_timer(&csk->retry_timer, jiffies + HZ / 2); } else cxgbi_sock_fail_act_open(csk, act_open_rpl_status_to_errno(rpl->status)); spin_unlock_bh(&csk->lock); cxgbi_sock_put(csk); __kfree_skb(skb); 
return 0; } /* * Process PEER_CLOSE CPL messages: -> host * Handle peer FIN. */ static int do_peer_close(struct t3cdev *cdev, struct sk_buff *skb, void *ctx) { struct cxgbi_sock *csk = ctx; log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", csk, csk->state, csk->flags, csk->tid); cxgbi_sock_rcv_peer_close(csk); __kfree_skb(skb); return 0; } /* * Process CLOSE_CONN_RPL CPL message: -> host * Process a peer ACK to our FIN. */ static int do_close_con_rpl(struct t3cdev *cdev, struct sk_buff *skb, void *ctx) { struct cxgbi_sock *csk = ctx; struct cpl_close_con_rpl *rpl = cplhdr(skb); log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u, snxt %u.\n", csk, csk->state, csk->flags, csk->tid, ntohl(rpl->snd_nxt)); cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt)); __kfree_skb(skb); return 0; } /* * Process ABORT_REQ_RSS CPL message: -> host * Process abort requests. If we are waiting for an ABORT_RPL we ignore this * request except that we need to reply to it. */ static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason, int *need_rst) { switch (abort_reason) { case CPL_ERR_BAD_SYN: case CPL_ERR_CONN_RESET: return csk->state > CTP_ESTABLISHED ? -EPIPE : -ECONNRESET; case CPL_ERR_XMIT_TIMEDOUT: case CPL_ERR_PERSIST_TIMEDOUT: case CPL_ERR_FINWAIT2_TIMEDOUT: case CPL_ERR_KEEPALIVE_TIMEDOUT: return -ETIMEDOUT; default: return -EIO; } } static int do_abort_req(struct t3cdev *cdev, struct sk_buff *skb, void *ctx) { const struct cpl_abort_req_rss *req = cplhdr(skb); struct cxgbi_sock *csk = ctx; int rst_status = CPL_ABORT_NO_RST; log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", csk, csk->state, csk->flags, csk->tid); if (req->status == CPL_ERR_RTX_NEG_ADVICE || req->status == CPL_ERR_PERSIST_NEG_ADVICE) { goto done; } cxgbi_sock_get(csk); spin_lock_bh(&csk->lock); if (!cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD)) { cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD); cxgbi_sock_set_state(csk, CTP_ABORTING); goto out; } cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD); send_abort_rpl(csk, rst_status); if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) { csk->err = abort_status_to_errno(csk, req->status, &rst_status); cxgbi_sock_closed(csk); } out: spin_unlock_bh(&csk->lock); cxgbi_sock_put(csk); done: __kfree_skb(skb); return 0; } /* * Process ABORT_RPL_RSS CPL message: -> host * Process abort replies. We only process these messages if we anticipate * them as the coordination between SW and HW in this area is somewhat lacking * and sometimes we get ABORT_RPLs after we are done with the connection that * originated the ABORT_REQ. */ static int do_abort_rpl(struct t3cdev *cdev, struct sk_buff *skb, void *ctx) { struct cpl_abort_rpl_rss *rpl = cplhdr(skb); struct cxgbi_sock *csk = ctx; log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, "status 0x%x, csk 0x%p, s %u, 0x%lx.\n", rpl->status, csk, csk ? csk->state : 0, csk ? csk->flags : 0UL); /* * Ignore replies to post-close aborts indicating that the abort was * requested too late. These connections are terminated when we get * PEER_CLOSE or CLOSE_CON_RPL and by the time the abort_rpl_rss * arrives the TID is either no longer used or it has been recycled. 
*/ if (rpl->status == CPL_ERR_ABORT_FAILED) goto rel_skb; /* * Sometimes we've already closed the connection, e.g., a post-close * abort races with ABORT_REQ_RSS, the latter frees the connection * expecting the ABORT_REQ will fail with CPL_ERR_ABORT_FAILED, * but FW turns the ABORT_REQ into a regular one and so we get * ABORT_RPL_RSS with status 0 and no connection. */ if (csk) cxgbi_sock_rcv_abort_rpl(csk); rel_skb: __kfree_skb(skb); return 0; } /* * Process RX_ISCSI_HDR CPL message: -> host * Handle received PDUs, the payload could be DDP'ed. If not, the payload * follow after the bhs. */ static int do_iscsi_hdr(struct t3cdev *t3dev, struct sk_buff *skb, void *ctx) { struct cxgbi_sock *csk = ctx; struct cpl_iscsi_hdr *hdr_cpl = cplhdr(skb); struct cpl_iscsi_hdr_norss data_cpl; struct cpl_rx_data_ddp_norss ddp_cpl; unsigned int hdr_len, data_len, status; unsigned int len; int err; log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, "csk 0x%p,%u,0x%lx,%u, skb 0x%p,%u.\n", csk, csk->state, csk->flags, csk->tid, skb, skb->len); spin_lock_bh(&csk->lock); if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) { log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u, bad state.\n", csk, csk->state, csk->flags, csk->tid); if (csk->state != CTP_ABORTING) goto abort_conn; else goto discard; } cxgbi_skcb_tcp_seq(skb) = ntohl(hdr_cpl->seq); cxgbi_skcb_flags(skb) = 0; skb_reset_transport_header(skb); __skb_pull(skb, sizeof(struct cpl_iscsi_hdr)); len = hdr_len = ntohs(hdr_cpl->len); /* msg coalesce is off or not enough data received */ if (skb->len <= hdr_len) { pr_err("%s: tid %u, CPL_ISCSI_HDR, skb len %u < %u.\n", csk->cdev->ports[csk->port_id]->name, csk->tid, skb->len, hdr_len); goto abort_conn; } cxgbi_skcb_set_flag(skb, SKCBF_RX_COALESCED); err = skb_copy_bits(skb, skb->len - sizeof(ddp_cpl), &ddp_cpl, sizeof(ddp_cpl)); if (err < 0) { pr_err("%s: tid %u, copy cpl_ddp %u-%zu failed %d.\n", csk->cdev->ports[csk->port_id]->name, csk->tid, skb->len, sizeof(ddp_cpl), err); goto abort_conn; } cxgbi_skcb_set_flag(skb, SKCBF_RX_STATUS); cxgbi_skcb_rx_pdulen(skb) = ntohs(ddp_cpl.len); cxgbi_skcb_rx_ddigest(skb) = ntohl(ddp_cpl.ulp_crc); status = ntohl(ddp_cpl.ddp_status); log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, "csk 0x%p, skb 0x%p,%u, pdulen %u, status 0x%x.\n", csk, skb, skb->len, cxgbi_skcb_rx_pdulen(skb), status); if (status & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT)) cxgbi_skcb_set_flag(skb, SKCBF_RX_HCRC_ERR); if (status & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT)) cxgbi_skcb_set_flag(skb, SKCBF_RX_DCRC_ERR); if (status & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT)) cxgbi_skcb_set_flag(skb, SKCBF_RX_PAD_ERR); if (skb->len > (hdr_len + sizeof(ddp_cpl))) { err = skb_copy_bits(skb, hdr_len, &data_cpl, sizeof(data_cpl)); if (err < 0) { pr_err("%s: tid %u, cp %zu/%u failed %d.\n", csk->cdev->ports[csk->port_id]->name, csk->tid, sizeof(data_cpl), skb->len, err); goto abort_conn; } data_len = ntohs(data_cpl.len); log_debug(1 << CXGBI_DBG_DDP | 1 << CXGBI_DBG_PDU_RX, "skb 0x%p, pdu not ddp'ed %u/%u, status 0x%x.\n", skb, data_len, cxgbi_skcb_rx_pdulen(skb), status); len += sizeof(data_cpl) + data_len; } else if (status & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT)) cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA_DDPD); csk->rcv_nxt = ntohl(ddp_cpl.seq) + cxgbi_skcb_rx_pdulen(skb); __pskb_trim(skb, len); __skb_queue_tail(&csk->receive_queue, skb); cxgbi_conn_pdu_ready(csk); spin_unlock_bh(&csk->lock); return 0; abort_conn: send_abort_req(csk); discard: spin_unlock_bh(&csk->lock); __kfree_skb(skb); return 0; } /* * 
Process TX_DATA_ACK CPL messages: -> host * Process an acknowledgment of WR completion. Advance snd_una and send the * next batch of work requests from the write queue. */ static int do_wr_ack(struct t3cdev *cdev, struct sk_buff *skb, void *ctx) { struct cxgbi_sock *csk = ctx; struct cpl_wr_ack *hdr = cplhdr(skb); log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, "csk 0x%p,%u,0x%lx,%u, cr %u.\n", csk, csk->state, csk->flags, csk->tid, ntohs(hdr->credits)); cxgbi_sock_rcv_wr_ack(csk, ntohs(hdr->credits), ntohl(hdr->snd_una), 1); __kfree_skb(skb); return 0; } /* * for each connection, pre-allocate skbs needed for close/abort requests. So * that we can service the request right away. */ static int alloc_cpls(struct cxgbi_sock *csk) { csk->cpl_close = alloc_wr(sizeof(struct cpl_close_con_req), 0, GFP_KERNEL); if (!csk->cpl_close) return -ENOMEM; csk->cpl_abort_req = alloc_wr(sizeof(struct cpl_abort_req), 0, GFP_KERNEL); if (!csk->cpl_abort_req) goto free_cpl_skbs; csk->cpl_abort_rpl = alloc_wr(sizeof(struct cpl_abort_rpl), 0, GFP_KERNEL); if (!csk->cpl_abort_rpl) goto free_cpl_skbs; return 0; free_cpl_skbs: cxgbi_sock_free_cpl_skbs(csk); return -ENOMEM; } static void l2t_put(struct cxgbi_sock *csk) { struct t3cdev *t3dev = (struct t3cdev *)csk->cdev->lldev; if (csk->l2t) { l2t_release(t3dev, csk->l2t); csk->l2t = NULL; cxgbi_sock_put(csk); } } /* * release_offload_resources - release offload resource * Release resources held by an offload connection (TID, L2T entry, etc.) */ static void release_offload_resources(struct cxgbi_sock *csk) { struct t3cdev *t3dev = (struct t3cdev *)csk->cdev->lldev; log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", csk, csk->state, csk->flags, csk->tid); csk->rss_qid = 0; cxgbi_sock_free_cpl_skbs(csk); if (csk->wr_cred != csk->wr_max_cred) { cxgbi_sock_purge_wr_queue(csk); cxgbi_sock_reset_wr_list(csk); } l2t_put(csk); if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) free_atid(csk); else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) { cxgb3_remove_tid(t3dev, (void *)csk, csk->tid); cxgbi_sock_clear_flag(csk, CTPF_HAS_TID); cxgbi_sock_put(csk); } csk->dst = NULL; csk->cdev = NULL; } static void update_address(struct cxgbi_hba *chba) { if (chba->ipv4addr) { if (chba->vdev && chba->ipv4addr != cxgb3i_get_private_ipv4addr(chba->vdev)) { cxgb3i_set_private_ipv4addr(chba->vdev, chba->ipv4addr); cxgb3i_set_private_ipv4addr(chba->ndev, 0); pr_info("%s set %pI4.\n", chba->vdev->name, &chba->ipv4addr); } else if (chba->ipv4addr != cxgb3i_get_private_ipv4addr(chba->ndev)) { cxgb3i_set_private_ipv4addr(chba->ndev, chba->ipv4addr); pr_info("%s set %pI4.\n", chba->ndev->name, &chba->ipv4addr); } } else if (cxgb3i_get_private_ipv4addr(chba->ndev)) { if (chba->vdev) cxgb3i_set_private_ipv4addr(chba->vdev, 0); cxgb3i_set_private_ipv4addr(chba->ndev, 0); } } static int init_act_open(struct cxgbi_sock *csk) { struct dst_entry *dst = csk->dst; struct cxgbi_device *cdev = csk->cdev; struct t3cdev *t3dev = (struct t3cdev *)cdev->lldev; struct net_device *ndev = cdev->ports[csk->port_id]; struct cxgbi_hba *chba = cdev->hbas[csk->port_id]; struct sk_buff *skb = NULL; int ret; log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx.\n", csk, csk->state, csk->flags); update_address(chba); if (chba->ipv4addr) csk->saddr.sin_addr.s_addr = chba->ipv4addr; csk->rss_qid = 0; csk->l2t = t3_l2t_get(t3dev, dst, ndev, &csk->daddr.sin_addr.s_addr); if (!csk->l2t) { pr_err("NO l2t available.\n"); return -EINVAL; } cxgbi_sock_get(csk); csk->atid = 
cxgb3_alloc_atid(t3dev, &t3_client, csk); if (csk->atid < 0) { pr_err("NO atid available.\n"); ret = -EINVAL; goto put_sock; } cxgbi_sock_set_flag(csk, CTPF_HAS_ATID); cxgbi_sock_get(csk); skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_KERNEL); if (!skb) { ret = -ENOMEM; goto free_atid; } skb->sk = (struct sock *)csk; set_arp_failure_handler(skb, act_open_arp_failure); csk->snd_win = cxgb3i_snd_win; csk->rcv_win = cxgb3i_rcv_win; csk->wr_max_cred = csk->wr_cred = T3C_DATA(t3dev)->max_wrs - 1; csk->wr_una_cred = 0; csk->mss_idx = cxgbi_sock_select_mss(csk, dst_mtu(dst)); cxgbi_sock_reset_wr_list(csk); csk->err = 0; log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx, %pI4:%u-%pI4:%u.\n", csk, csk->state, csk->flags, &csk->saddr.sin_addr.s_addr, ntohs(csk->saddr.sin_port), &csk->daddr.sin_addr.s_addr, ntohs(csk->daddr.sin_port)); cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN); send_act_open_req(csk, skb, csk->l2t); return 0; free_atid: cxgb3_free_atid(t3dev, csk->atid); put_sock: cxgbi_sock_put(csk); l2t_release(t3dev, csk->l2t); csk->l2t = NULL; return ret; } cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS] = { [CPL_ACT_ESTABLISH] = do_act_establish, [CPL_ACT_OPEN_RPL] = do_act_open_rpl, [CPL_PEER_CLOSE] = do_peer_close, [CPL_ABORT_REQ_RSS] = do_abort_req, [CPL_ABORT_RPL_RSS] = do_abort_rpl, [CPL_CLOSE_CON_RPL] = do_close_con_rpl, [CPL_TX_DMA_ACK] = do_wr_ack, [CPL_ISCSI_HDR] = do_iscsi_hdr, }; /** * cxgb3i_ofld_init - allocate and initialize resources for each adapter found * @cdev: cxgbi adapter */ static int cxgb3i_ofld_init(struct cxgbi_device *cdev) { struct t3cdev *t3dev = (struct t3cdev *)cdev->lldev; struct adap_ports port; struct ofld_page_info rx_page_info; unsigned int wr_len; int rc; if (t3dev->ctl(t3dev, GET_WR_LEN, &wr_len) < 0 || t3dev->ctl(t3dev, GET_PORTS, &port) < 0 || t3dev->ctl(t3dev, GET_RX_PAGE_INFO, &rx_page_info) < 0) { pr_warn("t3 0x%p, offload up, ioctl failed.\n", t3dev); return -EINVAL; } if (cxgb3i_max_connect > CXGBI_MAX_CONN) cxgb3i_max_connect = CXGBI_MAX_CONN; rc = cxgbi_device_portmap_create(cdev, cxgb3i_sport_base, cxgb3i_max_connect); if (rc < 0) return rc; init_wr_tab(wr_len); cdev->csk_release_offload_resources = release_offload_resources; cdev->csk_push_tx_frames = push_tx_frames; cdev->csk_send_abort_req = send_abort_req; cdev->csk_send_close_req = send_close_req; cdev->csk_send_rx_credits = send_rx_credits; cdev->csk_alloc_cpls = alloc_cpls; cdev->csk_init_act_open = init_act_open; pr_info("cdev 0x%p, offload up, added.\n", cdev); return 0; } /* * functions to program the pagepod in h/w */ static inline void ulp_mem_io_set_hdr(struct sk_buff *skb, unsigned int addr) { struct ulp_mem_io *req = (struct ulp_mem_io *)skb->head; memset(req, 0, sizeof(*req)); req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS)); req->cmd_lock_addr = htonl(V_ULP_MEMIO_ADDR(addr >> 5) | V_ULPTX_CMD(ULP_MEM_WRITE)); req->len = htonl(V_ULP_MEMIO_DATA_LEN(IPPOD_SIZE >> 5) | V_ULPTX_NFLITS((IPPOD_SIZE >> 3) + 1)); } static struct cxgbi_ppm *cdev2ppm(struct cxgbi_device *cdev) { return ((struct t3cdev *)cdev->lldev)->ulp_iscsi; } static int ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk, struct cxgbi_task_tag_info *ttinfo) { unsigned int idx = ttinfo->idx; unsigned int npods = ttinfo->npods; struct scatterlist *sg = ttinfo->sgl; struct cxgbi_pagepod *ppod; struct ulp_mem_io *req; unsigned int sg_off; unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ppm->llimit; int i; for (i = 0; i < npods; i++, idx++, pm_addr += IPPOD_SIZE) { struct 
sk_buff *skb = alloc_wr(sizeof(struct ulp_mem_io) + IPPOD_SIZE, 0, GFP_ATOMIC); if (!skb) return -ENOMEM; ulp_mem_io_set_hdr(skb, pm_addr); req = (struct ulp_mem_io *)skb->head; ppod = (struct cxgbi_pagepod *)(req + 1); sg_off = i * PPOD_PAGES_MAX; cxgbi_ddp_set_one_ppod(ppod, ttinfo, &sg, &sg_off); skb->priority = CPL_PRIORITY_CONTROL; cxgb3_ofld_send(ppm->lldev, skb); } return 0; } static void ddp_clear_map(struct cxgbi_device *cdev, struct cxgbi_ppm *ppm, struct cxgbi_task_tag_info *ttinfo) { unsigned int idx = ttinfo->idx; unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ppm->llimit; unsigned int npods = ttinfo->npods; int i; log_debug(1 << CXGBI_DBG_DDP, "cdev 0x%p, clear idx %u, npods %u.\n", cdev, idx, npods); for (i = 0; i < npods; i++, idx++, pm_addr += IPPOD_SIZE) { struct sk_buff *skb = alloc_wr(sizeof(struct ulp_mem_io) + IPPOD_SIZE, 0, GFP_ATOMIC); if (!skb) { pr_err("cdev 0x%p, clear ddp, %u,%d/%u, skb OOM.\n", cdev, idx, i, npods); continue; } ulp_mem_io_set_hdr(skb, pm_addr); skb->priority = CPL_PRIORITY_CONTROL; cxgb3_ofld_send(ppm->lldev, skb); } } static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid, int pg_idx) { struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0, GFP_KERNEL); struct cpl_set_tcb_field *req; u64 val = pg_idx < DDP_PGIDX_MAX ? pg_idx : 0; log_debug(1 << CXGBI_DBG_DDP, "csk 0x%p, tid %u, pg_idx %d.\n", csk, tid, pg_idx); if (!skb) return -ENOMEM; /* set up ulp submode and page size */ req = (struct cpl_set_tcb_field *)skb->head; req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); req->reply = V_NO_REPLY(1); req->cpu_idx = 0; req->word = htons(31); req->mask = cpu_to_be64(0xF0000000); req->val = cpu_to_be64(val << 28); skb->priority = CPL_PRIORITY_CONTROL; cxgb3_ofld_send(csk->cdev->lldev, skb); return 0; } /** * ddp_setup_conn_digest - setup conn. digest setting * @csk: cxgb tcp socket * @tid: connection id * @hcrc: header digest enabled * @dcrc: data digest enabled * set up the iscsi digest settings for a connection identified by tid */ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, int hcrc, int dcrc) { struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0, GFP_KERNEL); struct cpl_set_tcb_field *req; u64 val = (hcrc ? 1 : 0) | (dcrc ? 
2 : 0); log_debug(1 << CXGBI_DBG_DDP, "csk 0x%p, tid %u, crc %d,%d.\n", csk, tid, hcrc, dcrc); if (!skb) return -ENOMEM; /* set up ulp submode and page size */ req = (struct cpl_set_tcb_field *)skb->head; req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); req->reply = V_NO_REPLY(1); req->cpu_idx = 0; req->word = htons(31); req->mask = cpu_to_be64(0x0F000000); req->val = cpu_to_be64(val << 24); skb->priority = CPL_PRIORITY_CONTROL; cxgb3_ofld_send(csk->cdev->lldev, skb); return 0; } /** * cxgb3i_ddp_init - initialize the cxgb3 adapter's ddp resource * @cdev: cxgb3i adapter * initialize the ddp pagepod manager for a given adapter */ static int cxgb3i_ddp_init(struct cxgbi_device *cdev) { struct t3cdev *tdev = (struct t3cdev *)cdev->lldev; struct net_device *ndev = cdev->ports[0]; struct cxgbi_tag_format tformat; unsigned int ppmax, tagmask = 0; struct ulp_iscsi_info uinfo; int i, err; err = tdev->ctl(tdev, ULP_ISCSI_GET_PARAMS, &uinfo); if (err < 0) { pr_err("%s, failed to get iscsi param %d.\n", ndev->name, err); return err; } if (uinfo.llimit >= uinfo.ulimit) { pr_warn("T3 %s, iscsi NOT enabled %u ~ %u!\n", ndev->name, uinfo.llimit, uinfo.ulimit); return -EACCES; } ppmax = (uinfo.ulimit - uinfo.llimit + 1) >> PPOD_SIZE_SHIFT; tagmask = cxgbi_tagmask_set(ppmax); pr_info("T3 %s: 0x%x~0x%x, 0x%x, tagmask 0x%x -> 0x%x.\n", ndev->name, uinfo.llimit, uinfo.ulimit, ppmax, uinfo.tagmask, tagmask); memset(&tformat, 0, sizeof(struct cxgbi_tag_format)); for (i = 0; i < 4; i++) tformat.pgsz_order[i] = uinfo.pgsz_factor[i]; cxgbi_tagmask_check(tagmask, &tformat); err = cxgbi_ddp_ppm_setup(&tdev->ulp_iscsi, cdev, &tformat, (uinfo.ulimit - uinfo.llimit + 1), uinfo.llimit, uinfo.llimit, 0, 0, 0); if (err) return err; if (!(cdev->flags & CXGBI_FLAG_DDP_OFF)) { uinfo.tagmask = tagmask; uinfo.ulimit = uinfo.llimit + (ppmax << PPOD_SIZE_SHIFT); err = tdev->ctl(tdev, ULP_ISCSI_SET_PARAMS, &uinfo); if (err < 0) { pr_err("T3 %s fail to set iscsi param %d.\n", ndev->name, err); cdev->flags |= CXGBI_FLAG_DDP_OFF; } err = 0; } cdev->csk_ddp_setup_digest = ddp_setup_conn_digest; cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx; cdev->csk_ddp_set_map = ddp_set_map; cdev->csk_ddp_clear_map = ddp_clear_map; cdev->cdev2ppm = cdev2ppm; cdev->tx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD, uinfo.max_txsz - ISCSI_PDU_NONPAYLOAD_LEN); cdev->rx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD, uinfo.max_rxsz - ISCSI_PDU_NONPAYLOAD_LEN); return 0; } static void cxgb3i_dev_close(struct t3cdev *t3dev) { struct cxgbi_device *cdev = cxgbi_device_find_by_lldev(t3dev); if (!cdev || cdev->flags & CXGBI_FLAG_ADAPTER_RESET) { pr_info("0x%p close, f 0x%x.\n", cdev, cdev ? 
cdev->flags : 0); return; } cxgbi_device_unregister(cdev); } /** * cxgb3i_dev_open - init a t3 adapter structure and any h/w settings * @t3dev: t3cdev adapter */ static void cxgb3i_dev_open(struct t3cdev *t3dev) { struct cxgbi_device *cdev = cxgbi_device_find_by_lldev(t3dev); struct adapter *adapter = tdev2adap(t3dev); int i, err; if (cdev) { pr_info("0x%p, updating.\n", cdev); return; } cdev = cxgbi_device_register(0, adapter->params.nports); if (!cdev) { pr_warn("device 0x%p register failed.\n", t3dev); return; } cdev->flags = CXGBI_FLAG_DEV_T3 | CXGBI_FLAG_IPV4_SET; cdev->lldev = t3dev; cdev->pdev = adapter->pdev; cdev->ports = adapter->port; cdev->nports = adapter->params.nports; cdev->mtus = adapter->params.mtus; cdev->nmtus = NMTUS; cdev->rx_credit_thres = cxgb3i_rx_credit_thres; cdev->skb_tx_rsvd = CXGB3I_TX_HEADER_LEN; cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr_norss); cdev->itp = &cxgb3i_iscsi_transport; err = cxgb3i_ddp_init(cdev); if (err) { pr_info("0x%p ddp init failed %d\n", cdev, err); goto err_out; } err = cxgb3i_ofld_init(cdev); if (err) { pr_info("0x%p offload init failed\n", cdev); goto err_out; } err = cxgbi_hbas_add(cdev, CXGB3I_MAX_LUN, CXGBI_MAX_CONN, &cxgb3i_host_template, cxgb3i_stt); if (err) goto err_out; for (i = 0; i < cdev->nports; i++) cdev->hbas[i]->ipv4addr = cxgb3i_get_private_ipv4addr(cdev->ports[i]); pr_info("cdev 0x%p, f 0x%x, t3dev 0x%p open, err %d.\n", cdev, cdev ? cdev->flags : 0, t3dev, err); return; err_out: cxgbi_device_unregister(cdev); } static void cxgb3i_dev_event_handler(struct t3cdev *t3dev, u32 event, u32 port) { struct cxgbi_device *cdev = cxgbi_device_find_by_lldev(t3dev); log_debug(1 << CXGBI_DBG_TOE, "0x%p, cdev 0x%p, event 0x%x, port 0x%x.\n", t3dev, cdev, event, port); if (!cdev) return; switch (event) { case OFFLOAD_STATUS_DOWN: cdev->flags |= CXGBI_FLAG_ADAPTER_RESET; break; case OFFLOAD_STATUS_UP: cdev->flags &= ~CXGBI_FLAG_ADAPTER_RESET; break; } } /** * cxgb3i_init_module - module init entry point * * initialize any driver wide global data structures and register itself * with the cxgb3 module */ static int __init cxgb3i_init_module(void) { int rc; printk(KERN_INFO "%s", version); rc = cxgbi_iscsi_init(&cxgb3i_iscsi_transport, &cxgb3i_stt); if (rc < 0) return rc; cxgb3_register_client(&t3_client); return 0; } /** * cxgb3i_exit_module - module cleanup/exit entry point * * go through the driver hba list and for each hba, release any resource held. * and unregisters iscsi transport and the cxgb3 module */ static void __exit cxgb3i_exit_module(void) { cxgb3_unregister_client(&t3_client); cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T3); cxgbi_iscsi_cleanup(&cxgb3i_iscsi_transport, &cxgb3i_stt); } module_init(cxgb3i_init_module); module_exit(cxgb3i_exit_module);
repo_name: linux-master
file_path: drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
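The push_tx_frames() routine in the cxgb3i source above budgets firmware work-request credits per connection: each skb costs skb_wrs[frags] credits, and a completion (F_WR_COMPL) is requested once roughly half of wr_max_cred is outstanding so that the firmware returns credits via CPL_WR_ACK. The following stand-alone user-space C sketch models only that bookkeeping; the struct and function names are hypothetical, and the real locking, write-queue handling, and hardware interaction are omitted.

#include <stdio.h>

/* Hypothetical stand-in for the credit fields kept in struct cxgbi_sock
 * (wr_max_cred, wr_cred, wr_una_cred) -- illustration only. */
struct conn_credits {
	int wr_max_cred;	/* credits granted by the firmware */
	int wr_cred;		/* credits currently available */
	int wr_una_cred;	/* credits used but not yet acknowledged */
};

/*
 * Mirrors the decision in push_tx_frames(): consume credits for one send
 * and decide whether to ask for a completion.  Returns 1 if a completion
 * was requested, 0 if sent without one, -1 if credits ran out.
 */
static int send_one(struct conn_credits *c, int wrs_needed, int req_completion)
{
	if (c->wr_cred < wrs_needed)
		return -1;	/* stall until a WR_ACK returns credits */

	c->wr_cred -= wrs_needed;
	c->wr_una_cred += wrs_needed;

	if ((req_completion && c->wr_una_cred == wrs_needed) ||
	    c->wr_una_cred >= c->wr_max_cred / 2) {
		c->wr_una_cred = 0;	/* the requested completion covers these */
		return 1;		/* caller would set F_WR_COMPL */
	}
	return 0;
}

/* Mirrors do_wr_ack(): the firmware hands credits back. */
static void wr_ack(struct conn_credits *c, int credits)
{
	c->wr_cred += credits;
}

int main(void)
{
	struct conn_credits c = { .wr_max_cred = 15, .wr_cred = 15, .wr_una_cred = 0 };
	int outstanding = 0;	/* WRs sent but not yet acked (simplified) */
	int i;

	for (i = 0; i < 10; i++) {
		int r = send_one(&c, 3, 0);

		if (r < 0) {
			/* out of credits: pretend a CPL_WR_ACK arrives for
			 * everything outstanding, then retry the send */
			wr_ack(&c, outstanding);
			outstanding = 0;
			r = send_one(&c, 3, 0);
		}
		outstanding += 3;
		printf("send %d: wr_cred=%d wr_una_cred=%d completion_requested=%d\n",
		       i, c.wr_cred, c.wr_una_cred, r);
	}
	return 0;
}

Built with any C compiler, the trace shows a completion being requested whenever about half of the 15 credits are consumed, which is the same heuristic the driver uses to bound the number of unacknowledged work requests.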
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2017 Broadcom. All Rights Reserved. * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries. * * Contact Information: * [email protected] */ #include <scsi/iscsi_proto.h> #include "be_main.h" #include "be.h" #include "be_mgmt.h" /* UE Status Low CSR */ static const char * const desc_ue_status_low[] = { "CEV", "CTX", "DBUF", "ERX", "Host", "MPU", "NDMA", "PTC ", "RDMA ", "RXF ", "RXIPS ", "RXULP0 ", "RXULP1 ", "RXULP2 ", "TIM ", "TPOST ", "TPRE ", "TXIPS ", "TXULP0 ", "TXULP1 ", "UC ", "WDMA ", "TXULP2 ", "HOST1 ", "P0_OB_LINK ", "P1_OB_LINK ", "HOST_GPIO ", "MBOX ", "AXGMAC0", "AXGMAC1", "JTAG", "MPU_INTPEND" }; /* UE Status High CSR */ static const char * const desc_ue_status_hi[] = { "LPCMEMHOST", "MGMT_MAC", "PCS0ONLINE", "MPU_IRAM", "PCS1ONLINE", "PCTL0", "PCTL1", "PMEM", "RR", "TXPB", "RXPP", "XAUI", "TXP", "ARM", "IPC", "HOST2", "HOST3", "HOST4", "HOST5", "HOST6", "HOST7", "HOST8", "HOST9", "NETC", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown" }; struct be_mcc_wrb *alloc_mcc_wrb(struct beiscsi_hba *phba, unsigned int *ref_tag) { struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q; struct be_mcc_wrb *wrb = NULL; unsigned int tag; spin_lock(&phba->ctrl.mcc_lock); if (mccq->used == mccq->len) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, "BC_%d : MCC queue full: WRB used %u tag avail %u\n", mccq->used, phba->ctrl.mcc_tag_available); goto alloc_failed; } if (!phba->ctrl.mcc_tag_available) goto alloc_failed; tag = phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index]; if (!tag) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, "BC_%d : MCC tag 0 allocated: tag avail %u alloc index %u\n", phba->ctrl.mcc_tag_available, phba->ctrl.mcc_alloc_index); goto alloc_failed; } /* return this tag for further reference */ *ref_tag = tag; phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index] = 0; phba->ctrl.mcc_tag_status[tag] = 0; phba->ctrl.ptag_state[tag].tag_state = 0; phba->ctrl.ptag_state[tag].cbfn = NULL; phba->ctrl.mcc_tag_available--; if (phba->ctrl.mcc_alloc_index == (MAX_MCC_CMD - 1)) phba->ctrl.mcc_alloc_index = 0; else phba->ctrl.mcc_alloc_index++; wrb = queue_head_node(mccq); memset(wrb, 0, sizeof(*wrb)); wrb->tag0 = tag; wrb->tag0 |= (mccq->head << MCC_Q_WRB_IDX_SHIFT) & MCC_Q_WRB_IDX_MASK; queue_head_inc(mccq); mccq->used++; alloc_failed: spin_unlock(&phba->ctrl.mcc_lock); return wrb; } void free_mcc_wrb(struct be_ctrl_info *ctrl, unsigned int tag) { struct be_queue_info *mccq = &ctrl->mcc_obj.q; spin_lock(&ctrl->mcc_lock); tag = tag & MCC_Q_CMD_TAG_MASK; ctrl->mcc_tag[ctrl->mcc_free_index] = tag; if (ctrl->mcc_free_index == (MAX_MCC_CMD - 1)) ctrl->mcc_free_index = 0; else ctrl->mcc_free_index++; ctrl->mcc_tag_available++; mccq->used--; spin_unlock(&ctrl->mcc_lock); } /* * beiscsi_mcc_compl_status - Return the status of MCC completion * @phba: Driver private structure * @tag: Tag for the MBX Command * @wrb: the WRB used for the MBX Command * @mbx_cmd_mem: ptr to memory allocated for MBX Cmd * * return * Success: 0 * Failure: Non-Zero */ int __beiscsi_mcc_compl_status(struct beiscsi_hba *phba, unsigned int tag, struct be_mcc_wrb **wrb, struct be_dma_mem *mbx_cmd_mem) { struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q; uint16_t status = 0, addl_status = 0, wrb_num = 0; struct be_cmd_resp_hdr *mbx_resp_hdr; struct be_cmd_req_hdr *mbx_hdr; struct be_mcc_wrb *temp_wrb; uint32_t mcc_tag_status; int rc = 0; 
mcc_tag_status = phba->ctrl.mcc_tag_status[tag]; status = (mcc_tag_status & CQE_STATUS_MASK); addl_status = ((mcc_tag_status & CQE_STATUS_ADDL_MASK) >> CQE_STATUS_ADDL_SHIFT); if (mbx_cmd_mem) { mbx_hdr = (struct be_cmd_req_hdr *)mbx_cmd_mem->va; } else { wrb_num = (mcc_tag_status & CQE_STATUS_WRB_MASK) >> CQE_STATUS_WRB_SHIFT; temp_wrb = (struct be_mcc_wrb *)queue_get_wrb(mccq, wrb_num); mbx_hdr = embedded_payload(temp_wrb); if (wrb) *wrb = temp_wrb; } if (status || addl_status) { beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT | BEISCSI_LOG_EH | BEISCSI_LOG_CONFIG, "BC_%d : MBX Cmd Failed for Subsys : %d Opcode : %d with Status : %d and Extd_Status : %d\n", mbx_hdr->subsystem, mbx_hdr->opcode, status, addl_status); rc = -EIO; if (status == MCC_STATUS_INSUFFICIENT_BUFFER) { mbx_resp_hdr = (struct be_cmd_resp_hdr *)mbx_hdr; beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT | BEISCSI_LOG_EH | BEISCSI_LOG_CONFIG, "BC_%d : Insufficient Buffer Error Resp_Len : %d Actual_Resp_Len : %d\n", mbx_resp_hdr->response_length, mbx_resp_hdr->actual_resp_len); rc = -EAGAIN; } } return rc; } /* * beiscsi_mccq_compl_wait()- Process completion in MCC CQ * @phba: Driver private structure * @tag: Tag for the MBX Command * @wrb: the WRB used for the MBX Command * @mbx_cmd_mem: ptr to memory allocated for MBX Cmd * * Waits for MBX completion with the passed TAG. * * return * Success: 0 * Failure: Non-Zero **/ int beiscsi_mccq_compl_wait(struct beiscsi_hba *phba, unsigned int tag, struct be_mcc_wrb **wrb, struct be_dma_mem *mbx_cmd_mem) { int rc = 0; if (!tag || tag > MAX_MCC_CMD) { __beiscsi_log(phba, KERN_ERR, "BC_%d : invalid tag %u\n", tag); return -EINVAL; } if (beiscsi_hba_in_error(phba)) { clear_bit(MCC_TAG_STATE_RUNNING, &phba->ctrl.ptag_state[tag].tag_state); return -EIO; } /* wait for the mccq completion */ rc = wait_event_interruptible_timeout(phba->ctrl.mcc_wait[tag], phba->ctrl.mcc_tag_status[tag], msecs_to_jiffies( BEISCSI_HOST_MBX_TIMEOUT)); /** * Return EIO if port is being disabled. Associated DMA memory, if any, * is freed by the caller. When port goes offline, MCCQ is cleaned up * so does WRB. */ if (!test_bit(BEISCSI_HBA_ONLINE, &phba->state)) { clear_bit(MCC_TAG_STATE_RUNNING, &phba->ctrl.ptag_state[tag].tag_state); return -EIO; } /** * If MBOX cmd timeout expired, tag and resource allocated * for cmd is not freed until FW returns completion. */ if (rc <= 0) { struct be_dma_mem *tag_mem; /** * PCI/DMA memory allocated and posted in non-embedded mode * will have mbx_cmd_mem != NULL. * Save virtual and bus addresses for the command so that it * can be freed later. 
**/ tag_mem = &phba->ctrl.ptag_state[tag].tag_mem_state; if (mbx_cmd_mem) { tag_mem->size = mbx_cmd_mem->size; tag_mem->va = mbx_cmd_mem->va; tag_mem->dma = mbx_cmd_mem->dma; } else tag_mem->size = 0; /* first make tag_mem_state visible to all */ wmb(); set_bit(MCC_TAG_STATE_TIMEOUT, &phba->ctrl.ptag_state[tag].tag_state); beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT | BEISCSI_LOG_EH | BEISCSI_LOG_CONFIG, "BC_%d : MBX Cmd Completion timed out\n"); return -EBUSY; } rc = __beiscsi_mcc_compl_status(phba, tag, wrb, mbx_cmd_mem); free_mcc_wrb(&phba->ctrl, tag); return rc; } /* * beiscsi_process_mbox_compl()- Check the MBX completion status * @ctrl: Function specific MBX data structure * @compl: Completion status of MBX Command * * Check for the MBX completion status when BMBX method used * * return * Success: Zero * Failure: Non-Zero **/ static int beiscsi_process_mbox_compl(struct be_ctrl_info *ctrl, struct be_mcc_compl *compl) { struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev); struct be_cmd_req_hdr *hdr = embedded_payload(wrb); u16 compl_status, extd_status; /** * To check if valid bit is set, check the entire word as we don't know * the endianness of the data (old entry is host endian while a new * entry is little endian) */ if (!compl->flags) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, "BC_%d : BMBX busy, no completion\n"); return -EBUSY; } compl->flags = le32_to_cpu(compl->flags); WARN_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0); /** * Just swap the status to host endian; * mcc tag is opaquely copied from mcc_wrb. */ be_dws_le_to_cpu(compl, 4); compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) & CQE_STATUS_COMPL_MASK; extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) & CQE_STATUS_EXTD_MASK; /* Need to reset the entire word that houses the valid bit */ compl->flags = 0; if (compl_status == MCC_STATUS_SUCCESS) return 0; beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, "BC_%d : error in cmd completion: Subsystem : %d Opcode : %d status(compl/extd)=%d/%d\n", hdr->subsystem, hdr->opcode, compl_status, extd_status); return compl_status; } static void beiscsi_process_async_link(struct beiscsi_hba *phba, struct be_mcc_compl *compl) { struct be_async_event_link_state *evt; evt = (struct be_async_event_link_state *)compl; phba->port_speed = evt->port_speed; /** * Check logical link status in ASYNC event. * This has been newly introduced in SKH-R Firmware 10.0.338.45. 
**/ if (evt->port_link_status & BE_ASYNC_LINK_UP_MASK) { set_bit(BEISCSI_HBA_LINK_UP, &phba->state); if (test_bit(BEISCSI_HBA_BOOT_FOUND, &phba->state)) beiscsi_start_boot_work(phba, BE_BOOT_INVALID_SHANDLE); __beiscsi_log(phba, KERN_ERR, "BC_%d : Link Up on Port %d tag 0x%x\n", evt->physical_port, evt->event_tag); } else { clear_bit(BEISCSI_HBA_LINK_UP, &phba->state); __beiscsi_log(phba, KERN_ERR, "BC_%d : Link Down on Port %d tag 0x%x\n", evt->physical_port, evt->event_tag); iscsi_host_for_each_session(phba->shost, beiscsi_session_fail); } } static char *beiscsi_port_misconf_event_msg[] = { "Physical Link is functional.", "Optics faulted/incorrectly installed/not installed - Reseat optics, if issue not resolved, replace.", "Optics of two types installed - Remove one optic or install matching pair of optics.", "Incompatible optics - Replace with compatible optics for card to function.", "Unqualified optics - Replace with Avago optics for Warranty and Technical Support.", "Uncertified optics - Replace with Avago Certified optics to enable link operation." }; static void beiscsi_process_async_sli(struct beiscsi_hba *phba, struct be_mcc_compl *compl) { struct be_async_event_sli *async_sli; u8 evt_type, state, old_state, le; char *sev = KERN_WARNING; char *msg = NULL; evt_type = compl->flags >> ASYNC_TRAILER_EVENT_TYPE_SHIFT; evt_type &= ASYNC_TRAILER_EVENT_TYPE_MASK; /* processing only MISCONFIGURED physical port event */ if (evt_type != ASYNC_SLI_EVENT_TYPE_MISCONFIGURED) return; async_sli = (struct be_async_event_sli *)compl; state = async_sli->event_data1 >> (phba->fw_config.phys_port * 8) & 0xff; le = async_sli->event_data2 >> (phba->fw_config.phys_port * 8) & 0xff; old_state = phba->optic_state; phba->optic_state = state; if (state >= ARRAY_SIZE(beiscsi_port_misconf_event_msg)) { /* fw is reporting a state we don't know, log and return */ __beiscsi_log(phba, KERN_ERR, "BC_%d : Port %c: Unrecognized optic state 0x%x\n", phba->port_name, async_sli->event_data1); return; } if (ASYNC_SLI_LINK_EFFECT_VALID(le)) { /* log link effect for unqualified-4, uncertified-5 optics */ if (state > 3) msg = (ASYNC_SLI_LINK_EFFECT_STATE(le)) ? " Link is non-operational." : " Link is operational."; /* 1 - info */ if (ASYNC_SLI_LINK_EFFECT_SEV(le) == 1) sev = KERN_INFO; /* 2 - error */ if (ASYNC_SLI_LINK_EFFECT_SEV(le) == 2) sev = KERN_ERR; } if (old_state != phba->optic_state) __beiscsi_log(phba, sev, "BC_%d : Port %c: %s%s\n", phba->port_name, beiscsi_port_misconf_event_msg[state], !msg ? 
"" : msg); } void beiscsi_process_async_event(struct beiscsi_hba *phba, struct be_mcc_compl *compl) { char *sev = KERN_INFO; u8 evt_code; /* interpret flags as an async trailer */ evt_code = compl->flags >> ASYNC_TRAILER_EVENT_CODE_SHIFT; evt_code &= ASYNC_TRAILER_EVENT_CODE_MASK; switch (evt_code) { case ASYNC_EVENT_CODE_LINK_STATE: beiscsi_process_async_link(phba, compl); break; case ASYNC_EVENT_CODE_ISCSI: if (test_bit(BEISCSI_HBA_BOOT_FOUND, &phba->state)) beiscsi_start_boot_work(phba, BE_BOOT_INVALID_SHANDLE); sev = KERN_ERR; break; case ASYNC_EVENT_CODE_SLI: beiscsi_process_async_sli(phba, compl); break; default: /* event not registered */ sev = KERN_ERR; } beiscsi_log(phba, sev, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, "BC_%d : ASYNC Event %x: status 0x%08x flags 0x%08x\n", evt_code, compl->status, compl->flags); } int beiscsi_process_mcc_compl(struct be_ctrl_info *ctrl, struct be_mcc_compl *compl) { struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev); u16 compl_status, extd_status; struct be_dma_mem *tag_mem; unsigned int tag, wrb_idx; be_dws_le_to_cpu(compl, 4); tag = (compl->tag0 & MCC_Q_CMD_TAG_MASK); wrb_idx = (compl->tag0 & CQE_STATUS_WRB_MASK) >> CQE_STATUS_WRB_SHIFT; if (!test_bit(MCC_TAG_STATE_RUNNING, &ctrl->ptag_state[tag].tag_state)) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_MBOX | BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG, "BC_%d : MBX cmd completed but not posted\n"); return 0; } /* end MCC with this tag */ clear_bit(MCC_TAG_STATE_RUNNING, &ctrl->ptag_state[tag].tag_state); if (test_bit(MCC_TAG_STATE_TIMEOUT, &ctrl->ptag_state[tag].tag_state)) { beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_MBOX | BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG, "BC_%d : MBX Completion for timeout Command from FW\n"); /** * Check for the size before freeing resource. * Only for non-embedded cmd, PCI resource is allocated. 
**/ tag_mem = &ctrl->ptag_state[tag].tag_mem_state; if (tag_mem->size) { dma_free_coherent(&ctrl->pdev->dev, tag_mem->size, tag_mem->va, tag_mem->dma); tag_mem->size = 0; } free_mcc_wrb(ctrl, tag); return 0; } compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) & CQE_STATUS_COMPL_MASK; extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) & CQE_STATUS_EXTD_MASK; /* The ctrl.mcc_tag_status[tag] is filled with * [31] = valid, [30:24] = Rsvd, [23:16] = wrb, [15:8] = extd_status, * [7:0] = compl_status */ ctrl->mcc_tag_status[tag] = CQE_VALID_MASK; ctrl->mcc_tag_status[tag] |= (wrb_idx << CQE_STATUS_WRB_SHIFT); ctrl->mcc_tag_status[tag] |= (extd_status << CQE_STATUS_ADDL_SHIFT) & CQE_STATUS_ADDL_MASK; ctrl->mcc_tag_status[tag] |= (compl_status & CQE_STATUS_MASK); if (test_bit(MCC_TAG_STATE_ASYNC, &ctrl->ptag_state[tag].tag_state)) { if (ctrl->ptag_state[tag].cbfn) ctrl->ptag_state[tag].cbfn(phba, tag); else __beiscsi_log(phba, KERN_ERR, "BC_%d : MBX ASYNC command with no callback\n"); free_mcc_wrb(ctrl, tag); return 0; } if (test_bit(MCC_TAG_STATE_IGNORE, &ctrl->ptag_state[tag].tag_state)) { /* just check completion status and free wrb */ __beiscsi_mcc_compl_status(phba, tag, NULL, NULL); free_mcc_wrb(ctrl, tag); return 0; } wake_up_interruptible(&ctrl->mcc_wait[tag]); return 0; } void be_mcc_notify(struct beiscsi_hba *phba, unsigned int tag) { struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q; u32 val = 0; set_bit(MCC_TAG_STATE_RUNNING, &phba->ctrl.ptag_state[tag].tag_state); val |= mccq->id & DB_MCCQ_RING_ID_MASK; val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT; /* make request available for DMA */ wmb(); iowrite32(val, phba->db_va + DB_MCCQ_OFFSET); } /* * be_mbox_db_ready_poll()- Check ready status * @ctrl: Function specific MBX data structure * * Check for the ready status of FW to send BMBX * commands to adapter. * * return * Success: 0 * Failure: Non-Zero **/ static int be_mbox_db_ready_poll(struct be_ctrl_info *ctrl) { /* wait 30s for generic non-flash MBOX operation */ #define BEISCSI_MBX_RDY_BIT_TIMEOUT 30000 void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET; struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev); unsigned long timeout; u32 ready; /* * This BMBX busy wait path is used during init only. * For the commands executed during init, 5s should suffice. 
*/ timeout = jiffies + msecs_to_jiffies(BEISCSI_MBX_RDY_BIT_TIMEOUT); do { if (beiscsi_hba_in_error(phba)) return -EIO; ready = ioread32(db); if (ready == 0xffffffff) return -EIO; ready &= MPU_MAILBOX_DB_RDY_MASK; if (ready) return 0; if (time_after(jiffies, timeout)) break; /* 1ms sleep is enough in most cases */ schedule_timeout_uninterruptible(msecs_to_jiffies(1)); } while (!ready); beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, "BC_%d : FW Timed Out\n"); set_bit(BEISCSI_HBA_FW_TIMEOUT, &phba->state); return -EBUSY; } /* * be_mbox_notify: Notify adapter of new BMBX command * @ctrl: Function specific MBX data structure * * Ring doorbell to inform adapter of a BMBX command * to process * * return * Success: 0 * Failure: Non-Zero **/ static int be_mbox_notify(struct be_ctrl_info *ctrl) { int status; u32 val = 0; void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET; struct be_dma_mem *mbox_mem = &ctrl->mbox_mem; struct be_mcc_mailbox *mbox = mbox_mem->va; status = be_mbox_db_ready_poll(ctrl); if (status) return status; val &= ~MPU_MAILBOX_DB_RDY_MASK; val |= MPU_MAILBOX_DB_HI_MASK; val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2; iowrite32(val, db); status = be_mbox_db_ready_poll(ctrl); if (status) return status; val = 0; val &= ~MPU_MAILBOX_DB_RDY_MASK; val &= ~MPU_MAILBOX_DB_HI_MASK; val |= (u32) (mbox_mem->dma >> 4) << 2; iowrite32(val, db); status = be_mbox_db_ready_poll(ctrl); if (status) return status; /* RDY is set; small delay before CQE read. */ udelay(1); status = beiscsi_process_mbox_compl(ctrl, &mbox->compl); return status; } void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, u32 payload_len, bool embedded, u8 sge_cnt) { if (embedded) wrb->emb_sgecnt_special |= MCC_WRB_EMBEDDED_MASK; else wrb->emb_sgecnt_special |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) << MCC_WRB_SGE_CNT_SHIFT; wrb->payload_length = payload_len; be_dws_cpu_to_le(wrb, 8); } void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr, u8 subsystem, u8 opcode, u32 cmd_len) { req_hdr->opcode = opcode; req_hdr->subsystem = subsystem; req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr)); req_hdr->timeout = BEISCSI_FW_MBX_TIMEOUT; } static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages, struct be_dma_mem *mem) { int i, buf_pages; u64 dma = (u64) mem->dma; buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages); for (i = 0; i < buf_pages; i++) { pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF); pages[i].hi = cpu_to_le32(upper_32_bits(dma)); dma += PAGE_SIZE_4K; } } static u32 eq_delay_to_mult(u32 usec_delay) { #define MAX_INTR_RATE 651042 const u32 round = 10; u32 multiplier; if (usec_delay == 0) multiplier = 0; else { u32 interrupt_rate = 1000000 / usec_delay; if (interrupt_rate == 0) multiplier = 1023; else { multiplier = (MAX_INTR_RATE - interrupt_rate) * round; multiplier /= interrupt_rate; multiplier = (multiplier + round / 2) / round; multiplier = min(multiplier, (u32) 1023); } } return multiplier; } struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem) { return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb; } int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl, struct be_queue_info *eq, int eq_delay) { struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); struct be_cmd_req_eq_create *req = embedded_payload(wrb); struct be_cmd_resp_eq_create *resp = embedded_payload(wrb); struct be_dma_mem *q_mem = &eq->dma_mem; int status; mutex_lock(&ctrl->mbox_lock); memset(wrb, 0, sizeof(*wrb)); be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); 
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_EQ_CREATE, sizeof(*req)); req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); AMAP_SET_BITS(struct amap_eq_context, func, req->context, PCI_FUNC(ctrl->pdev->devfn)); AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1); AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0); AMAP_SET_BITS(struct amap_eq_context, count, req->context, __ilog2_u32(eq->len / 256)); AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context, eq_delay_to_mult(eq_delay)); be_dws_cpu_to_le(req->context, sizeof(req->context)); be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); status = be_mbox_notify(ctrl); if (!status) { eq->id = le16_to_cpu(resp->eq_id); eq->created = true; } mutex_unlock(&ctrl->mbox_lock); return status; } int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl, struct be_queue_info *cq, struct be_queue_info *eq, bool sol_evts, bool no_delay, int coalesce_wm) { struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); struct be_cmd_req_cq_create *req = embedded_payload(wrb); struct be_cmd_resp_cq_create *resp = embedded_payload(wrb); struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev); struct be_dma_mem *q_mem = &cq->dma_mem; void *ctxt = &req->context; int status; mutex_lock(&ctrl->mbox_lock); memset(wrb, 0, sizeof(*wrb)); be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_CQ_CREATE, sizeof(*req)); req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); if (is_chip_be2_be3r(phba)) { AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm); AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay); AMAP_SET_BITS(struct amap_cq_context, count, ctxt, __ilog2_u32(cq->len / 256)); AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1); AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts); AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1); AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id); AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1); AMAP_SET_BITS(struct amap_cq_context, func, ctxt, PCI_FUNC(ctrl->pdev->devfn)); } else { req->hdr.version = MBX_CMD_VER2; req->page_size = 1; AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm, ctxt, coalesce_wm); AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt, no_delay); AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt, __ilog2_u32(cq->len / 256)); AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1); AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1); AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id); AMAP_SET_BITS(struct amap_cq_context_v2, armed, ctxt, 1); } be_dws_cpu_to_le(ctxt, sizeof(req->context)); be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); status = be_mbox_notify(ctrl); if (!status) { cq->id = le16_to_cpu(resp->cq_id); cq->created = true; } else beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BC_%d : In be_cmd_cq_create, status=ox%08x\n", status); mutex_unlock(&ctrl->mbox_lock); return status; } static u32 be_encoded_q_len(int q_len) { u32 len_encoded = fls(q_len); /* log2(len) + 1 */ if (len_encoded == 16) len_encoded = 0; return len_encoded; } int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba, struct be_queue_info *mccq, struct be_queue_info *cq) { struct be_mcc_wrb *wrb; struct be_cmd_req_mcc_create_ext *req; struct be_dma_mem *q_mem = &mccq->dma_mem; struct be_ctrl_info *ctrl; void *ctxt; int status; mutex_lock(&phba->ctrl.mbox_lock); ctrl = 
&phba->ctrl; wrb = wrb_from_mbox(&ctrl->mbox_mem); memset(wrb, 0, sizeof(*wrb)); req = embedded_payload(wrb); ctxt = &req->context; be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req)); req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); req->async_evt_bitmap = 1 << ASYNC_EVENT_CODE_LINK_STATE; req->async_evt_bitmap |= 1 << ASYNC_EVENT_CODE_ISCSI; req->async_evt_bitmap |= 1 << ASYNC_EVENT_CODE_SLI; AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt, PCI_FUNC(phba->pcidev->devfn)); AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1); AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt, be_encoded_q_len(mccq->len)); AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id); be_dws_cpu_to_le(ctxt, sizeof(req->context)); be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); status = be_mbox_notify(ctrl); if (!status) { struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb); mccq->id = le16_to_cpu(resp->id); mccq->created = true; } mutex_unlock(&phba->ctrl.mbox_lock); return status; } int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q, int queue_type) { struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); struct be_cmd_req_q_destroy *req = embedded_payload(wrb); struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev); u8 subsys = 0, opcode = 0; int status; beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, "BC_%d : In beiscsi_cmd_q_destroy " "queue_type : %d\n", queue_type); mutex_lock(&ctrl->mbox_lock); memset(wrb, 0, sizeof(*wrb)); be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); switch (queue_type) { case QTYPE_EQ: subsys = CMD_SUBSYSTEM_COMMON; opcode = OPCODE_COMMON_EQ_DESTROY; break; case QTYPE_CQ: subsys = CMD_SUBSYSTEM_COMMON; opcode = OPCODE_COMMON_CQ_DESTROY; break; case QTYPE_MCCQ: subsys = CMD_SUBSYSTEM_COMMON; opcode = OPCODE_COMMON_MCC_DESTROY; break; case QTYPE_WRBQ: subsys = CMD_SUBSYSTEM_ISCSI; opcode = OPCODE_COMMON_ISCSI_WRBQ_DESTROY; break; case QTYPE_DPDUQ: subsys = CMD_SUBSYSTEM_ISCSI; opcode = OPCODE_COMMON_ISCSI_DEFQ_DESTROY; break; case QTYPE_SGL: subsys = CMD_SUBSYSTEM_ISCSI; opcode = OPCODE_COMMON_ISCSI_CFG_REMOVE_SGL_PAGES; break; default: mutex_unlock(&ctrl->mbox_lock); BUG(); } be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req)); if (queue_type != QTYPE_SGL) req->id = cpu_to_le16(q->id); status = be_mbox_notify(ctrl); mutex_unlock(&ctrl->mbox_lock); return status; } /** * be_cmd_create_default_pdu_queue()- Create DEFQ for the adapter * @ctrl: ptr to ctrl_info * @cq: Completion Queue * @dq: Default Queue * @length: ring size * @entry_size: size of each entry in DEFQ * @is_header: Header or Data DEFQ * @ulp_num: Bind to which ULP * * Create HDR/Data DEFQ for the passed ULP. 
Unsol PDU are posted * on this queue by the FW * * return * Success: 0 * Failure: Non-Zero Value * **/ int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl, struct be_queue_info *cq, struct be_queue_info *dq, int length, int entry_size, uint8_t is_header, uint8_t ulp_num) { struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); struct be_defq_create_req *req = embedded_payload(wrb); struct be_dma_mem *q_mem = &dq->dma_mem; struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev); void *ctxt = &req->context; int status; mutex_lock(&ctrl->mbox_lock); memset(wrb, 0, sizeof(*wrb)); be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, OPCODE_COMMON_ISCSI_DEFQ_CREATE, sizeof(*req)); req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); if (phba->fw_config.dual_ulp_aware) { req->ulp_num = ulp_num; req->dua_feature |= (1 << BEISCSI_DUAL_ULP_AWARE_BIT); req->dua_feature |= (1 << BEISCSI_BIND_Q_TO_ULP_BIT); } if (is_chip_be2_be3r(phba)) { AMAP_SET_BITS(struct amap_be_default_pdu_context, rx_pdid, ctxt, 0); AMAP_SET_BITS(struct amap_be_default_pdu_context, rx_pdid_valid, ctxt, 1); AMAP_SET_BITS(struct amap_be_default_pdu_context, pci_func_id, ctxt, PCI_FUNC(ctrl->pdev->devfn)); AMAP_SET_BITS(struct amap_be_default_pdu_context, ring_size, ctxt, be_encoded_q_len(length / sizeof(struct phys_addr))); AMAP_SET_BITS(struct amap_be_default_pdu_context, default_buffer_size, ctxt, entry_size); AMAP_SET_BITS(struct amap_be_default_pdu_context, cq_id_recv, ctxt, cq->id); } else { AMAP_SET_BITS(struct amap_default_pdu_context_ext, rx_pdid, ctxt, 0); AMAP_SET_BITS(struct amap_default_pdu_context_ext, rx_pdid_valid, ctxt, 1); AMAP_SET_BITS(struct amap_default_pdu_context_ext, ring_size, ctxt, be_encoded_q_len(length / sizeof(struct phys_addr))); AMAP_SET_BITS(struct amap_default_pdu_context_ext, default_buffer_size, ctxt, entry_size); AMAP_SET_BITS(struct amap_default_pdu_context_ext, cq_id_recv, ctxt, cq->id); } be_dws_cpu_to_le(ctxt, sizeof(req->context)); be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); status = be_mbox_notify(ctrl); if (!status) { struct be_ring *defq_ring; struct be_defq_create_resp *resp = embedded_payload(wrb); dq->id = le16_to_cpu(resp->id); dq->created = true; if (is_header) defq_ring = &phba->phwi_ctrlr->default_pdu_hdr[ulp_num]; else defq_ring = &phba->phwi_ctrlr-> default_pdu_data[ulp_num]; defq_ring->id = dq->id; if (!phba->fw_config.dual_ulp_aware) { defq_ring->ulp_num = BEISCSI_ULP0; defq_ring->doorbell_offset = DB_RXULP0_OFFSET; } else { defq_ring->ulp_num = resp->ulp_num; defq_ring->doorbell_offset = resp->doorbell_offset; } } mutex_unlock(&ctrl->mbox_lock); return status; } /** * be_cmd_wrbq_create()- Create WRBQ * @ctrl: ptr to ctrl_info * @q_mem: memory details for the queue * @wrbq: queue info * @pwrb_context: ptr to wrb_context * @ulp_num: ULP on which the WRBQ is to be created * * Create WRBQ on the passed ULP_NUM. 
* **/ int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem, struct be_queue_info *wrbq, struct hwi_wrb_context *pwrb_context, uint8_t ulp_num) { struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); struct be_wrbq_create_req *req = embedded_payload(wrb); struct be_wrbq_create_resp *resp = embedded_payload(wrb); struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev); int status; mutex_lock(&ctrl->mbox_lock); memset(wrb, 0, sizeof(*wrb)); be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, OPCODE_COMMON_ISCSI_WRBQ_CREATE, sizeof(*req)); req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); if (phba->fw_config.dual_ulp_aware) { req->ulp_num = ulp_num; req->dua_feature |= (1 << BEISCSI_DUAL_ULP_AWARE_BIT); req->dua_feature |= (1 << BEISCSI_BIND_Q_TO_ULP_BIT); } be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); status = be_mbox_notify(ctrl); if (!status) { wrbq->id = le16_to_cpu(resp->cid); wrbq->created = true; pwrb_context->cid = wrbq->id; if (!phba->fw_config.dual_ulp_aware) { pwrb_context->doorbell_offset = DB_TXULP0_OFFSET; pwrb_context->ulp_num = BEISCSI_ULP0; } else { pwrb_context->ulp_num = resp->ulp_num; pwrb_context->doorbell_offset = resp->doorbell_offset; } } mutex_unlock(&ctrl->mbox_lock); return status; } int be_cmd_iscsi_post_template_hdr(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem) { struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); struct be_post_template_pages_req *req = embedded_payload(wrb); int status; mutex_lock(&ctrl->mbox_lock); memset(wrb, 0, sizeof(*wrb)); be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_ADD_TEMPLATE_HEADER_BUFFERS, sizeof(*req)); req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); req->type = BEISCSI_TEMPLATE_HDR_TYPE_ISCSI; be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); status = be_mbox_notify(ctrl); mutex_unlock(&ctrl->mbox_lock); return status; } int be_cmd_iscsi_remove_template_hdr(struct be_ctrl_info *ctrl) { struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); struct be_remove_template_pages_req *req = embedded_payload(wrb); int status; mutex_lock(&ctrl->mbox_lock); memset(wrb, 0, sizeof(*wrb)); be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_REMOVE_TEMPLATE_HEADER_BUFFERS, sizeof(*req)); req->type = BEISCSI_TEMPLATE_HDR_TYPE_ISCSI; status = be_mbox_notify(ctrl); mutex_unlock(&ctrl->mbox_lock); return status; } int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem, u32 page_offset, u32 num_pages) { struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); struct be_post_sgl_pages_req *req = embedded_payload(wrb); struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev); int status; unsigned int curr_pages; u32 temp_num_pages = num_pages; if (num_pages == 0xff) num_pages = 1; mutex_lock(&ctrl->mbox_lock); do { memset(wrb, 0, sizeof(*wrb)); be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, OPCODE_COMMON_ISCSI_CFG_POST_SGL_PAGES, sizeof(*req)); curr_pages = BE_NUMBER_OF_FIELD(struct be_post_sgl_pages_req, pages); req->num_pages = min(num_pages, curr_pages); req->page_offset = page_offset; be_cmd_page_addrs_prepare(req->pages, req->num_pages, q_mem); q_mem->dma = q_mem->dma + (req->num_pages * PAGE_SIZE); page_offset += req->num_pages; num_pages -= req->num_pages; if (temp_num_pages == 0xff) 
req->num_pages = temp_num_pages; status = be_mbox_notify(ctrl); if (status) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BC_%d : FW CMD to map iscsi frags failed.\n"); goto error; } } while (num_pages > 0); error: mutex_unlock(&ctrl->mbox_lock); if (status != 0) beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL); return status; } /** * be_cmd_set_vlan()- Configure VLAN paramters on the adapter * @phba: device priv structure instance * @vlan_tag: TAG to be set * * Set the VLAN_TAG for the adapter or Disable VLAN on adapter * * returns * TAG for the MBX Cmd * **/ int be_cmd_set_vlan(struct beiscsi_hba *phba, uint16_t vlan_tag) { unsigned int tag; struct be_mcc_wrb *wrb; struct be_cmd_set_vlan_req *req; struct be_ctrl_info *ctrl = &phba->ctrl; if (mutex_lock_interruptible(&ctrl->mbox_lock)) return 0; wrb = alloc_mcc_wrb(phba, &tag); if (!wrb) { mutex_unlock(&ctrl->mbox_lock); return 0; } req = embedded_payload(wrb); be_wrb_hdr_prepare(wrb, sizeof(*wrb), true, 0); be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, OPCODE_COMMON_ISCSI_NTWK_SET_VLAN, sizeof(*req)); req->interface_hndl = phba->interface_handle; req->vlan_priority = vlan_tag; be_mcc_notify(phba, tag); mutex_unlock(&ctrl->mbox_lock); return tag; } int beiscsi_check_supported_fw(struct be_ctrl_info *ctrl, struct beiscsi_hba *phba) { struct be_dma_mem nonemb_cmd; struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); struct be_mgmt_controller_attributes *req; struct be_sge *sge = nonembedded_sgl(wrb); int status = 0; nonemb_cmd.va = dma_alloc_coherent(&ctrl->pdev->dev, sizeof(struct be_mgmt_controller_attributes), &nonemb_cmd.dma, GFP_KERNEL); if (nonemb_cmd.va == NULL) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BG_%d : dma_alloc_coherent failed in %s\n", __func__); return -ENOMEM; } nonemb_cmd.size = sizeof(struct be_mgmt_controller_attributes); req = nonemb_cmd.va; memset(req, 0, sizeof(*req)); mutex_lock(&ctrl->mbox_lock); memset(wrb, 0, sizeof(*wrb)); be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1); be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_GET_CNTL_ATTRIBUTES, sizeof(*req)); sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd.dma)); sge->pa_lo = cpu_to_le32(nonemb_cmd.dma & 0xFFFFFFFF); sge->len = cpu_to_le32(nonemb_cmd.size); status = be_mbox_notify(ctrl); if (!status) { struct be_mgmt_controller_attributes_resp *resp = nonemb_cmd.va; beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, "BG_%d : Firmware Version of CMD : %s\n" "Firmware Version is : %s\n" "Developer Build, not performing version check...\n", resp->params.hba_attribs .flashrom_version_string, resp->params.hba_attribs. firmware_version_string); phba->fw_config.iscsi_features = resp->params.hba_attribs.iscsi_features; beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, "BM_%d : phba->fw_config.iscsi_features = %d\n", phba->fw_config.iscsi_features); memcpy(phba->fw_ver_str, resp->params.hba_attribs. firmware_version_string, BEISCSI_VER_STRLEN); } else beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BG_%d : Failed in beiscsi_check_supported_fw\n"); mutex_unlock(&ctrl->mbox_lock); if (nonemb_cmd.va) dma_free_coherent(&ctrl->pdev->dev, nonemb_cmd.size, nonemb_cmd.va, nonemb_cmd.dma); return status; } /** * beiscsi_get_fw_config()- Get the FW config for the function * @ctrl: ptr to Ctrl Info * @phba: ptr to the dev priv structure * * Get the FW config and resources available for the function. * The resources are created based on the count received here. 
* * return * Success: 0 * Failure: Non-Zero Value **/ int beiscsi_get_fw_config(struct be_ctrl_info *ctrl, struct beiscsi_hba *phba) { struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); struct be_fw_cfg *pfw_cfg = embedded_payload(wrb); uint32_t cid_count, icd_count; int status = -EINVAL; uint8_t ulp_num = 0; mutex_lock(&ctrl->mbox_lock); memset(wrb, 0, sizeof(*wrb)); be_wrb_hdr_prepare(wrb, sizeof(*pfw_cfg), true, 0); be_cmd_hdr_prepare(&pfw_cfg->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, EMBED_MBX_MAX_PAYLOAD_SIZE); if (be_mbox_notify(ctrl)) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BG_%d : Failed in beiscsi_get_fw_config\n"); goto fail_init; } /* FW response formats depend on port id */ phba->fw_config.phys_port = pfw_cfg->phys_port; if (phba->fw_config.phys_port >= BEISCSI_PHYS_PORT_MAX) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BG_%d : invalid physical port id %d\n", phba->fw_config.phys_port); goto fail_init; } /* populate and check FW config against min and max values */ if (!is_chip_be2_be3r(phba)) { phba->fw_config.eqid_count = pfw_cfg->eqid_count; phba->fw_config.cqid_count = pfw_cfg->cqid_count; if (phba->fw_config.eqid_count == 0 || phba->fw_config.eqid_count > 2048) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BG_%d : invalid EQ count %d\n", phba->fw_config.eqid_count); goto fail_init; } if (phba->fw_config.cqid_count == 0 || phba->fw_config.cqid_count > 4096) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BG_%d : invalid CQ count %d\n", phba->fw_config.cqid_count); goto fail_init; } beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, "BG_%d : EQ_Count : %d CQ_Count : %d\n", phba->fw_config.eqid_count, phba->fw_config.cqid_count); } /** * Check on which all ULP iSCSI Protocol is loaded. * Set the Bit for those ULP. This set flag is used * at all places in the code to check on which ULP * iSCSi Protocol is loaded **/ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { if (pfw_cfg->ulp[ulp_num].ulp_mode & BEISCSI_ULP_ISCSI_INI_MODE) { set_bit(ulp_num, &phba->fw_config.ulp_supported); /* Get the CID, ICD and Chain count for each ULP */ phba->fw_config.iscsi_cid_start[ulp_num] = pfw_cfg->ulp[ulp_num].sq_base; phba->fw_config.iscsi_cid_count[ulp_num] = pfw_cfg->ulp[ulp_num].sq_count; phba->fw_config.iscsi_icd_start[ulp_num] = pfw_cfg->ulp[ulp_num].icd_base; phba->fw_config.iscsi_icd_count[ulp_num] = pfw_cfg->ulp[ulp_num].icd_count; phba->fw_config.iscsi_chain_start[ulp_num] = pfw_cfg->chain_icd[ulp_num].chain_base; phba->fw_config.iscsi_chain_count[ulp_num] = pfw_cfg->chain_icd[ulp_num].chain_count; beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, "BG_%d : Function loaded on ULP : %d\n" "\tiscsi_cid_count : %d\n" "\tiscsi_cid_start : %d\n" "\t iscsi_icd_count : %d\n" "\t iscsi_icd_start : %d\n", ulp_num, phba->fw_config. iscsi_cid_count[ulp_num], phba->fw_config. iscsi_cid_start[ulp_num], phba->fw_config. iscsi_icd_count[ulp_num], phba->fw_config. iscsi_icd_start[ulp_num]); } } if (phba->fw_config.ulp_supported == 0) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BG_%d : iSCSI initiator mode not set: ULP0 %x ULP1 %x\n", pfw_cfg->ulp[BEISCSI_ULP0].ulp_mode, pfw_cfg->ulp[BEISCSI_ULP1].ulp_mode); goto fail_init; } /** * ICD is shared among ULPs. 
Use icd_count of any one loaded ULP **/ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) break; icd_count = phba->fw_config.iscsi_icd_count[ulp_num]; if (icd_count == 0 || icd_count > 65536) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BG_%d: invalid ICD count %d\n", icd_count); goto fail_init; } cid_count = BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP0) + BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP1); if (cid_count == 0 || cid_count > 4096) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BG_%d: invalid CID count %d\n", cid_count); goto fail_init; } /** * Check FW is dual ULP aware i.e. can handle either * of the protocols. */ phba->fw_config.dual_ulp_aware = (pfw_cfg->function_mode & BEISCSI_FUNC_DUA_MODE); beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, "BG_%d : DUA Mode : 0x%x\n", phba->fw_config.dual_ulp_aware); /* all set, continue using this FW config */ status = 0; fail_init: mutex_unlock(&ctrl->mbox_lock); return status; } /** * beiscsi_get_port_name()- Get port name for the function * @ctrl: ptr to Ctrl Info * @phba: ptr to the dev priv structure * * Get the alphanumeric character for port * **/ int beiscsi_get_port_name(struct be_ctrl_info *ctrl, struct beiscsi_hba *phba) { int ret = 0; struct be_mcc_wrb *wrb; struct be_cmd_get_port_name *ioctl; mutex_lock(&ctrl->mbox_lock); wrb = wrb_from_mbox(&ctrl->mbox_mem); memset(wrb, 0, sizeof(*wrb)); ioctl = embedded_payload(wrb); be_wrb_hdr_prepare(wrb, sizeof(*ioctl), true, 0); be_cmd_hdr_prepare(&ioctl->h.req_hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_GET_PORT_NAME, EMBED_MBX_MAX_PAYLOAD_SIZE); ret = be_mbox_notify(ctrl); phba->port_name = 0; if (!ret) { phba->port_name = ioctl->p.resp.port_names >> (phba->fw_config.phys_port * 8) & 0xff; } else { beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, "BG_%d : GET_PORT_NAME ret 0x%x status 0x%x\n", ret, ioctl->h.resp_hdr.status); } if (phba->port_name == 0) phba->port_name = '?'; mutex_unlock(&ctrl->mbox_lock); return ret; } int beiscsi_set_host_data(struct beiscsi_hba *phba) { struct be_ctrl_info *ctrl = &phba->ctrl; struct be_cmd_set_host_data *ioctl; struct be_mcc_wrb *wrb; int ret = 0; if (is_chip_be2_be3r(phba)) return ret; mutex_lock(&ctrl->mbox_lock); wrb = wrb_from_mbox(&ctrl->mbox_mem); memset(wrb, 0, sizeof(*wrb)); ioctl = embedded_payload(wrb); be_wrb_hdr_prepare(wrb, sizeof(*ioctl), true, 0); be_cmd_hdr_prepare(&ioctl->h.req_hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_SET_HOST_DATA, EMBED_MBX_MAX_PAYLOAD_SIZE); ioctl->param.req.param_id = BE_CMD_SET_HOST_PARAM_ID; ioctl->param.req.param_len = snprintf((char *)ioctl->param.req.param_data, sizeof(ioctl->param.req.param_data), "Linux iSCSI v%s", BUILD_STR); ioctl->param.req.param_len = ALIGN(ioctl->param.req.param_len + 1, 4); if (ioctl->param.req.param_len > BE_CMD_MAX_DRV_VERSION) ioctl->param.req.param_len = BE_CMD_MAX_DRV_VERSION; ret = be_mbox_notify(ctrl); if (!ret) { beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, "BG_%d : HBA set host driver version\n"); } else { /** * Check "MCC_STATUS_INVALID_LENGTH" for SKH. * Older FW versions return this error. 
*/ if (ret == MCC_STATUS_ILLEGAL_REQUEST || ret == MCC_STATUS_INVALID_LENGTH) __beiscsi_log(phba, KERN_INFO, "BG_%d : HBA failed to set host driver version\n"); } mutex_unlock(&ctrl->mbox_lock); return ret; } int beiscsi_set_uer_feature(struct beiscsi_hba *phba) { struct be_ctrl_info *ctrl = &phba->ctrl; struct be_cmd_set_features *ioctl; struct be_mcc_wrb *wrb; int ret = 0; mutex_lock(&ctrl->mbox_lock); wrb = wrb_from_mbox(&ctrl->mbox_mem); memset(wrb, 0, sizeof(*wrb)); ioctl = embedded_payload(wrb); be_wrb_hdr_prepare(wrb, sizeof(*ioctl), true, 0); be_cmd_hdr_prepare(&ioctl->h.req_hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_SET_FEATURES, EMBED_MBX_MAX_PAYLOAD_SIZE); ioctl->feature = BE_CMD_SET_FEATURE_UER; ioctl->param_len = sizeof(ioctl->param.req); ioctl->param.req.uer = BE_CMD_UER_SUPP_BIT; ret = be_mbox_notify(ctrl); if (!ret) { phba->ue2rp = ioctl->param.resp.ue2rp; set_bit(BEISCSI_HBA_UER_SUPP, &phba->state); beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, "BG_%d : HBA error recovery supported\n"); } else { /** * Check "MCC_STATUS_INVALID_LENGTH" for SKH. * Older FW versions return this error. */ if (ret == MCC_STATUS_ILLEGAL_REQUEST || ret == MCC_STATUS_INVALID_LENGTH) __beiscsi_log(phba, KERN_INFO, "BG_%d : HBA error recovery not supported\n"); } mutex_unlock(&ctrl->mbox_lock); return ret; } static u32 beiscsi_get_post_stage(struct beiscsi_hba *phba) { u32 sem; if (is_chip_be2_be3r(phba)) sem = ioread32(phba->csr_va + SLIPORT_SEMAPHORE_OFFSET_BEx); else pci_read_config_dword(phba->pcidev, SLIPORT_SEMAPHORE_OFFSET_SH, &sem); return sem; } int beiscsi_check_fw_rdy(struct beiscsi_hba *phba) { u32 loop, post, rdy = 0; loop = 1000; while (loop--) { post = beiscsi_get_post_stage(phba); if (post & POST_ERROR_BIT) break; if ((post & POST_STAGE_MASK) == POST_STAGE_ARMFW_RDY) { rdy = 1; break; } msleep(60); } if (!rdy) { __beiscsi_log(phba, KERN_ERR, "BC_%d : FW not ready 0x%x\n", post); } return rdy; } int beiscsi_cmd_function_reset(struct beiscsi_hba *phba) { struct be_ctrl_info *ctrl = &phba->ctrl; struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); struct be_post_sgl_pages_req *req; int status; mutex_lock(&ctrl->mbox_lock); req = embedded_payload(wrb); be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_FUNCTION_RESET, sizeof(*req)); status = be_mbox_notify(ctrl); mutex_unlock(&ctrl->mbox_lock); return status; } int beiscsi_cmd_special_wrb(struct be_ctrl_info *ctrl, u32 load) { struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev); u8 *endian_check; int status; mutex_lock(&ctrl->mbox_lock); memset(wrb, 0, sizeof(*wrb)); endian_check = (u8 *) wrb; if (load) { /* to start communicating */ *endian_check++ = 0xFF; *endian_check++ = 0x12; *endian_check++ = 0x34; *endian_check++ = 0xFF; *endian_check++ = 0xFF; *endian_check++ = 0x56; *endian_check++ = 0x78; *endian_check++ = 0xFF; } else { /* to stop communicating */ *endian_check++ = 0xFF; *endian_check++ = 0xAA; *endian_check++ = 0xBB; *endian_check++ = 0xFF; *endian_check++ = 0xFF; *endian_check++ = 0xCC; *endian_check++ = 0xDD; *endian_check = 0xFF; } be_dws_cpu_to_le(wrb, sizeof(*wrb)); status = be_mbox_notify(ctrl); if (status) beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, "BC_%d : special WRB message failed\n"); mutex_unlock(&ctrl->mbox_lock); return status; } int beiscsi_init_sliport(struct beiscsi_hba *phba) { int status; /* check POST stage before talking to FW */ status = beiscsi_check_fw_rdy(phba); if 
(!status) return -EIO; /* clear all error states after checking FW rdy */ phba->state &= ~BEISCSI_HBA_IN_ERR; /* check again UER support */ phba->state &= ~BEISCSI_HBA_UER_SUPP; /* * SLI COMMON_FUNCTION_RESET completion is indicated by BMBX RDY bit. * It should clean up any stale info in FW for this fn. */ status = beiscsi_cmd_function_reset(phba); if (status) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BC_%d : SLI Function Reset failed\n"); return status; } /* indicate driver is loading */ return beiscsi_cmd_special_wrb(&phba->ctrl, 1); } /** * beiscsi_cmd_iscsi_cleanup()- Inform FW to cleanup EP data structures. * @phba: pointer to dev priv structure * @ulp: ULP number. * * return * Success: 0 * Failure: Non-Zero Value **/ int beiscsi_cmd_iscsi_cleanup(struct beiscsi_hba *phba, unsigned short ulp) { struct be_ctrl_info *ctrl = &phba->ctrl; struct iscsi_cleanup_req_v1 *req_v1; struct iscsi_cleanup_req *req; u16 hdr_ring_id, data_ring_id; struct be_mcc_wrb *wrb; int status; mutex_lock(&ctrl->mbox_lock); wrb = wrb_from_mbox(&ctrl->mbox_mem); hdr_ring_id = HWI_GET_DEF_HDRQ_ID(phba, ulp); data_ring_id = HWI_GET_DEF_BUFQ_ID(phba, ulp); if (is_chip_be2_be3r(phba)) { req = embedded_payload(wrb); be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, OPCODE_COMMON_ISCSI_CLEANUP, sizeof(*req)); req->chute = (1 << ulp); /* BE2/BE3 FW creates 8-bit ring id */ req->hdr_ring_id = hdr_ring_id; req->data_ring_id = data_ring_id; } else { req_v1 = embedded_payload(wrb); be_wrb_hdr_prepare(wrb, sizeof(*req_v1), true, 0); be_cmd_hdr_prepare(&req_v1->hdr, CMD_SUBSYSTEM_ISCSI, OPCODE_COMMON_ISCSI_CLEANUP, sizeof(*req_v1)); req_v1->hdr.version = 1; req_v1->chute = (1 << ulp); req_v1->hdr_ring_id = cpu_to_le16(hdr_ring_id); req_v1->data_ring_id = cpu_to_le16(data_ring_id); } status = be_mbox_notify(ctrl); if (status) beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, "BG_%d : %s failed %d\n", __func__, ulp); mutex_unlock(&ctrl->mbox_lock); return status; } /* * beiscsi_detect_ue()- Detect Unrecoverable Error on adapter * @phba: Driver priv structure * * Read registers linked to UE and check for the UE status **/ int beiscsi_detect_ue(struct beiscsi_hba *phba) { uint32_t ue_mask_hi = 0, ue_mask_lo = 0; uint32_t ue_hi = 0, ue_lo = 0; uint8_t i = 0; int ret = 0; pci_read_config_dword(phba->pcidev, PCICFG_UE_STATUS_LOW, &ue_lo); pci_read_config_dword(phba->pcidev, PCICFG_UE_STATUS_MASK_LOW, &ue_mask_lo); pci_read_config_dword(phba->pcidev, PCICFG_UE_STATUS_HIGH, &ue_hi); pci_read_config_dword(phba->pcidev, PCICFG_UE_STATUS_MASK_HI, &ue_mask_hi); ue_lo = (ue_lo & ~ue_mask_lo); ue_hi = (ue_hi & ~ue_mask_hi); if (ue_lo || ue_hi) { set_bit(BEISCSI_HBA_IN_UE, &phba->state); __beiscsi_log(phba, KERN_ERR, "BC_%d : HBA error detected\n"); ret = 1; } if (ue_lo) { for (i = 0; ue_lo; ue_lo >>= 1, i++) { if (ue_lo & 1) __beiscsi_log(phba, KERN_ERR, "BC_%d : UE_LOW %s bit set\n", desc_ue_status_low[i]); } } if (ue_hi) { for (i = 0; ue_hi; ue_hi >>= 1, i++) { if (ue_hi & 1) __beiscsi_log(phba, KERN_ERR, "BC_%d : UE_HIGH %s bit set\n", desc_ue_status_hi[i]); } } return ret; } /* * beiscsi_detect_tpe()- Detect Transient Parity Error on adapter * @phba: Driver priv structure * * Read SLIPORT SEMAPHORE register to check for UER * **/ int beiscsi_detect_tpe(struct beiscsi_hba *phba) { u32 post, status; int ret = 0; post = beiscsi_get_post_stage(phba); status = post & POST_STAGE_MASK; if ((status & POST_ERR_RECOVERY_CODE_MASK) == POST_STAGE_RECOVERABLE_ERR) { set_bit(BEISCSI_HBA_IN_TPE, 
&phba->state); __beiscsi_log(phba, KERN_INFO, "BC_%d : HBA error recoverable: 0x%x\n", post); ret = 1; } else { __beiscsi_log(phba, KERN_INFO, "BC_%d : HBA in UE: 0x%x\n", post); } return ret; }
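/*
 * Editor's illustrative sketch (not part of the driver source above): a small
 * userspace-style decode of the packed mcc_tag_status word whose layout is
 * documented earlier in this file as [31] valid, [23:16] wrb index,
 * [15:8] extd_status, [7:0] compl_status. The SKETCH_* shift/mask values are
 * assumptions that mirror that documented layout; the driver's own
 * CQE_VALID_MASK / CQE_STATUS_* definitions (in its headers, not shown here)
 * are authoritative.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_VALID_BIT    (1u << 31)	/* assumed: bit 31 marks a posted completion */
#define SKETCH_WRB_SHIFT    16		/* assumed: bits 23:16 carry the WRB index */
#define SKETCH_EXTD_SHIFT   8		/* assumed: bits 15:8 carry extd_status */
#define SKETCH_BYTE_MASK    0xFFu

/* Unpack one tag status word and print its fields. */
static void sketch_decode_tag_status(uint32_t tag_status)
{
	if (!(tag_status & SKETCH_VALID_BIT)) {
		printf("tag not completed yet\n");
		return;
	}
	printf("wrb=%u extd_status=0x%02x compl_status=0x%02x\n",
	       (tag_status >> SKETCH_WRB_SHIFT) & SKETCH_BYTE_MASK,
	       (tag_status >> SKETCH_EXTD_SHIFT) & SKETCH_BYTE_MASK,
	       tag_status & SKETCH_BYTE_MASK);
}

int main(void)
{
	/* Example: valid completion, wrb index 3, extd_status 0x02, compl_status 0x00. */
	uint32_t sample = SKETCH_VALID_BIT | (3u << SKETCH_WRB_SHIFT) |
			  (0x02u << SKETCH_EXTD_SHIFT) | 0x00u;

	sketch_decode_tag_status(sample);
	return 0;
}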
linux-master
drivers/scsi/be2iscsi/be_cmds.c
/* * This file is part of the Emulex Linux Device Driver for Enterprise iSCSI * Host Bus Adapters. Refer to the README file included with this package * for driver version and adapter compatibility. * * Copyright (c) 2018 Broadcom. All Rights Reserved. * The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as published * by the Free Software Foundation. * * This program is distributed in the hope that it will be useful. ALL EXPRESS * OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, INCLUDING ANY * IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, * OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH * DISCLAIMERS ARE HELD TO BE LEGALLY INVALID. * See the GNU General Public License for more details, a copy of which * can be found in the file COPYING included with this package. * * Contact Information: * [email protected] * */ #include <linux/reboot.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/blkdev.h> #include <linux/pci.h> #include <linux/string.h> #include <linux/kernel.h> #include <linux/semaphore.h> #include <linux/iscsi_boot_sysfs.h> #include <linux/module.h> #include <linux/bsg-lib.h> #include <linux/irq_poll.h> #include <scsi/libiscsi.h> #include <scsi/scsi_bsg_iscsi.h> #include <scsi/scsi_netlink.h> #include <scsi/scsi_transport_iscsi.h> #include <scsi/scsi_transport.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi.h> #include "be_main.h" #include "be_iscsi.h" #include "be_mgmt.h" #include "be_cmds.h" static unsigned int be_iopoll_budget = 10; static unsigned int be_max_phys_size = 64; static unsigned int enable_msix = 1; MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR); MODULE_VERSION(BUILD_STR); MODULE_AUTHOR("Emulex Corporation"); MODULE_LICENSE("GPL"); module_param(be_iopoll_budget, int, 0); module_param(enable_msix, int, 0); module_param(be_max_phys_size, uint, S_IRUGO); MODULE_PARM_DESC(be_max_phys_size, "Maximum Size (In Kilobytes) of physically contiguous " "memory that can be allocated. 
Range is 16 - 128"); #define beiscsi_disp_param(_name)\ static ssize_t \ beiscsi_##_name##_disp(struct device *dev,\ struct device_attribute *attrib, char *buf) \ { \ struct Scsi_Host *shost = class_to_shost(dev);\ struct beiscsi_hba *phba = iscsi_host_priv(shost); \ return snprintf(buf, PAGE_SIZE, "%d\n",\ phba->attr_##_name);\ } #define beiscsi_change_param(_name, _minval, _maxval, _defaval)\ static int \ beiscsi_##_name##_change(struct beiscsi_hba *phba, uint32_t val)\ {\ if (val >= _minval && val <= _maxval) {\ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\ "BA_%d : beiscsi_"#_name" updated "\ "from 0x%x ==> 0x%x\n",\ phba->attr_##_name, val); \ phba->attr_##_name = val;\ return 0;\ } \ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, \ "BA_%d beiscsi_"#_name" attribute "\ "cannot be updated to 0x%x, "\ "range allowed is ["#_minval" - "#_maxval"]\n", val);\ return -EINVAL;\ } #define beiscsi_store_param(_name) \ static ssize_t \ beiscsi_##_name##_store(struct device *dev,\ struct device_attribute *attr, const char *buf,\ size_t count) \ { \ struct Scsi_Host *shost = class_to_shost(dev);\ struct beiscsi_hba *phba = iscsi_host_priv(shost);\ uint32_t param_val = 0;\ if (!isdigit(buf[0]))\ return -EINVAL;\ if (sscanf(buf, "%i", &param_val) != 1)\ return -EINVAL;\ if (beiscsi_##_name##_change(phba, param_val) == 0) \ return strlen(buf);\ else \ return -EINVAL;\ } #define beiscsi_init_param(_name, _minval, _maxval, _defval) \ static int \ beiscsi_##_name##_init(struct beiscsi_hba *phba, uint32_t val) \ { \ if (val >= _minval && val <= _maxval) {\ phba->attr_##_name = val;\ return 0;\ } \ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\ "BA_%d beiscsi_"#_name" attribute " \ "cannot be updated to 0x%x, "\ "range allowed is ["#_minval" - "#_maxval"]\n", val);\ phba->attr_##_name = _defval;\ return -EINVAL;\ } #define BEISCSI_RW_ATTR(_name, _minval, _maxval, _defval, _descp) \ static uint beiscsi_##_name = _defval;\ module_param(beiscsi_##_name, uint, S_IRUGO);\ MODULE_PARM_DESC(beiscsi_##_name, _descp);\ beiscsi_disp_param(_name)\ beiscsi_change_param(_name, _minval, _maxval, _defval)\ beiscsi_store_param(_name)\ beiscsi_init_param(_name, _minval, _maxval, _defval)\ static DEVICE_ATTR(beiscsi_##_name, S_IRUGO | S_IWUSR,\ beiscsi_##_name##_disp, beiscsi_##_name##_store) /* * When new log level added update MAX allowed value for log_enable */ BEISCSI_RW_ATTR(log_enable, 0x00, 0xFF, 0x00, "Enable logging Bit Mask\n" "\t\t\t\tInitialization Events : 0x01\n" "\t\t\t\tMailbox Events : 0x02\n" "\t\t\t\tMiscellaneous Events : 0x04\n" "\t\t\t\tError Handling : 0x08\n" "\t\t\t\tIO Path Events : 0x10\n" "\t\t\t\tConfiguration Path : 0x20\n" "\t\t\t\tiSCSI Protocol : 0x40\n"); static DEVICE_ATTR(beiscsi_drvr_ver, S_IRUGO, beiscsi_drvr_ver_disp, NULL); static DEVICE_ATTR(beiscsi_adapter_family, S_IRUGO, beiscsi_adap_family_disp, NULL); static DEVICE_ATTR(beiscsi_fw_ver, S_IRUGO, beiscsi_fw_ver_disp, NULL); static DEVICE_ATTR(beiscsi_phys_port, S_IRUGO, beiscsi_phys_port_disp, NULL); static DEVICE_ATTR(beiscsi_active_session_count, S_IRUGO, beiscsi_active_session_disp, NULL); static DEVICE_ATTR(beiscsi_free_session_count, S_IRUGO, beiscsi_free_session_disp, NULL); static struct attribute *beiscsi_attrs[] = { &dev_attr_beiscsi_log_enable.attr, &dev_attr_beiscsi_drvr_ver.attr, &dev_attr_beiscsi_adapter_family.attr, &dev_attr_beiscsi_fw_ver.attr, &dev_attr_beiscsi_active_session_count.attr, &dev_attr_beiscsi_free_session_count.attr, &dev_attr_beiscsi_phys_port.attr, NULL, }; ATTRIBUTE_GROUPS(beiscsi); static char 
const *cqe_desc[] = { "RESERVED_DESC", "SOL_CMD_COMPLETE", "SOL_CMD_KILLED_DATA_DIGEST_ERR", "CXN_KILLED_PDU_SIZE_EXCEEDS_DSL", "CXN_KILLED_BURST_LEN_MISMATCH", "CXN_KILLED_AHS_RCVD", "CXN_KILLED_HDR_DIGEST_ERR", "CXN_KILLED_UNKNOWN_HDR", "CXN_KILLED_STALE_ITT_TTT_RCVD", "CXN_KILLED_INVALID_ITT_TTT_RCVD", "CXN_KILLED_RST_RCVD", "CXN_KILLED_TIMED_OUT", "CXN_KILLED_RST_SENT", "CXN_KILLED_FIN_RCVD", "CXN_KILLED_BAD_UNSOL_PDU_RCVD", "CXN_KILLED_BAD_WRB_INDEX_ERROR", "CXN_KILLED_OVER_RUN_RESIDUAL", "CXN_KILLED_UNDER_RUN_RESIDUAL", "CMD_KILLED_INVALID_STATSN_RCVD", "CMD_KILLED_INVALID_R2T_RCVD", "CMD_CXN_KILLED_LUN_INVALID", "CMD_CXN_KILLED_ICD_INVALID", "CMD_CXN_KILLED_ITT_INVALID", "CMD_CXN_KILLED_SEQ_OUTOFORDER", "CMD_CXN_KILLED_INVALID_DATASN_RCVD", "CXN_INVALIDATE_NOTIFY", "CXN_INVALIDATE_INDEX_NOTIFY", "CMD_INVALIDATED_NOTIFY", "UNSOL_HDR_NOTIFY", "UNSOL_DATA_NOTIFY", "UNSOL_DATA_DIGEST_ERROR_NOTIFY", "DRIVERMSG_NOTIFY", "CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN", "SOL_CMD_KILLED_DIF_ERR", "CXN_KILLED_SYN_RCVD", "CXN_KILLED_IMM_DATA_RCVD" }; static int beiscsi_eh_abort(struct scsi_cmnd *sc) { struct iscsi_task *abrt_task = iscsi_cmd(sc)->task; struct iscsi_cls_session *cls_session; struct beiscsi_io_task *abrt_io_task; struct beiscsi_conn *beiscsi_conn; struct iscsi_session *session; struct invldt_cmd_tbl inv_tbl; struct beiscsi_hba *phba; struct iscsi_conn *conn; int rc; cls_session = starget_to_session(scsi_target(sc->device)); session = cls_session->dd_data; completion_check: /* check if we raced, task just got cleaned up under us */ spin_lock_bh(&session->back_lock); if (!abrt_task || !abrt_task->sc) { spin_unlock_bh(&session->back_lock); return SUCCESS; } /* get a task ref till FW processes the req for the ICD used */ if (!iscsi_get_task(abrt_task)) { spin_unlock(&session->back_lock); /* We are just about to call iscsi_free_task so wait for it. 
*/ udelay(5); goto completion_check; } abrt_io_task = abrt_task->dd_data; conn = abrt_task->conn; beiscsi_conn = conn->dd_data; phba = beiscsi_conn->phba; /* mark WRB invalid which have been not processed by FW yet */ if (is_chip_be2_be3r(phba)) { AMAP_SET_BITS(struct amap_iscsi_wrb, invld, abrt_io_task->pwrb_handle->pwrb, 1); } else { AMAP_SET_BITS(struct amap_iscsi_wrb_v2, invld, abrt_io_task->pwrb_handle->pwrb, 1); } inv_tbl.cid = beiscsi_conn->beiscsi_conn_cid; inv_tbl.icd = abrt_io_task->psgl_handle->sgl_index; spin_unlock_bh(&session->back_lock); rc = beiscsi_mgmt_invalidate_icds(phba, &inv_tbl, 1); iscsi_put_task(abrt_task); if (rc) { beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH, "BM_%d : sc %p invalidation failed %d\n", sc, rc); return FAILED; } return iscsi_eh_abort(sc); } static int beiscsi_eh_device_reset(struct scsi_cmnd *sc) { struct beiscsi_invldt_cmd_tbl { struct invldt_cmd_tbl tbl[BE_INVLDT_CMD_TBL_SZ]; struct iscsi_task *task[BE_INVLDT_CMD_TBL_SZ]; } *inv_tbl; struct iscsi_cls_session *cls_session; struct beiscsi_conn *beiscsi_conn; struct beiscsi_io_task *io_task; struct iscsi_session *session; struct beiscsi_hba *phba; struct iscsi_conn *conn; struct iscsi_task *task; unsigned int i, nents; int rc, more = 0; cls_session = starget_to_session(scsi_target(sc->device)); session = cls_session->dd_data; spin_lock_bh(&session->frwd_lock); if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN) { spin_unlock_bh(&session->frwd_lock); return FAILED; } conn = session->leadconn; beiscsi_conn = conn->dd_data; phba = beiscsi_conn->phba; inv_tbl = kzalloc(sizeof(*inv_tbl), GFP_ATOMIC); if (!inv_tbl) { spin_unlock_bh(&session->frwd_lock); beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH, "BM_%d : invldt_cmd_tbl alloc failed\n"); return FAILED; } nents = 0; /* take back_lock to prevent task from getting cleaned up under us */ spin_lock(&session->back_lock); for (i = 0; i < conn->session->cmds_max; i++) { task = conn->session->cmds[i]; if (!task->sc) continue; if (sc->device->lun != task->sc->device->lun) continue; /** * Can't fit in more cmds? Normally this won't happen b'coz * BEISCSI_CMD_PER_LUN is same as BE_INVLDT_CMD_TBL_SZ. */ if (nents == BE_INVLDT_CMD_TBL_SZ) { more = 1; break; } /* get a task ref till FW processes the req for the ICD used */ if (!iscsi_get_task(task)) { /* * The task has completed in the driver and is * completing in libiscsi. Just ignore it here. When we * call iscsi_eh_device_reset, it will wait for us. 
*/ continue; } io_task = task->dd_data; /* mark WRB invalid which have been not processed by FW yet */ if (is_chip_be2_be3r(phba)) { AMAP_SET_BITS(struct amap_iscsi_wrb, invld, io_task->pwrb_handle->pwrb, 1); } else { AMAP_SET_BITS(struct amap_iscsi_wrb_v2, invld, io_task->pwrb_handle->pwrb, 1); } inv_tbl->tbl[nents].cid = beiscsi_conn->beiscsi_conn_cid; inv_tbl->tbl[nents].icd = io_task->psgl_handle->sgl_index; inv_tbl->task[nents] = task; nents++; } spin_unlock(&session->back_lock); spin_unlock_bh(&session->frwd_lock); rc = SUCCESS; if (!nents) goto end_reset; if (more) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH, "BM_%d : number of cmds exceeds size of invalidation table\n"); rc = FAILED; goto end_reset; } if (beiscsi_mgmt_invalidate_icds(phba, &inv_tbl->tbl[0], nents)) { beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH, "BM_%d : cid %u scmds invalidation failed\n", beiscsi_conn->beiscsi_conn_cid); rc = FAILED; } end_reset: for (i = 0; i < nents; i++) iscsi_put_task(inv_tbl->task[i]); kfree(inv_tbl); if (rc == SUCCESS) rc = iscsi_eh_device_reset(sc); return rc; } /*------------------- PCI Driver operations and data ----------------- */ static const struct pci_device_id beiscsi_pci_id_table[] = { { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) }, { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) }, { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) }, { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) }, { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) }, { PCI_DEVICE(ELX_VENDOR_ID, OC_SKH_ID1) }, { 0 } }; MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table); static const struct scsi_host_template beiscsi_sht = { .module = THIS_MODULE, .name = "Emulex 10Gbe open-iscsi Initiator Driver", .proc_name = DRV_NAME, .queuecommand = iscsi_queuecommand, .change_queue_depth = scsi_change_queue_depth, .target_alloc = iscsi_target_alloc, .eh_timed_out = iscsi_eh_cmd_timed_out, .eh_abort_handler = beiscsi_eh_abort, .eh_device_reset_handler = beiscsi_eh_device_reset, .eh_target_reset_handler = iscsi_eh_session_reset, .shost_groups = beiscsi_groups, .sg_tablesize = BEISCSI_SGLIST_ELEMENTS, .can_queue = BE2_IO_DEPTH, .this_id = -1, .max_sectors = BEISCSI_MAX_SECTORS, .max_segment_size = 65536, .cmd_per_lun = BEISCSI_CMD_PER_LUN, .vendor_id = SCSI_NL_VID_TYPE_PCI | BE_VENDOR_ID, .track_queue_depth = 1, .cmd_size = sizeof(struct iscsi_cmd), }; static struct scsi_transport_template *beiscsi_scsi_transport; static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev) { struct beiscsi_hba *phba; struct Scsi_Host *shost; shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0); if (!shost) { dev_err(&pcidev->dev, "beiscsi_hba_alloc - iscsi_host_alloc failed\n"); return NULL; } shost->max_id = BE2_MAX_SESSIONS - 1; shost->max_channel = 0; shost->max_cmd_len = BEISCSI_MAX_CMD_LEN; shost->max_lun = BEISCSI_NUM_MAX_LUN; shost->transportt = beiscsi_scsi_transport; phba = iscsi_host_priv(shost); memset(phba, 0, sizeof(*phba)); phba->shost = shost; phba->pcidev = pci_dev_get(pcidev); pci_set_drvdata(pcidev, phba); phba->interface_handle = 0xFFFFFFFF; return phba; } static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba) { if (phba->csr_va) { iounmap(phba->csr_va); phba->csr_va = NULL; } if (phba->db_va) { iounmap(phba->db_va); phba->db_va = NULL; } if (phba->pci_va) { iounmap(phba->pci_va); phba->pci_va = NULL; } } static int beiscsi_map_pci_bars(struct beiscsi_hba *phba, struct pci_dev *pcidev) { u8 __iomem *addr; int pcicfg_reg; addr = ioremap(pci_resource_start(pcidev, 2), pci_resource_len(pcidev, 2)); if (addr == NULL) return -ENOMEM; 
phba->ctrl.csr = addr; phba->csr_va = addr; addr = ioremap(pci_resource_start(pcidev, 4), 128 * 1024); if (addr == NULL) goto pci_map_err; phba->ctrl.db = addr; phba->db_va = addr; if (phba->generation == BE_GEN2) pcicfg_reg = 1; else pcicfg_reg = 0; addr = ioremap(pci_resource_start(pcidev, pcicfg_reg), pci_resource_len(pcidev, pcicfg_reg)); if (addr == NULL) goto pci_map_err; phba->ctrl.pcicfg = addr; phba->pci_va = addr; return 0; pci_map_err: beiscsi_unmap_pci_function(phba); return -ENOMEM; } static int beiscsi_enable_pci(struct pci_dev *pcidev) { int ret; ret = pci_enable_device(pcidev); if (ret) { dev_err(&pcidev->dev, "beiscsi_enable_pci - enable device failed\n"); return ret; } ret = pci_request_regions(pcidev, DRV_NAME); if (ret) { dev_err(&pcidev->dev, "beiscsi_enable_pci - request region failed\n"); goto pci_dev_disable; } pci_set_master(pcidev); ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(64)); if (ret) { ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32)); if (ret) { dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n"); goto pci_region_release; } } return 0; pci_region_release: pci_release_regions(pcidev); pci_dev_disable: pci_disable_device(pcidev); return ret; } static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev) { struct be_ctrl_info *ctrl = &phba->ctrl; struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced; struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem; int status = 0; ctrl->pdev = pdev; status = beiscsi_map_pci_bars(phba, pdev); if (status) return status; mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16; mbox_mem_alloc->va = dma_alloc_coherent(&pdev->dev, mbox_mem_alloc->size, &mbox_mem_alloc->dma, GFP_KERNEL); if (!mbox_mem_alloc->va) { beiscsi_unmap_pci_function(phba); return -ENOMEM; } mbox_mem_align->size = sizeof(struct be_mcc_mailbox); mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16); mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16); memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox)); mutex_init(&ctrl->mbox_lock); spin_lock_init(&phba->ctrl.mcc_lock); return status; } /** * beiscsi_get_params()- Set the config paramters * @phba: ptr device priv structure **/ static void beiscsi_get_params(struct beiscsi_hba *phba) { uint32_t total_cid_count = 0; uint32_t total_icd_count = 0; uint8_t ulp_num = 0; total_cid_count = BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP0) + BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP1); for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { uint32_t align_mask = 0; uint32_t icd_post_per_page = 0; uint32_t icd_count_unavailable = 0; uint32_t icd_start = 0, icd_count = 0; uint32_t icd_start_align = 0, icd_count_align = 0; if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { icd_start = phba->fw_config.iscsi_icd_start[ulp_num]; icd_count = phba->fw_config.iscsi_icd_count[ulp_num]; /* Get ICD count that can be posted on each page */ icd_post_per_page = (PAGE_SIZE / (BE2_SGE * sizeof(struct iscsi_sge))); align_mask = (icd_post_per_page - 1); /* Check if icd_start is aligned ICD per page posting */ if (icd_start % icd_post_per_page) { icd_start_align = ((icd_start + icd_post_per_page) & ~(align_mask)); phba->fw_config. 
iscsi_icd_start[ulp_num] = icd_start_align; } icd_count_align = (icd_count & ~align_mask); /* ICD discarded in the process of alignment */ if (icd_start_align) icd_count_unavailable = ((icd_start_align - icd_start) + (icd_count - icd_count_align)); /* Updated ICD count available */ phba->fw_config.iscsi_icd_count[ulp_num] = (icd_count - icd_count_unavailable); beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, "BM_%d : Aligned ICD values\n" "\t ICD Start : %d\n" "\t ICD Count : %d\n" "\t ICD Discarded : %d\n", phba->fw_config. iscsi_icd_start[ulp_num], phba->fw_config. iscsi_icd_count[ulp_num], icd_count_unavailable); break; } } total_icd_count = phba->fw_config.iscsi_icd_count[ulp_num]; phba->params.ios_per_ctrl = (total_icd_count - (total_cid_count + BE2_TMFS + BE2_NOPOUT_REQ)); phba->params.cxns_per_ctrl = total_cid_count; phba->params.icds_per_ctrl = total_icd_count; phba->params.num_sge_per_io = BE2_SGE; phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ; phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ; phba->params.num_eq_entries = 1024; phba->params.num_cq_entries = 1024; phba->params.wrbs_per_cxn = 256; } static void hwi_ring_eq_db(struct beiscsi_hba *phba, unsigned int id, unsigned int clr_interrupt, unsigned int num_processed, unsigned char rearm, unsigned char event) { u32 val = 0; if (rearm) val |= 1 << DB_EQ_REARM_SHIFT; if (clr_interrupt) val |= 1 << DB_EQ_CLR_SHIFT; if (event) val |= 1 << DB_EQ_EVNT_SHIFT; val |= num_processed << DB_EQ_NUM_POPPED_SHIFT; /* Setting lower order EQ_ID Bits */ val |= (id & DB_EQ_RING_ID_LOW_MASK); /* Setting Higher order EQ_ID Bits */ val |= (((id >> DB_EQ_HIGH_FEILD_SHIFT) & DB_EQ_RING_ID_HIGH_MASK) << DB_EQ_HIGH_SET_SHIFT); iowrite32(val, phba->db_va + DB_EQ_OFFSET); } /** * be_isr_mcc - The isr routine of the driver. * @irq: Not used * @dev_id: Pointer to host adapter structure */ static irqreturn_t be_isr_mcc(int irq, void *dev_id) { struct beiscsi_hba *phba; struct be_eq_entry *eqe; struct be_queue_info *eq; struct be_queue_info *mcc; unsigned int mcc_events; struct be_eq_obj *pbe_eq; pbe_eq = dev_id; eq = &pbe_eq->q; phba = pbe_eq->phba; mcc = &phba->ctrl.mcc_obj.cq; eqe = queue_tail_node(eq); mcc_events = 0; while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] & EQE_VALID_MASK) { if (((eqe->dw[offsetof(struct amap_eq_entry, resource_id) / 32] & EQE_RESID_MASK) >> 16) == mcc->id) { mcc_events++; } AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); queue_tail_inc(eq); eqe = queue_tail_node(eq); } if (mcc_events) { queue_work(phba->wq, &pbe_eq->mcc_work); hwi_ring_eq_db(phba, eq->id, 1, mcc_events, 1, 1); } return IRQ_HANDLED; } /** * be_isr_msix - The isr routine of the driver. * @irq: Not used * @dev_id: Pointer to host adapter structure */ static irqreturn_t be_isr_msix(int irq, void *dev_id) { struct beiscsi_hba *phba; struct be_queue_info *eq; struct be_eq_obj *pbe_eq; pbe_eq = dev_id; eq = &pbe_eq->q; phba = pbe_eq->phba; /* disable interrupt till iopoll completes */ hwi_ring_eq_db(phba, eq->id, 1, 0, 0, 1); irq_poll_sched(&pbe_eq->iopoll); return IRQ_HANDLED; } /** * be_isr - The isr routine of the driver. 
* @irq: Not used * @dev_id: Pointer to host adapter structure */ static irqreturn_t be_isr(int irq, void *dev_id) { struct beiscsi_hba *phba; struct hwi_controller *phwi_ctrlr; struct hwi_context_memory *phwi_context; struct be_eq_entry *eqe; struct be_queue_info *eq; struct be_queue_info *mcc; unsigned int mcc_events, io_events; struct be_ctrl_info *ctrl; struct be_eq_obj *pbe_eq; int isr, rearm; phba = dev_id; ctrl = &phba->ctrl; isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET + (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE)); if (!isr) return IRQ_NONE; phwi_ctrlr = phba->phwi_ctrlr; phwi_context = phwi_ctrlr->phwi_ctxt; pbe_eq = &phwi_context->be_eq[0]; eq = &phwi_context->be_eq[0].q; mcc = &phba->ctrl.mcc_obj.cq; eqe = queue_tail_node(eq); io_events = 0; mcc_events = 0; while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] & EQE_VALID_MASK) { if (((eqe->dw[offsetof(struct amap_eq_entry, resource_id) / 32] & EQE_RESID_MASK) >> 16) == mcc->id) mcc_events++; else io_events++; AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); queue_tail_inc(eq); eqe = queue_tail_node(eq); } if (!io_events && !mcc_events) return IRQ_NONE; /* no need to rearm if interrupt is only for IOs */ rearm = 0; if (mcc_events) { queue_work(phba->wq, &pbe_eq->mcc_work); /* rearm for MCCQ */ rearm = 1; } if (io_events) irq_poll_sched(&pbe_eq->iopoll); hwi_ring_eq_db(phba, eq->id, 0, (io_events + mcc_events), rearm, 1); return IRQ_HANDLED; } static void beiscsi_free_irqs(struct beiscsi_hba *phba) { struct hwi_context_memory *phwi_context; int i; if (!phba->pcidev->msix_enabled) { if (phba->pcidev->irq) free_irq(phba->pcidev->irq, phba); return; } phwi_context = phba->phwi_ctrlr->phwi_ctxt; for (i = 0; i <= phba->num_cpus; i++) { free_irq(pci_irq_vector(phba->pcidev, i), &phwi_context->be_eq[i]); kfree(phba->msi_name[i]); } } static int beiscsi_init_irqs(struct beiscsi_hba *phba) { struct pci_dev *pcidev = phba->pcidev; struct hwi_controller *phwi_ctrlr; struct hwi_context_memory *phwi_context; int ret, i, j; phwi_ctrlr = phba->phwi_ctrlr; phwi_context = phwi_ctrlr->phwi_ctxt; if (pcidev->msix_enabled) { for (i = 0; i < phba->num_cpus; i++) { phba->msi_name[i] = kasprintf(GFP_KERNEL, "beiscsi_%02x_%02x", phba->shost->host_no, i); if (!phba->msi_name[i]) { ret = -ENOMEM; goto free_msix_irqs; } ret = request_irq(pci_irq_vector(pcidev, i), be_isr_msix, 0, phba->msi_name[i], &phwi_context->be_eq[i]); if (ret) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : %s-Failed to register msix for i = %d\n", __func__, i); kfree(phba->msi_name[i]); goto free_msix_irqs; } } phba->msi_name[i] = kasprintf(GFP_KERNEL, "beiscsi_mcc_%02x", phba->shost->host_no); if (!phba->msi_name[i]) { ret = -ENOMEM; goto free_msix_irqs; } ret = request_irq(pci_irq_vector(pcidev, i), be_isr_mcc, 0, phba->msi_name[i], &phwi_context->be_eq[i]); if (ret) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : %s-Failed to register beiscsi_msix_mcc\n", __func__); kfree(phba->msi_name[i]); goto free_msix_irqs; } } else { ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED, "beiscsi", phba); if (ret) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : %s-Failed to register irq\n", __func__); return ret; } } return 0; free_msix_irqs: for (j = i - 1; j >= 0; j--) { free_irq(pci_irq_vector(pcidev, i), &phwi_context->be_eq[j]); kfree(phba->msi_name[j]); } return ret; } void hwi_ring_cq_db(struct beiscsi_hba *phba, unsigned int id, unsigned int num_processed, unsigned char rearm) { u32 val = 0; if (rearm) val |= 1 << DB_CQ_REARM_SHIFT; val |= 
num_processed << DB_CQ_NUM_POPPED_SHIFT; /* Setting lower order CQ_ID Bits */ val |= (id & DB_CQ_RING_ID_LOW_MASK); /* Setting Higher order CQ_ID Bits */ val |= (((id >> DB_CQ_HIGH_FEILD_SHIFT) & DB_CQ_RING_ID_HIGH_MASK) << DB_CQ_HIGH_SET_SHIFT); iowrite32(val, phba->db_va + DB_CQ_OFFSET); } static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba) { struct sgl_handle *psgl_handle; unsigned long flags; spin_lock_irqsave(&phba->io_sgl_lock, flags); if (phba->io_sgl_hndl_avbl) { beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO, "BM_%d : In alloc_io_sgl_handle," " io_sgl_alloc_index=%d\n", phba->io_sgl_alloc_index); psgl_handle = phba->io_sgl_hndl_base[phba-> io_sgl_alloc_index]; phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL; phba->io_sgl_hndl_avbl--; if (phba->io_sgl_alloc_index == (phba->params. ios_per_ctrl - 1)) phba->io_sgl_alloc_index = 0; else phba->io_sgl_alloc_index++; } else psgl_handle = NULL; spin_unlock_irqrestore(&phba->io_sgl_lock, flags); return psgl_handle; } static void free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle) { unsigned long flags; spin_lock_irqsave(&phba->io_sgl_lock, flags); beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO, "BM_%d : In free_,io_sgl_free_index=%d\n", phba->io_sgl_free_index); if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) { /* * this can happen if clean_task is called on a task that * failed in xmit_task or alloc_pdu. */ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO, "BM_%d : Double Free in IO SGL io_sgl_free_index=%d, value there=%p\n", phba->io_sgl_free_index, phba->io_sgl_hndl_base[phba->io_sgl_free_index]); spin_unlock_irqrestore(&phba->io_sgl_lock, flags); return; } phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle; phba->io_sgl_hndl_avbl++; if (phba->io_sgl_free_index == (phba->params.ios_per_ctrl - 1)) phba->io_sgl_free_index = 0; else phba->io_sgl_free_index++; spin_unlock_irqrestore(&phba->io_sgl_lock, flags); } static inline struct wrb_handle * beiscsi_get_wrb_handle(struct hwi_wrb_context *pwrb_context, unsigned int wrbs_per_cxn) { struct wrb_handle *pwrb_handle; unsigned long flags; spin_lock_irqsave(&pwrb_context->wrb_lock, flags); if (!pwrb_context->wrb_handles_available) { spin_unlock_irqrestore(&pwrb_context->wrb_lock, flags); return NULL; } pwrb_handle = pwrb_context->pwrb_handle_base[pwrb_context->alloc_index]; pwrb_context->wrb_handles_available--; if (pwrb_context->alloc_index == (wrbs_per_cxn - 1)) pwrb_context->alloc_index = 0; else pwrb_context->alloc_index++; spin_unlock_irqrestore(&pwrb_context->wrb_lock, flags); if (pwrb_handle) memset(pwrb_handle->pwrb, 0, sizeof(*pwrb_handle->pwrb)); return pwrb_handle; } /** * alloc_wrb_handle - To allocate a wrb handle * @phba: The hba pointer * @cid: The cid to use for allocation * @pcontext: ptr to ptr to wrb context * * This happens under session_lock until submission to chip */ struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid, struct hwi_wrb_context **pcontext) { struct hwi_wrb_context *pwrb_context; struct hwi_controller *phwi_ctrlr; uint16_t cri_index = BE_GET_CRI_FROM_CID(cid); phwi_ctrlr = phba->phwi_ctrlr; pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; /* return the context address */ *pcontext = pwrb_context; return beiscsi_get_wrb_handle(pwrb_context, phba->params.wrbs_per_cxn); } static inline void beiscsi_put_wrb_handle(struct hwi_wrb_context *pwrb_context, struct wrb_handle *pwrb_handle, unsigned int wrbs_per_cxn) { unsigned long flags; spin_lock_irqsave(&pwrb_context->wrb_lock, 
flags); pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle; pwrb_context->wrb_handles_available++; if (pwrb_context->free_index == (wrbs_per_cxn - 1)) pwrb_context->free_index = 0; else pwrb_context->free_index++; pwrb_handle->pio_handle = NULL; spin_unlock_irqrestore(&pwrb_context->wrb_lock, flags); } /** * free_wrb_handle - To free the wrb handle back to pool * @phba: The hba pointer * @pwrb_context: The context to free from * @pwrb_handle: The wrb_handle to free * * This happens under session_lock until submission to chip */ static void free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context, struct wrb_handle *pwrb_handle) { beiscsi_put_wrb_handle(pwrb_context, pwrb_handle, phba->params.wrbs_per_cxn); beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, "BM_%d : FREE WRB: pwrb_handle=%p free_index=0x%x " "wrb_handles_available=%d\n", pwrb_handle, pwrb_context->free_index, pwrb_context->wrb_handles_available); } static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba) { struct sgl_handle *psgl_handle; unsigned long flags; spin_lock_irqsave(&phba->mgmt_sgl_lock, flags); if (phba->eh_sgl_hndl_avbl) { psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index]; phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL; beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, "BM_%d : mgmt_sgl_alloc_index=%d=0x%x\n", phba->eh_sgl_alloc_index, phba->eh_sgl_alloc_index); phba->eh_sgl_hndl_avbl--; if (phba->eh_sgl_alloc_index == (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1)) phba->eh_sgl_alloc_index = 0; else phba->eh_sgl_alloc_index++; } else psgl_handle = NULL; spin_unlock_irqrestore(&phba->mgmt_sgl_lock, flags); return psgl_handle; } void free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle) { unsigned long flags; spin_lock_irqsave(&phba->mgmt_sgl_lock, flags); beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, "BM_%d : In free_mgmt_sgl_handle," "eh_sgl_free_index=%d\n", phba->eh_sgl_free_index); if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) { /* * this can happen if clean_task is called on a task that * failed in xmit_task or alloc_pdu. 
*/ beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, "BM_%d : Double Free in eh SGL ," "eh_sgl_free_index=%d\n", phba->eh_sgl_free_index); spin_unlock_irqrestore(&phba->mgmt_sgl_lock, flags); return; } phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle; phba->eh_sgl_hndl_avbl++; if (phba->eh_sgl_free_index == (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1)) phba->eh_sgl_free_index = 0; else phba->eh_sgl_free_index++; spin_unlock_irqrestore(&phba->mgmt_sgl_lock, flags); } static void be_complete_io(struct beiscsi_conn *beiscsi_conn, struct iscsi_task *task, struct common_sol_cqe *csol_cqe) { struct beiscsi_io_task *io_task = task->dd_data; struct be_status_bhs *sts_bhs = (struct be_status_bhs *)io_task->cmd_bhs; struct iscsi_conn *conn = beiscsi_conn->conn; unsigned char *sense; u32 resid = 0, exp_cmdsn, max_cmdsn; u8 rsp, status, flags; exp_cmdsn = csol_cqe->exp_cmdsn; max_cmdsn = (csol_cqe->exp_cmdsn + csol_cqe->cmd_wnd - 1); rsp = csol_cqe->i_resp; status = csol_cqe->i_sts; flags = csol_cqe->i_flags; resid = csol_cqe->res_cnt; if (!task->sc) { if (io_task->scsi_cmnd) { scsi_dma_unmap(io_task->scsi_cmnd); io_task->scsi_cmnd = NULL; } return; } task->sc->result = (DID_OK << 16) | status; if (rsp != ISCSI_STATUS_CMD_COMPLETED) { task->sc->result = DID_ERROR << 16; goto unmap; } /* bidi not initially supported */ if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) { if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW)) task->sc->result = DID_ERROR << 16; if (flags & ISCSI_FLAG_CMD_UNDERFLOW) { scsi_set_resid(task->sc, resid); if (!status && (scsi_bufflen(task->sc) - resid < task->sc->underflow)) task->sc->result = DID_ERROR << 16; } } if (status == SAM_STAT_CHECK_CONDITION) { u16 sense_len; unsigned short *slen = (unsigned short *)sts_bhs->sense_info; sense = sts_bhs->sense_info + sizeof(unsigned short); sense_len = be16_to_cpu(*slen); memcpy(task->sc->sense_buffer, sense, min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE)); } if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ) conn->rxdata_octets += resid; unmap: if (io_task->scsi_cmnd) { scsi_dma_unmap(io_task->scsi_cmnd); io_task->scsi_cmnd = NULL; } iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn); } static void be_complete_logout(struct beiscsi_conn *beiscsi_conn, struct iscsi_task *task, struct common_sol_cqe *csol_cqe) { struct iscsi_logout_rsp *hdr; struct beiscsi_io_task *io_task = task->dd_data; struct iscsi_conn *conn = beiscsi_conn->conn; hdr = (struct iscsi_logout_rsp *)task->hdr; hdr->opcode = ISCSI_OP_LOGOUT_RSP; hdr->t2wait = 5; hdr->t2retain = 0; hdr->flags = csol_cqe->i_flags; hdr->response = csol_cqe->i_resp; hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn); hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn + csol_cqe->cmd_wnd - 1); hdr->dlength[0] = 0; hdr->dlength[1] = 0; hdr->dlength[2] = 0; hdr->hlength = 0; hdr->itt = io_task->libiscsi_itt; __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); } static void be_complete_tmf(struct beiscsi_conn *beiscsi_conn, struct iscsi_task *task, struct common_sol_cqe *csol_cqe) { struct iscsi_tm_rsp *hdr; struct iscsi_conn *conn = beiscsi_conn->conn; struct beiscsi_io_task *io_task = task->dd_data; hdr = (struct iscsi_tm_rsp *)task->hdr; hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP; hdr->flags = csol_cqe->i_flags; hdr->response = csol_cqe->i_resp; hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn); hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn + csol_cqe->cmd_wnd - 1); hdr->itt = io_task->libiscsi_itt; __iscsi_complete_pdu(conn, 
(struct iscsi_hdr *)hdr, NULL, 0); } static void hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn, struct beiscsi_hba *phba, struct sol_cqe *psol) { struct hwi_wrb_context *pwrb_context; uint16_t wrb_index, cid, cri_index; struct hwi_controller *phwi_ctrlr; struct wrb_handle *pwrb_handle; struct iscsi_session *session; struct iscsi_task *task; phwi_ctrlr = phba->phwi_ctrlr; if (is_chip_be2_be3r(phba)) { wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe, wrb_idx, psol); cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe, cid, psol); } else { wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2, wrb_idx, psol); cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2, cid, psol); } cri_index = BE_GET_CRI_FROM_CID(cid); pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; pwrb_handle = pwrb_context->pwrb_handle_basestd[wrb_index]; session = beiscsi_conn->conn->session; spin_lock_bh(&session->back_lock); task = pwrb_handle->pio_handle; if (task) __iscsi_put_task(task); spin_unlock_bh(&session->back_lock); } static void be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn, struct iscsi_task *task, struct common_sol_cqe *csol_cqe) { struct iscsi_nopin *hdr; struct iscsi_conn *conn = beiscsi_conn->conn; struct beiscsi_io_task *io_task = task->dd_data; hdr = (struct iscsi_nopin *)task->hdr; hdr->flags = csol_cqe->i_flags; hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn); hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn + csol_cqe->cmd_wnd - 1); hdr->opcode = ISCSI_OP_NOOP_IN; hdr->itt = io_task->libiscsi_itt; __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); } static void adapter_get_sol_cqe(struct beiscsi_hba *phba, struct sol_cqe *psol, struct common_sol_cqe *csol_cqe) { if (is_chip_be2_be3r(phba)) { csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe, i_exp_cmd_sn, psol); csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe, i_res_cnt, psol); csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe, i_cmd_wnd, psol); csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe, wrb_index, psol); csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe, cid, psol); csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe, hw_sts, psol); csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe, i_resp, psol); csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe, i_sts, psol); csol_cqe->i_flags = AMAP_GET_BITS(struct amap_sol_cqe, i_flags, psol); } else { csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe_v2, i_exp_cmd_sn, psol); csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe_v2, i_res_cnt, psol); csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe_v2, wrb_index, psol); csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe_v2, cid, psol); csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2, hw_sts, psol); csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe_v2, i_cmd_wnd, psol); if (AMAP_GET_BITS(struct amap_sol_cqe_v2, cmd_cmpl, psol)) csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2, i_sts, psol); else csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe_v2, i_sts, psol); if (AMAP_GET_BITS(struct amap_sol_cqe_v2, u, psol)) csol_cqe->i_flags = ISCSI_FLAG_CMD_UNDERFLOW; if (AMAP_GET_BITS(struct amap_sol_cqe_v2, o, psol)) csol_cqe->i_flags |= ISCSI_FLAG_CMD_OVERFLOW; } } static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn, struct beiscsi_hba *phba, struct sol_cqe *psol) { struct iscsi_conn *conn = beiscsi_conn->conn; struct iscsi_session *session = conn->session; struct common_sol_cqe csol_cqe = {0}; struct hwi_wrb_context *pwrb_context; struct hwi_controller *phwi_ctrlr; 
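	/*
	 * Solicited completion path: the CQE layout differs between BE2/BE3
	 * and the later v2 chips, so adapter_get_sol_cqe() below first copies
	 * the fields into a chip-agnostic struct common_sol_cqe.  The WRB
	 * handle is then looked up from the CID/WRB index and the completion
	 * is dispatched by WRB type (IO, logout/TMF, NOP) under
	 * session->back_lock.
	 */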
struct wrb_handle *pwrb_handle; struct iscsi_task *task; uint16_t cri_index = 0; uint8_t type; phwi_ctrlr = phba->phwi_ctrlr; /* Copy the elements to a common structure */ adapter_get_sol_cqe(phba, psol, &csol_cqe); cri_index = BE_GET_CRI_FROM_CID(csol_cqe.cid); pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; pwrb_handle = pwrb_context->pwrb_handle_basestd[ csol_cqe.wrb_index]; spin_lock_bh(&session->back_lock); task = pwrb_handle->pio_handle; if (!task) { spin_unlock_bh(&session->back_lock); return; } type = ((struct beiscsi_io_task *)task->dd_data)->wrb_type; switch (type) { case HWH_TYPE_IO: case HWH_TYPE_IO_RD: if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_NOOP_OUT) be_complete_nopin_resp(beiscsi_conn, task, &csol_cqe); else be_complete_io(beiscsi_conn, task, &csol_cqe); break; case HWH_TYPE_LOGOUT: if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT) be_complete_logout(beiscsi_conn, task, &csol_cqe); else be_complete_tmf(beiscsi_conn, task, &csol_cqe); break; case HWH_TYPE_LOGIN: beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, "BM_%d :\t\t No HWH_TYPE_LOGIN Expected in" " %s- Solicited path\n", __func__); break; case HWH_TYPE_NOP: be_complete_nopin_resp(beiscsi_conn, task, &csol_cqe); break; default: beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, "BM_%d : In %s, unknown type = %d " "wrb_index 0x%x CID 0x%x\n", __func__, type, csol_cqe.wrb_index, csol_cqe.cid); break; } spin_unlock_bh(&session->back_lock); } /* * ASYNC PDUs include * a. Unsolicited NOP-In (target initiated NOP-In) * b. ASYNC Messages * c. Reject PDU * d. Login response * These headers arrive unprocessed by the EP firmware. * iSCSI layer processes them. */ static unsigned int beiscsi_complete_pdu(struct beiscsi_conn *beiscsi_conn, struct pdu_base *phdr, void *pdata, unsigned int dlen) { struct beiscsi_hba *phba = beiscsi_conn->phba; struct iscsi_conn *conn = beiscsi_conn->conn; struct beiscsi_io_task *io_task; struct iscsi_hdr *login_hdr; struct iscsi_task *task; u8 code; code = AMAP_GET_BITS(struct amap_pdu_base, opcode, phdr); switch (code) { case ISCSI_OP_NOOP_IN: pdata = NULL; dlen = 0; break; case ISCSI_OP_ASYNC_EVENT: break; case ISCSI_OP_REJECT: WARN_ON(!pdata); WARN_ON(!(dlen == 48)); beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, "BM_%d : In ISCSI_OP_REJECT\n"); break; case ISCSI_OP_LOGIN_RSP: case ISCSI_OP_TEXT_RSP: task = conn->login_task; io_task = task->dd_data; login_hdr = (struct iscsi_hdr *)phdr; login_hdr->itt = io_task->libiscsi_itt; break; default: beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, "BM_%d : unrecognized async PDU opcode 0x%x\n", code); return 1; } __iscsi_complete_pdu(conn, (struct iscsi_hdr *)phdr, pdata, dlen); return 0; } static inline void beiscsi_hdl_put_handle(struct hd_async_context *pasync_ctx, struct hd_async_handle *pasync_handle) { pasync_handle->is_final = 0; pasync_handle->buffer_len = 0; pasync_handle->in_use = 0; list_del_init(&pasync_handle->link); } static void beiscsi_hdl_purge_handles(struct beiscsi_hba *phba, struct hd_async_context *pasync_ctx, u16 cri) { struct hd_async_handle *pasync_handle, *tmp_handle; struct list_head *plist; plist = &pasync_ctx->async_entry[cri].wq.list; list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) beiscsi_hdl_put_handle(pasync_ctx, pasync_handle); INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wq.list); pasync_ctx->async_entry[cri].wq.hdr_len = 0; pasync_ctx->async_entry[cri].wq.bytes_received = 0; 
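	/*
	 * Clearing bytes_needed below completes the reset of the per-CRI
	 * reassembly state; the next unsolicited header CQE repopulates it
	 * from the PDU's data length.
	 */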
pasync_ctx->async_entry[cri].wq.bytes_needed = 0; } static struct hd_async_handle * beiscsi_hdl_get_handle(struct beiscsi_conn *beiscsi_conn, struct hd_async_context *pasync_ctx, struct i_t_dpdu_cqe *pdpdu_cqe, u8 *header) { struct beiscsi_hba *phba = beiscsi_conn->phba; struct hd_async_handle *pasync_handle; struct be_bus_address phys_addr; u16 cid, code, ci, cri; u8 final, error = 0; u32 dpl; cid = beiscsi_conn->beiscsi_conn_cid; cri = BE_GET_ASYNC_CRI_FROM_CID(cid); /** * This function is invoked to get the right async_handle structure * from a given DEF PDU CQ entry. * * - index in CQ entry gives the vertical index * - address in CQ entry is the offset where the DMA last ended * - final - no more notifications for this PDU */ if (is_chip_be2_be3r(phba)) { dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, dpl, pdpdu_cqe); ci = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, index, pdpdu_cqe); final = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, final, pdpdu_cqe); } else { dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2, dpl, pdpdu_cqe); ci = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2, index, pdpdu_cqe); final = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2, final, pdpdu_cqe); } /** * DB addr Hi/Lo is same for BE and SKH. * Subtract the dataplacementlength to get to the base. */ phys_addr.u.a32.address_lo = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, db_addr_lo, pdpdu_cqe); phys_addr.u.a32.address_lo -= dpl; phys_addr.u.a32.address_hi = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, db_addr_hi, pdpdu_cqe); code = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, code, pdpdu_cqe); switch (code) { case UNSOL_HDR_NOTIFY: pasync_handle = pasync_ctx->async_entry[ci].header; *header = 1; break; case UNSOL_DATA_DIGEST_ERROR_NOTIFY: error = 1; fallthrough; case UNSOL_DATA_NOTIFY: pasync_handle = pasync_ctx->async_entry[ci].data; break; /* called only for above codes */ default: return NULL; } if (pasync_handle->pa.u.a64.address != phys_addr.u.a64.address || pasync_handle->index != ci) { /* driver bug - if ci does not match async handle index */ error = 1; beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI, "BM_%d : cid %u async PDU handle mismatch - addr in %cQE %llx at %u:addr in CQE %llx ci %u\n", cid, pasync_handle->is_header ? 'H' : 'D', pasync_handle->pa.u.a64.address, pasync_handle->index, phys_addr.u.a64.address, ci); /* FW has stale address - attempt continuing by dropping */ } /** * DEF PDU header and data buffers with errors should be simply * dropped as there are no consumers for it. */ if (error) { beiscsi_hdl_put_handle(pasync_ctx, pasync_handle); return NULL; } if (pasync_handle->in_use || !list_empty(&pasync_handle->link)) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI, "BM_%d : cid %d async PDU handle in use - code %d ci %d addr %llx\n", cid, code, ci, phys_addr.u.a64.address); beiscsi_hdl_purge_handles(phba, pasync_ctx, cri); } list_del_init(&pasync_handle->link); /** * Each CID is associated with unique CRI. * ASYNC_CRI_FROM_CID mapping and CRI_FROM_CID are totaly different. 
**/ pasync_handle->cri = cri; pasync_handle->is_final = final; pasync_handle->buffer_len = dpl; pasync_handle->in_use = 1; return pasync_handle; } static unsigned int beiscsi_hdl_fwd_pdu(struct beiscsi_conn *beiscsi_conn, struct hd_async_context *pasync_ctx, u16 cri) { struct iscsi_session *session = beiscsi_conn->conn->session; struct hd_async_handle *pasync_handle, *plast_handle; struct beiscsi_hba *phba = beiscsi_conn->phba; void *phdr = NULL, *pdata = NULL; u32 dlen = 0, status = 0; struct list_head *plist; plist = &pasync_ctx->async_entry[cri].wq.list; plast_handle = NULL; list_for_each_entry(pasync_handle, plist, link) { plast_handle = pasync_handle; /* get the header, the first entry */ if (!phdr) { phdr = pasync_handle->pbuffer; continue; } /* use first buffer to collect all the data */ if (!pdata) { pdata = pasync_handle->pbuffer; dlen = pasync_handle->buffer_len; continue; } if (!pasync_handle->buffer_len || (dlen + pasync_handle->buffer_len) > pasync_ctx->async_data.buffer_size) break; memcpy(pdata + dlen, pasync_handle->pbuffer, pasync_handle->buffer_len); dlen += pasync_handle->buffer_len; } if (!plast_handle->is_final) { /* last handle should have final PDU notification from FW */ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI, "BM_%d : cid %u %p fwd async PDU opcode %x with last handle missing - HL%u:DN%u:DR%u\n", beiscsi_conn->beiscsi_conn_cid, plast_handle, AMAP_GET_BITS(struct amap_pdu_base, opcode, phdr), pasync_ctx->async_entry[cri].wq.hdr_len, pasync_ctx->async_entry[cri].wq.bytes_needed, pasync_ctx->async_entry[cri].wq.bytes_received); } spin_lock_bh(&session->back_lock); status = beiscsi_complete_pdu(beiscsi_conn, phdr, pdata, dlen); spin_unlock_bh(&session->back_lock); beiscsi_hdl_purge_handles(phba, pasync_ctx, cri); return status; } static unsigned int beiscsi_hdl_gather_pdu(struct beiscsi_conn *beiscsi_conn, struct hd_async_context *pasync_ctx, struct hd_async_handle *pasync_handle) { unsigned int bytes_needed = 0, status = 0; u16 cri = pasync_handle->cri; struct cri_wait_queue *wq; struct beiscsi_hba *phba; struct pdu_base *ppdu; char *err = ""; phba = beiscsi_conn->phba; wq = &pasync_ctx->async_entry[cri].wq; if (pasync_handle->is_header) { /* check if PDU hdr is rcv'd when old hdr not completed */ if (wq->hdr_len) { err = "incomplete"; goto drop_pdu; } ppdu = pasync_handle->pbuffer; bytes_needed = AMAP_GET_BITS(struct amap_pdu_base, data_len_hi, ppdu); bytes_needed <<= 16; bytes_needed |= be16_to_cpu(AMAP_GET_BITS(struct amap_pdu_base, data_len_lo, ppdu)); wq->hdr_len = pasync_handle->buffer_len; wq->bytes_received = 0; wq->bytes_needed = bytes_needed; list_add_tail(&pasync_handle->link, &wq->list); if (!bytes_needed) status = beiscsi_hdl_fwd_pdu(beiscsi_conn, pasync_ctx, cri); } else { /* check if data received has header and is needed */ if (!wq->hdr_len || !wq->bytes_needed) { err = "header less"; goto drop_pdu; } wq->bytes_received += pasync_handle->buffer_len; /* Something got overwritten? Better catch it here. */ if (wq->bytes_received > wq->bytes_needed) { err = "overflow"; goto drop_pdu; } list_add_tail(&pasync_handle->link, &wq->list); if (wq->bytes_received == wq->bytes_needed) status = beiscsi_hdl_fwd_pdu(beiscsi_conn, pasync_ctx, cri); } return status; drop_pdu: beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI, "BM_%d : cid %u async PDU %s - def-%c:HL%u:DN%u:DR%u\n", beiscsi_conn->beiscsi_conn_cid, err, pasync_handle->is_header ? 
'H' : 'D', wq->hdr_len, wq->bytes_needed, pasync_handle->buffer_len); /* discard this handle */ beiscsi_hdl_put_handle(pasync_ctx, pasync_handle); /* free all the other handles in cri_wait_queue */ beiscsi_hdl_purge_handles(phba, pasync_ctx, cri); /* try continuing */ return status; } static void beiscsi_hdq_post_handles(struct beiscsi_hba *phba, u8 header, u8 ulp_num, u16 nbuf) { struct hd_async_handle *pasync_handle; struct hd_async_context *pasync_ctx; struct hwi_controller *phwi_ctrlr; struct phys_addr *pasync_sge; u32 ring_id, doorbell = 0; u32 doorbell_offset; u16 prod, pi; phwi_ctrlr = phba->phwi_ctrlr; pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num); if (header) { pasync_sge = pasync_ctx->async_header.ring_base; pi = pasync_ctx->async_header.pi; ring_id = phwi_ctrlr->default_pdu_hdr[ulp_num].id; doorbell_offset = phwi_ctrlr->default_pdu_hdr[ulp_num]. doorbell_offset; } else { pasync_sge = pasync_ctx->async_data.ring_base; pi = pasync_ctx->async_data.pi; ring_id = phwi_ctrlr->default_pdu_data[ulp_num].id; doorbell_offset = phwi_ctrlr->default_pdu_data[ulp_num]. doorbell_offset; } for (prod = 0; prod < nbuf; prod++) { if (header) pasync_handle = pasync_ctx->async_entry[pi].header; else pasync_handle = pasync_ctx->async_entry[pi].data; WARN_ON(pasync_handle->is_header != header); WARN_ON(pasync_handle->index != pi); /* setup the ring only once */ if (nbuf == pasync_ctx->num_entries) { /* note hi is lo */ pasync_sge[pi].hi = pasync_handle->pa.u.a32.address_lo; pasync_sge[pi].lo = pasync_handle->pa.u.a32.address_hi; } if (++pi == pasync_ctx->num_entries) pi = 0; } if (header) pasync_ctx->async_header.pi = pi; else pasync_ctx->async_data.pi = pi; doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK; doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT; doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT; doorbell |= (prod & DB_DEF_PDU_CQPROC_MASK) << DB_DEF_PDU_CQPROC_SHIFT; iowrite32(doorbell, phba->db_va + doorbell_offset); } static void beiscsi_hdq_process_compl(struct beiscsi_conn *beiscsi_conn, struct i_t_dpdu_cqe *pdpdu_cqe) { struct beiscsi_hba *phba = beiscsi_conn->phba; struct hd_async_handle *pasync_handle = NULL; struct hd_async_context *pasync_ctx; struct hwi_controller *phwi_ctrlr; u8 ulp_num, consumed, header = 0; u16 cid_cri; phwi_ctrlr = phba->phwi_ctrlr; cid_cri = BE_GET_CRI_FROM_CID(beiscsi_conn->beiscsi_conn_cid); ulp_num = BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, cid_cri); pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num); pasync_handle = beiscsi_hdl_get_handle(beiscsi_conn, pasync_ctx, pdpdu_cqe, &header); if (is_chip_be2_be3r(phba)) consumed = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, num_cons, pdpdu_cqe); else consumed = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2, num_cons, pdpdu_cqe); if (pasync_handle) beiscsi_hdl_gather_pdu(beiscsi_conn, pasync_ctx, pasync_handle); /* num_cons indicates number of 8 RQEs consumed */ if (consumed) beiscsi_hdq_post_handles(phba, header, ulp_num, 8 * consumed); } void beiscsi_process_mcc_cq(struct beiscsi_hba *phba) { struct be_queue_info *mcc_cq; struct be_mcc_compl *mcc_compl; unsigned int num_processed = 0; mcc_cq = &phba->ctrl.mcc_obj.cq; mcc_compl = queue_tail_node(mcc_cq); mcc_compl->flags = le32_to_cpu(mcc_compl->flags); while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) { if (beiscsi_hba_in_error(phba)) return; if (num_processed >= 32) { hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 0); num_processed = 0; } if (mcc_compl->flags & CQE_FLAGS_ASYNC_MASK) { beiscsi_process_async_event(phba, mcc_compl); } else if (mcc_compl->flags & 
CQE_FLAGS_COMPLETED_MASK) { beiscsi_process_mcc_compl(&phba->ctrl, mcc_compl); } mcc_compl->flags = 0; queue_tail_inc(mcc_cq); mcc_compl = queue_tail_node(mcc_cq); mcc_compl->flags = le32_to_cpu(mcc_compl->flags); num_processed++; } if (num_processed > 0) hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1); } static void beiscsi_mcc_work(struct work_struct *work) { struct be_eq_obj *pbe_eq; struct beiscsi_hba *phba; pbe_eq = container_of(work, struct be_eq_obj, mcc_work); phba = pbe_eq->phba; beiscsi_process_mcc_cq(phba); /* rearm EQ for further interrupts */ if (!beiscsi_hba_in_error(phba)) hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1); } /** * beiscsi_process_cq()- Process the Completion Queue * @pbe_eq: Event Q on which the Completion has come * @budget: Max number of events to processed * * return * Number of Completion Entries processed. **/ unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget) { struct be_queue_info *cq; struct sol_cqe *sol; unsigned int total = 0; unsigned int num_processed = 0; unsigned short code = 0, cid = 0; uint16_t cri_index = 0; struct beiscsi_conn *beiscsi_conn; struct beiscsi_endpoint *beiscsi_ep; struct iscsi_endpoint *ep; struct beiscsi_hba *phba; cq = pbe_eq->cq; sol = queue_tail_node(cq); phba = pbe_eq->phba; while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] & CQE_VALID_MASK) { if (beiscsi_hba_in_error(phba)) return 0; be_dws_le_to_cpu(sol, sizeof(struct sol_cqe)); code = (sol->dw[offsetof(struct amap_sol_cqe, code) / 32] & CQE_CODE_MASK); /* Get the CID */ if (is_chip_be2_be3r(phba)) { cid = AMAP_GET_BITS(struct amap_sol_cqe, cid, sol); } else { if ((code == DRIVERMSG_NOTIFY) || (code == UNSOL_HDR_NOTIFY) || (code == UNSOL_DATA_NOTIFY)) cid = AMAP_GET_BITS( struct amap_i_t_dpdu_cqe_v2, cid, sol); else cid = AMAP_GET_BITS(struct amap_sol_cqe_v2, cid, sol); } cri_index = BE_GET_CRI_FROM_CID(cid); ep = phba->ep_array[cri_index]; if (ep == NULL) { /* connection has already been freed * just move on to next one */ beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, "BM_%d : proc cqe of disconn ep: cid %d\n", cid); goto proc_next_cqe; } beiscsi_ep = ep->dd_data; beiscsi_conn = beiscsi_ep->conn; /* replenish cq */ if (num_processed == 32) { hwi_ring_cq_db(phba, cq->id, 32, 0); num_processed = 0; } total++; switch (code) { case SOL_CMD_COMPLETE: hwi_complete_cmd(beiscsi_conn, phba, sol); break; case DRIVERMSG_NOTIFY: beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, "BM_%d : Received %s[%d] on CID : %d\n", cqe_desc[code], code, cid); hwi_complete_drvr_msgs(beiscsi_conn, phba, sol); break; case UNSOL_HDR_NOTIFY: beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, "BM_%d : Received %s[%d] on CID : %d\n", cqe_desc[code], code, cid); spin_lock_bh(&phba->async_pdu_lock); beiscsi_hdq_process_compl(beiscsi_conn, (struct i_t_dpdu_cqe *)sol); spin_unlock_bh(&phba->async_pdu_lock); break; case UNSOL_DATA_NOTIFY: beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, "BM_%d : Received %s[%d] on CID : %d\n", cqe_desc[code], code, cid); spin_lock_bh(&phba->async_pdu_lock); beiscsi_hdq_process_compl(beiscsi_conn, (struct i_t_dpdu_cqe *)sol); spin_unlock_bh(&phba->async_pdu_lock); break; case CXN_INVALIDATE_INDEX_NOTIFY: case CMD_INVALIDATED_NOTIFY: case CXN_INVALIDATE_NOTIFY: beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, "BM_%d : Ignoring %s[%d] on CID : %d\n", cqe_desc[code], code, cid); break; case CXN_KILLED_HDR_DIGEST_ERR: case SOL_CMD_KILLED_DATA_DIGEST_ERR: beiscsi_log(phba, KERN_ERR, 
BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, "BM_%d : Cmd Notification %s[%d] on CID : %d\n", cqe_desc[code], code, cid); break; case CMD_KILLED_INVALID_STATSN_RCVD: case CMD_KILLED_INVALID_R2T_RCVD: case CMD_CXN_KILLED_LUN_INVALID: case CMD_CXN_KILLED_ICD_INVALID: case CMD_CXN_KILLED_ITT_INVALID: case CMD_CXN_KILLED_SEQ_OUTOFORDER: case CMD_CXN_KILLED_INVALID_DATASN_RCVD: beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, "BM_%d : Cmd Notification %s[%d] on CID : %d\n", cqe_desc[code], code, cid); break; case UNSOL_DATA_DIGEST_ERROR_NOTIFY: beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, "BM_%d : Dropping %s[%d] on DPDU ring on CID : %d\n", cqe_desc[code], code, cid); spin_lock_bh(&phba->async_pdu_lock); /* driver consumes the entry and drops the contents */ beiscsi_hdq_process_compl(beiscsi_conn, (struct i_t_dpdu_cqe *)sol); spin_unlock_bh(&phba->async_pdu_lock); break; case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL: case CXN_KILLED_BURST_LEN_MISMATCH: case CXN_KILLED_AHS_RCVD: case CXN_KILLED_UNKNOWN_HDR: case CXN_KILLED_STALE_ITT_TTT_RCVD: case CXN_KILLED_INVALID_ITT_TTT_RCVD: case CXN_KILLED_TIMED_OUT: case CXN_KILLED_FIN_RCVD: case CXN_KILLED_RST_SENT: case CXN_KILLED_RST_RCVD: case CXN_KILLED_BAD_UNSOL_PDU_RCVD: case CXN_KILLED_BAD_WRB_INDEX_ERROR: case CXN_KILLED_OVER_RUN_RESIDUAL: case CXN_KILLED_UNDER_RUN_RESIDUAL: case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN: beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, "BM_%d : Event %s[%d] received on CID : %d\n", cqe_desc[code], code, cid); if (beiscsi_conn) iscsi_conn_failure(beiscsi_conn->conn, ISCSI_ERR_CONN_FAILED); break; default: beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, "BM_%d : Invalid CQE Event Received Code : %d CID 0x%x...\n", code, cid); break; } proc_next_cqe: AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0); queue_tail_inc(cq); sol = queue_tail_node(cq); num_processed++; if (total == budget) break; } hwi_ring_cq_db(phba, cq->id, num_processed, 1); return total; } static int be_iopoll(struct irq_poll *iop, int budget) { unsigned int ret, io_events; struct beiscsi_hba *phba; struct be_eq_obj *pbe_eq; struct be_eq_entry *eqe = NULL; struct be_queue_info *eq; pbe_eq = container_of(iop, struct be_eq_obj, iopoll); phba = pbe_eq->phba; if (beiscsi_hba_in_error(phba)) { irq_poll_complete(iop); return 0; } io_events = 0; eq = &pbe_eq->q; eqe = queue_tail_node(eq); while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] & EQE_VALID_MASK) { AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); queue_tail_inc(eq); eqe = queue_tail_node(eq); io_events++; } hwi_ring_eq_db(phba, eq->id, 1, io_events, 0, 1); ret = beiscsi_process_cq(pbe_eq, budget); pbe_eq->cq_count += ret; if (ret < budget) { irq_poll_complete(iop); beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, "BM_%d : rearm pbe_eq->q.id =%d ret %d\n", pbe_eq->q.id, ret); if (!beiscsi_hba_in_error(phba)) hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1); } return ret; } static void hwi_write_sgl_v2(struct iscsi_wrb *pwrb, struct scatterlist *sg, unsigned int num_sg, struct beiscsi_io_task *io_task) { struct iscsi_sge *psgl; unsigned int sg_len, index; unsigned int sge_len = 0; unsigned long long addr; struct scatterlist *l_sg; unsigned int offset; AMAP_SET_BITS(struct amap_iscsi_wrb_v2, iscsi_bhs_addr_lo, pwrb, io_task->bhs_pa.u.a32.address_lo); AMAP_SET_BITS(struct amap_iscsi_wrb_v2, iscsi_bhs_addr_hi, pwrb, io_task->bhs_pa.u.a32.address_hi); l_sg = sg; for (index = 0; (index < num_sg) && (index < 2); index++, sg = 
sg_next(sg)) { if (index == 0) { sg_len = sg_dma_len(sg); addr = (u64) sg_dma_address(sg); AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_addr_lo, pwrb, lower_32_bits(addr)); AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_addr_hi, pwrb, upper_32_bits(addr)); AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_len, pwrb, sg_len); sge_len = sg_len; } else { AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_r2t_offset, pwrb, sge_len); sg_len = sg_dma_len(sg); addr = (u64) sg_dma_address(sg); AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_addr_lo, pwrb, lower_32_bits(addr)); AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_addr_hi, pwrb, upper_32_bits(addr)); AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_len, pwrb, sg_len); } } psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag; memset(psgl, 0, sizeof(*psgl) * BE2_SGE); AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2); AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, io_task->bhs_pa.u.a32.address_hi); AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, io_task->bhs_pa.u.a32.address_lo); if (num_sg == 1) { AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb, 1); AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb, 0); } else if (num_sg == 2) { AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb, 0); AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb, 1); } else { AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb, 0); AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb, 0); } sg = l_sg; psgl++; psgl++; offset = 0; for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) { sg_len = sg_dma_len(sg); addr = (u64) sg_dma_address(sg); AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, lower_32_bits(addr)); AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, upper_32_bits(addr)); AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len); AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset); AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0); offset += sg_len; } psgl--; AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1); } static void hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg, unsigned int num_sg, struct beiscsi_io_task *io_task) { struct iscsi_sge *psgl; unsigned int sg_len, index; unsigned int sge_len = 0; unsigned long long addr; struct scatterlist *l_sg; unsigned int offset; AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb, io_task->bhs_pa.u.a32.address_lo); AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb, io_task->bhs_pa.u.a32.address_hi); l_sg = sg; for (index = 0; (index < num_sg) && (index < 2); index++, sg = sg_next(sg)) { if (index == 0) { sg_len = sg_dma_len(sg); addr = (u64) sg_dma_address(sg); AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb, ((u32)(addr & 0xFFFFFFFF))); AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb, ((u32)(addr >> 32))); AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb, sg_len); sge_len = sg_len; } else { AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset, pwrb, sge_len); sg_len = sg_dma_len(sg); addr = (u64) sg_dma_address(sg); AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_lo, pwrb, ((u32)(addr & 0xFFFFFFFF))); AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_hi, pwrb, ((u32)(addr >> 32))); AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_len, pwrb, sg_len); } } psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag; memset(psgl, 0, sizeof(*psgl) * BE2_SGE); AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2); AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 
io_task->bhs_pa.u.a32.address_hi); AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, io_task->bhs_pa.u.a32.address_lo); if (num_sg == 1) { AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1); AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 0); } else if (num_sg == 2) { AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 0); AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 1); } else { AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 0); AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 0); } sg = l_sg; psgl++; psgl++; offset = 0; for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) { sg_len = sg_dma_len(sg); addr = (u64) sg_dma_address(sg); AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, (addr & 0xFFFFFFFF)); AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, (addr >> 32)); AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len); AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset); AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0); offset += sg_len; } psgl--; AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1); } /** * hwi_write_buffer()- Populate the WRB with task info * @pwrb: ptr to the WRB entry * @task: iscsi task which is to be executed **/ static int hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task) { struct iscsi_sge *psgl; struct beiscsi_io_task *io_task = task->dd_data; struct beiscsi_conn *beiscsi_conn = io_task->conn; struct beiscsi_hba *phba = beiscsi_conn->phba; uint8_t dsp_value = 0; io_task->bhs_len = sizeof(struct be_nonio_bhs) - 2; AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb, io_task->bhs_pa.u.a32.address_lo); AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb, io_task->bhs_pa.u.a32.address_hi); if (task->data) { /* Check for the data_count */ dsp_value = (task->data_count) ? 
1 : 0; if (is_chip_be2_be3r(phba)) AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, dsp_value); else AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, dsp_value); /* Map addr only if there is data_count */ if (dsp_value) { io_task->mtask_addr = dma_map_single(&phba->pcidev->dev, task->data, task->data_count, DMA_TO_DEVICE); if (dma_mapping_error(&phba->pcidev->dev, io_task->mtask_addr)) return -ENOMEM; io_task->mtask_data_count = task->data_count; } else io_task->mtask_addr = 0; AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb, lower_32_bits(io_task->mtask_addr)); AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb, upper_32_bits(io_task->mtask_addr)); AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb, task->data_count); AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1); } else { AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0); io_task->mtask_addr = 0; } psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag; AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len); AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, io_task->bhs_pa.u.a32.address_hi); AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, io_task->bhs_pa.u.a32.address_lo); if (task->data) { psgl++; AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 0); AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 0); AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0); AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, 0); AMAP_SET_BITS(struct amap_iscsi_sge, rsvd0, psgl, 0); AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0); psgl++; if (task->data) { AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, lower_32_bits(io_task->mtask_addr)); AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, upper_32_bits(io_task->mtask_addr)); } AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106); } AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1); return 0; } /** * beiscsi_find_mem_req()- Find mem needed * @phba: ptr to HBA struct **/ static void beiscsi_find_mem_req(struct beiscsi_hba *phba) { uint8_t mem_descr_index, ulp_num; unsigned int num_async_pdu_buf_pages; unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn; unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages; phba->params.hwi_ws_sz = sizeof(struct hwi_controller); phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 * BE_ISCSI_PDU_HEADER_SIZE; phba->mem_req[HWI_MEM_ADDN_CONTEXT] = sizeof(struct hwi_context_memory); phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb) * (phba->params.wrbs_per_cxn) * phba->params.cxns_per_ctrl; wrb_sz_per_cxn = sizeof(struct wrb_handle) * (phba->params.wrbs_per_cxn); phba->mem_req[HWI_MEM_WRBH] = roundup_pow_of_two((wrb_sz_per_cxn) * phba->params.cxns_per_ctrl); phba->mem_req[HWI_MEM_SGLH] = sizeof(struct sgl_handle) * phba->params.icds_per_ctrl; phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) * phba->params.num_sge_per_io * phba->params.icds_per_ctrl; for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { num_async_pdu_buf_sgl_pages = PAGES_REQUIRED(BEISCSI_ASYNC_HDQ_SIZE( phba, ulp_num) * sizeof(struct phys_addr)); num_async_pdu_buf_pages = PAGES_REQUIRED(BEISCSI_ASYNC_HDQ_SIZE( phba, ulp_num) * phba->params.defpdu_hdr_sz); num_async_pdu_data_pages = PAGES_REQUIRED(BEISCSI_ASYNC_HDQ_SIZE( phba, ulp_num) * phba->params.defpdu_data_sz); num_async_pdu_data_sgl_pages = PAGES_REQUIRED(BEISCSI_ASYNC_HDQ_SIZE( phba, ulp_num) * sizeof(struct phys_addr)); mem_descr_index = (HWI_MEM_TEMPLATE_HDR_ULP0 + (ulp_num * MEM_DESCR_OFFSET)); 
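			/*
			 * Each per-ULP descriptor sits at a fixed stride
			 * (MEM_DESCR_OFFSET) from its ULP0 index; the same
			 * pattern repeats below for the async header/data
			 * buffers, their rings and handles, and the async
			 * PDU context.
			 */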
phba->mem_req[mem_descr_index] = BEISCSI_GET_CID_COUNT(phba, ulp_num) * BEISCSI_TEMPLATE_HDR_PER_CXN_SIZE; mem_descr_index = (HWI_MEM_ASYNC_HEADER_BUF_ULP0 + (ulp_num * MEM_DESCR_OFFSET)); phba->mem_req[mem_descr_index] = num_async_pdu_buf_pages * PAGE_SIZE; mem_descr_index = (HWI_MEM_ASYNC_DATA_BUF_ULP0 + (ulp_num * MEM_DESCR_OFFSET)); phba->mem_req[mem_descr_index] = num_async_pdu_data_pages * PAGE_SIZE; mem_descr_index = (HWI_MEM_ASYNC_HEADER_RING_ULP0 + (ulp_num * MEM_DESCR_OFFSET)); phba->mem_req[mem_descr_index] = num_async_pdu_buf_sgl_pages * PAGE_SIZE; mem_descr_index = (HWI_MEM_ASYNC_DATA_RING_ULP0 + (ulp_num * MEM_DESCR_OFFSET)); phba->mem_req[mem_descr_index] = num_async_pdu_data_sgl_pages * PAGE_SIZE; mem_descr_index = (HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 + (ulp_num * MEM_DESCR_OFFSET)); phba->mem_req[mem_descr_index] = BEISCSI_ASYNC_HDQ_SIZE(phba, ulp_num) * sizeof(struct hd_async_handle); mem_descr_index = (HWI_MEM_ASYNC_DATA_HANDLE_ULP0 + (ulp_num * MEM_DESCR_OFFSET)); phba->mem_req[mem_descr_index] = BEISCSI_ASYNC_HDQ_SIZE(phba, ulp_num) * sizeof(struct hd_async_handle); mem_descr_index = (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 + (ulp_num * MEM_DESCR_OFFSET)); phba->mem_req[mem_descr_index] = sizeof(struct hd_async_context) + (BEISCSI_ASYNC_HDQ_SIZE(phba, ulp_num) * sizeof(struct hd_async_entry)); } } } static int beiscsi_alloc_mem(struct beiscsi_hba *phba) { dma_addr_t bus_add; struct hwi_controller *phwi_ctrlr; struct be_mem_descriptor *mem_descr; struct mem_array *mem_arr, *mem_arr_orig; unsigned int i, j, alloc_size, curr_alloc_size; phba->phwi_ctrlr = kzalloc(phba->params.hwi_ws_sz, GFP_KERNEL); if (!phba->phwi_ctrlr) return -ENOMEM; /* Allocate memory for wrb_context */ phwi_ctrlr = phba->phwi_ctrlr; phwi_ctrlr->wrb_context = kcalloc(phba->params.cxns_per_ctrl, sizeof(struct hwi_wrb_context), GFP_KERNEL); if (!phwi_ctrlr->wrb_context) { kfree(phba->phwi_ctrlr); return -ENOMEM; } phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr), GFP_KERNEL); if (!phba->init_mem) { kfree(phwi_ctrlr->wrb_context); kfree(phba->phwi_ctrlr); return -ENOMEM; } mem_arr_orig = kmalloc_array(BEISCSI_MAX_FRAGS_INIT, sizeof(*mem_arr_orig), GFP_KERNEL); if (!mem_arr_orig) { kfree(phba->init_mem); kfree(phwi_ctrlr->wrb_context); kfree(phba->phwi_ctrlr); return -ENOMEM; } mem_descr = phba->init_mem; for (i = 0; i < SE_MEM_MAX; i++) { if (!phba->mem_req[i]) { mem_descr->mem_array = NULL; mem_descr++; continue; } j = 0; mem_arr = mem_arr_orig; alloc_size = phba->mem_req[i]; memset(mem_arr, 0, sizeof(struct mem_array) * BEISCSI_MAX_FRAGS_INIT); curr_alloc_size = min(be_max_phys_size * 1024, alloc_size); do { mem_arr->virtual_address = dma_alloc_coherent(&phba->pcidev->dev, curr_alloc_size, &bus_add, GFP_KERNEL); if (!mem_arr->virtual_address) { if (curr_alloc_size <= BE_MIN_MEM_SIZE) goto free_mem; if (curr_alloc_size - rounddown_pow_of_two(curr_alloc_size)) curr_alloc_size = rounddown_pow_of_two (curr_alloc_size); else curr_alloc_size = curr_alloc_size / 2; } else { mem_arr->bus_address.u. 
a64.address = (__u64) bus_add; mem_arr->size = curr_alloc_size; alloc_size -= curr_alloc_size; curr_alloc_size = min(be_max_phys_size * 1024, alloc_size); j++; mem_arr++; } } while (alloc_size); mem_descr->num_elements = j; mem_descr->size_in_bytes = phba->mem_req[i]; mem_descr->mem_array = kmalloc_array(j, sizeof(*mem_arr), GFP_KERNEL); if (!mem_descr->mem_array) goto free_mem; memcpy(mem_descr->mem_array, mem_arr_orig, sizeof(struct mem_array) * j); mem_descr++; } kfree(mem_arr_orig); return 0; free_mem: mem_descr->num_elements = j; while ((i) || (j)) { for (j = mem_descr->num_elements; j > 0; j--) { dma_free_coherent(&phba->pcidev->dev, mem_descr->mem_array[j - 1].size, mem_descr->mem_array[j - 1]. virtual_address, (unsigned long)mem_descr-> mem_array[j - 1]. bus_address.u.a64.address); } if (i) { i--; kfree(mem_descr->mem_array); mem_descr--; } } kfree(mem_arr_orig); kfree(phba->init_mem); kfree(phba->phwi_ctrlr->wrb_context); kfree(phba->phwi_ctrlr); return -ENOMEM; } static int beiscsi_get_memory(struct beiscsi_hba *phba) { beiscsi_find_mem_req(phba); return beiscsi_alloc_mem(phba); } static void iscsi_init_global_templates(struct beiscsi_hba *phba) { struct pdu_data_out *pdata_out; struct pdu_nop_out *pnop_out; struct be_mem_descriptor *mem_descr; mem_descr = phba->init_mem; mem_descr += ISCSI_MEM_GLOBAL_HEADER; pdata_out = (struct pdu_data_out *)mem_descr->mem_array[0].virtual_address; memset(pdata_out, 0, BE_ISCSI_PDU_HEADER_SIZE); AMAP_SET_BITS(struct amap_pdu_data_out, opcode, pdata_out, IIOC_SCSI_DATA); pnop_out = (struct pdu_nop_out *)((unsigned char *)mem_descr->mem_array[0]. virtual_address + BE_ISCSI_PDU_HEADER_SIZE); memset(pnop_out, 0, BE_ISCSI_PDU_HEADER_SIZE); AMAP_SET_BITS(struct amap_pdu_nop_out, ttt, pnop_out, 0xFFFFFFFF); AMAP_SET_BITS(struct amap_pdu_nop_out, f_bit, pnop_out, 1); AMAP_SET_BITS(struct amap_pdu_nop_out, i_bit, pnop_out, 0); } static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba) { struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb; struct hwi_context_memory *phwi_ctxt; struct wrb_handle *pwrb_handle = NULL; struct hwi_controller *phwi_ctrlr; struct hwi_wrb_context *pwrb_context; struct iscsi_wrb *pwrb = NULL; unsigned int num_cxn_wrbh = 0; unsigned int num_cxn_wrb = 0, j, idx = 0, index; mem_descr_wrbh = phba->init_mem; mem_descr_wrbh += HWI_MEM_WRBH; mem_descr_wrb = phba->init_mem; mem_descr_wrb += HWI_MEM_WRB; phwi_ctrlr = phba->phwi_ctrlr; /* Allocate memory for WRBQ */ phwi_ctxt = phwi_ctrlr->phwi_ctxt; phwi_ctxt->be_wrbq = kcalloc(phba->params.cxns_per_ctrl, sizeof(struct be_queue_info), GFP_KERNEL); if (!phwi_ctxt->be_wrbq) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : WRBQ Mem Alloc Failed\n"); return -ENOMEM; } for (index = 0; index < phba->params.cxns_per_ctrl; index++) { pwrb_context = &phwi_ctrlr->wrb_context[index]; pwrb_context->pwrb_handle_base = kcalloc(phba->params.wrbs_per_cxn, sizeof(struct wrb_handle *), GFP_KERNEL); if (!pwrb_context->pwrb_handle_base) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : Mem Alloc Failed. Failing to load\n"); goto init_wrb_hndl_failed; } pwrb_context->pwrb_handle_basestd = kcalloc(phba->params.wrbs_per_cxn, sizeof(struct wrb_handle *), GFP_KERNEL); if (!pwrb_context->pwrb_handle_basestd) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : Mem Alloc Failed. 
Failing to load\n"); goto init_wrb_hndl_failed; } if (!num_cxn_wrbh) { pwrb_handle = mem_descr_wrbh->mem_array[idx].virtual_address; num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) / ((sizeof(struct wrb_handle)) * phba->params.wrbs_per_cxn)); idx++; } pwrb_context->alloc_index = 0; pwrb_context->wrb_handles_available = 0; pwrb_context->free_index = 0; if (num_cxn_wrbh) { for (j = 0; j < phba->params.wrbs_per_cxn; j++) { pwrb_context->pwrb_handle_base[j] = pwrb_handle; pwrb_context->pwrb_handle_basestd[j] = pwrb_handle; pwrb_context->wrb_handles_available++; pwrb_handle->wrb_index = j; pwrb_handle++; } num_cxn_wrbh--; } spin_lock_init(&pwrb_context->wrb_lock); } idx = 0; for (index = 0; index < phba->params.cxns_per_ctrl; index++) { pwrb_context = &phwi_ctrlr->wrb_context[index]; if (!num_cxn_wrb) { pwrb = mem_descr_wrb->mem_array[idx].virtual_address; num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) / ((sizeof(struct iscsi_wrb) * phba->params.wrbs_per_cxn)); idx++; } if (num_cxn_wrb) { for (j = 0; j < phba->params.wrbs_per_cxn; j++) { pwrb_handle = pwrb_context->pwrb_handle_base[j]; pwrb_handle->pwrb = pwrb; pwrb++; } num_cxn_wrb--; } } return 0; init_wrb_hndl_failed: for (j = index; j > 0; j--) { pwrb_context = &phwi_ctrlr->wrb_context[j]; kfree(pwrb_context->pwrb_handle_base); kfree(pwrb_context->pwrb_handle_basestd); } return -ENOMEM; } static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba) { uint8_t ulp_num; struct hwi_controller *phwi_ctrlr; struct hba_parameters *p = &phba->params; struct hd_async_context *pasync_ctx; struct hd_async_handle *pasync_header_h, *pasync_data_h; unsigned int index, idx, num_per_mem, num_async_data; struct be_mem_descriptor *mem_descr; for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { /* get async_ctx for each ULP */ mem_descr = (struct be_mem_descriptor *)phba->init_mem; mem_descr += (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 + (ulp_num * MEM_DESCR_OFFSET)); phwi_ctrlr = phba->phwi_ctrlr; phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num] = (struct hd_async_context *) mem_descr->mem_array[0].virtual_address; pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num]; memset(pasync_ctx, 0, sizeof(*pasync_ctx)); pasync_ctx->async_entry = (struct hd_async_entry *) ((long unsigned int)pasync_ctx + sizeof(struct hd_async_context)); pasync_ctx->num_entries = BEISCSI_ASYNC_HDQ_SIZE(phba, ulp_num); /* setup header buffers */ mem_descr = (struct be_mem_descriptor *)phba->init_mem; mem_descr += HWI_MEM_ASYNC_HEADER_BUF_ULP0 + (ulp_num * MEM_DESCR_OFFSET); if (mem_descr->mem_array[0].virtual_address) { beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, "BM_%d : hwi_init_async_pdu_ctx" " HWI_MEM_ASYNC_HEADER_BUF_ULP%d va=%p\n", ulp_num, mem_descr->mem_array[0]. virtual_address); } else beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, "BM_%d : No Virtual address for ULP : %d\n", ulp_num); pasync_ctx->async_header.pi = 0; pasync_ctx->async_header.buffer_size = p->defpdu_hdr_sz; pasync_ctx->async_header.va_base = mem_descr->mem_array[0].virtual_address; pasync_ctx->async_header.pa_base.u.a64.address = mem_descr->mem_array[0]. bus_address.u.a64.address; /* setup header buffer sgls */ mem_descr = (struct be_mem_descriptor *)phba->init_mem; mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 + (ulp_num * MEM_DESCR_OFFSET); if (mem_descr->mem_array[0].virtual_address) { beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, "BM_%d : hwi_init_async_pdu_ctx" " HWI_MEM_ASYNC_HEADER_RING_ULP%d va=%p\n", ulp_num, mem_descr->mem_array[0]. 
virtual_address); } else beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, "BM_%d : No Virtual address for ULP : %d\n", ulp_num); pasync_ctx->async_header.ring_base = mem_descr->mem_array[0].virtual_address; /* setup header buffer handles */ mem_descr = (struct be_mem_descriptor *)phba->init_mem; mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 + (ulp_num * MEM_DESCR_OFFSET); if (mem_descr->mem_array[0].virtual_address) { beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, "BM_%d : hwi_init_async_pdu_ctx" " HWI_MEM_ASYNC_HEADER_HANDLE_ULP%d va=%p\n", ulp_num, mem_descr->mem_array[0]. virtual_address); } else beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, "BM_%d : No Virtual address for ULP : %d\n", ulp_num); pasync_ctx->async_header.handle_base = mem_descr->mem_array[0].virtual_address; /* setup data buffer sgls */ mem_descr = (struct be_mem_descriptor *)phba->init_mem; mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 + (ulp_num * MEM_DESCR_OFFSET); if (mem_descr->mem_array[0].virtual_address) { beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, "BM_%d : hwi_init_async_pdu_ctx" " HWI_MEM_ASYNC_DATA_RING_ULP%d va=%p\n", ulp_num, mem_descr->mem_array[0]. virtual_address); } else beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, "BM_%d : No Virtual address for ULP : %d\n", ulp_num); pasync_ctx->async_data.ring_base = mem_descr->mem_array[0].virtual_address; /* setup data buffer handles */ mem_descr = (struct be_mem_descriptor *)phba->init_mem; mem_descr += HWI_MEM_ASYNC_DATA_HANDLE_ULP0 + (ulp_num * MEM_DESCR_OFFSET); if (!mem_descr->mem_array[0].virtual_address) beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, "BM_%d : No Virtual address for ULP : %d\n", ulp_num); pasync_ctx->async_data.handle_base = mem_descr->mem_array[0].virtual_address; pasync_header_h = (struct hd_async_handle *) pasync_ctx->async_header.handle_base; pasync_data_h = (struct hd_async_handle *) pasync_ctx->async_data.handle_base; /* setup data buffers */ mem_descr = (struct be_mem_descriptor *)phba->init_mem; mem_descr += HWI_MEM_ASYNC_DATA_BUF_ULP0 + (ulp_num * MEM_DESCR_OFFSET); if (mem_descr->mem_array[0].virtual_address) { beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, "BM_%d : hwi_init_async_pdu_ctx" " HWI_MEM_ASYNC_DATA_BUF_ULP%d va=%p\n", ulp_num, mem_descr->mem_array[0]. virtual_address); } else beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, "BM_%d : No Virtual address for ULP : %d\n", ulp_num); idx = 0; pasync_ctx->async_data.pi = 0; pasync_ctx->async_data.buffer_size = p->defpdu_data_sz; pasync_ctx->async_data.va_base = mem_descr->mem_array[idx].virtual_address; pasync_ctx->async_data.pa_base.u.a64.address = mem_descr->mem_array[idx]. bus_address.u.a64.address; num_async_data = ((mem_descr->mem_array[idx].size) / phba->params.defpdu_data_sz); num_per_mem = 0; for (index = 0; index < BEISCSI_ASYNC_HDQ_SIZE (phba, ulp_num); index++) { pasync_header_h->cri = -1; pasync_header_h->is_header = 1; pasync_header_h->index = index; INIT_LIST_HEAD(&pasync_header_h->link); pasync_header_h->pbuffer = (void *)((unsigned long) (pasync_ctx-> async_header.va_base) + (p->defpdu_hdr_sz * index)); pasync_header_h->pa.u.a64.address = pasync_ctx->async_header.pa_base.u.a64. address + (p->defpdu_hdr_sz * index); pasync_ctx->async_entry[index].header = pasync_header_h; pasync_header_h++; INIT_LIST_HEAD(&pasync_ctx->async_entry[index]. 
wq.list); pasync_data_h->cri = -1; pasync_data_h->is_header = 0; pasync_data_h->index = index; INIT_LIST_HEAD(&pasync_data_h->link); if (!num_async_data) { num_per_mem = 0; idx++; pasync_ctx->async_data.va_base = mem_descr->mem_array[idx]. virtual_address; pasync_ctx->async_data.pa_base.u. a64.address = mem_descr->mem_array[idx]. bus_address.u.a64.address; num_async_data = ((mem_descr->mem_array[idx]. size) / phba->params.defpdu_data_sz); } pasync_data_h->pbuffer = (void *)((unsigned long) (pasync_ctx->async_data.va_base) + (p->defpdu_data_sz * num_per_mem)); pasync_data_h->pa.u.a64.address = pasync_ctx->async_data.pa_base.u.a64. address + (p->defpdu_data_sz * num_per_mem); num_per_mem++; num_async_data--; pasync_ctx->async_entry[index].data = pasync_data_h; pasync_data_h++; } } } return 0; } static int be_sgl_create_contiguous(void *virtual_address, u64 physical_address, u32 length, struct be_dma_mem *sgl) { WARN_ON(!virtual_address); WARN_ON(!physical_address); WARN_ON(!length); WARN_ON(!sgl); sgl->va = virtual_address; sgl->dma = (unsigned long)physical_address; sgl->size = length; return 0; } static void be_sgl_destroy_contiguous(struct be_dma_mem *sgl) { memset(sgl, 0, sizeof(*sgl)); } static void hwi_build_be_sgl_arr(struct beiscsi_hba *phba, struct mem_array *pmem, struct be_dma_mem *sgl) { if (sgl->va) be_sgl_destroy_contiguous(sgl); be_sgl_create_contiguous(pmem->virtual_address, pmem->bus_address.u.a64.address, pmem->size, sgl); } static void hwi_build_be_sgl_by_offset(struct beiscsi_hba *phba, struct mem_array *pmem, struct be_dma_mem *sgl) { if (sgl->va) be_sgl_destroy_contiguous(sgl); be_sgl_create_contiguous((unsigned char *)pmem->virtual_address, pmem->bus_address.u.a64.address, pmem->size, sgl); } static int be_fill_queue(struct be_queue_info *q, u16 len, u16 entry_size, void *vaddress) { struct be_dma_mem *mem = &q->dma_mem; memset(q, 0, sizeof(*q)); q->len = len; q->entry_size = entry_size; mem->size = len * entry_size; mem->va = vaddress; if (!mem->va) return -ENOMEM; memset(mem->va, 0, mem->size); return 0; } static int beiscsi_create_eqs(struct beiscsi_hba *phba, struct hwi_context_memory *phwi_context) { int ret = -ENOMEM, eq_for_mcc; unsigned int i, num_eq_pages; struct be_queue_info *eq; struct be_dma_mem *mem; void *eq_vaddress; dma_addr_t paddr; num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * sizeof(struct be_eq_entry)); if (phba->pcidev->msix_enabled) eq_for_mcc = 1; else eq_for_mcc = 0; for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) { eq = &phwi_context->be_eq[i].q; mem = &eq->dma_mem; phwi_context->be_eq[i].phba = phba; eq_vaddress = dma_alloc_coherent(&phba->pcidev->dev, num_eq_pages * PAGE_SIZE, &paddr, GFP_KERNEL); if (!eq_vaddress) { ret = -ENOMEM; goto create_eq_error; } mem->va = eq_vaddress; ret = be_fill_queue(eq, phba->params.num_eq_entries, sizeof(struct be_eq_entry), eq_vaddress); if (ret) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : be_fill_queue Failed for EQ\n"); goto create_eq_error; } mem->dma = paddr; ret = beiscsi_cmd_eq_create(&phba->ctrl, eq, BEISCSI_EQ_DELAY_DEF); if (ret) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : beiscsi_cmd_eq_create Failed for EQ\n"); goto create_eq_error; } beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, "BM_%d : eqid = %d\n", phwi_context->be_eq[i].q.id); } return 0; create_eq_error: for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) { eq = &phwi_context->be_eq[i].q; mem = &eq->dma_mem; if (mem->va) dma_free_coherent(&phba->pcidev->dev, num_eq_pages * PAGE_SIZE, mem->va, 
mem->dma); } return ret; } static int beiscsi_create_cqs(struct beiscsi_hba *phba, struct hwi_context_memory *phwi_context) { unsigned int i, num_cq_pages; struct be_queue_info *cq, *eq; struct be_dma_mem *mem; struct be_eq_obj *pbe_eq; void *cq_vaddress; int ret = -ENOMEM; dma_addr_t paddr; num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * sizeof(struct sol_cqe)); for (i = 0; i < phba->num_cpus; i++) { cq = &phwi_context->be_cq[i]; eq = &phwi_context->be_eq[i].q; pbe_eq = &phwi_context->be_eq[i]; pbe_eq->cq = cq; pbe_eq->phba = phba; mem = &cq->dma_mem; cq_vaddress = dma_alloc_coherent(&phba->pcidev->dev, num_cq_pages * PAGE_SIZE, &paddr, GFP_KERNEL); if (!cq_vaddress) { ret = -ENOMEM; goto create_cq_error; } ret = be_fill_queue(cq, phba->params.num_cq_entries, sizeof(struct sol_cqe), cq_vaddress); if (ret) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : be_fill_queue Failed for ISCSI CQ\n"); goto create_cq_error; } mem->dma = paddr; ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false, false, 0); if (ret) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : beiscsi_cmd_eq_create Failed for ISCSI CQ\n"); goto create_cq_error; } beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, "BM_%d : iscsi cq_id is %d for eq_id %d\n" "iSCSI CQ CREATED\n", cq->id, eq->id); } return 0; create_cq_error: for (i = 0; i < phba->num_cpus; i++) { cq = &phwi_context->be_cq[i]; mem = &cq->dma_mem; if (mem->va) dma_free_coherent(&phba->pcidev->dev, num_cq_pages * PAGE_SIZE, mem->va, mem->dma); } return ret; } static int beiscsi_create_def_hdr(struct beiscsi_hba *phba, struct hwi_context_memory *phwi_context, struct hwi_controller *phwi_ctrlr, unsigned int def_pdu_ring_sz, uint8_t ulp_num) { unsigned int idx; int ret; struct be_queue_info *dq, *cq; struct be_dma_mem *mem; struct be_mem_descriptor *mem_descr; void *dq_vaddress; idx = 0; dq = &phwi_context->be_def_hdrq[ulp_num]; cq = &phwi_context->be_cq[0]; mem = &dq->dma_mem; mem_descr = phba->init_mem; mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 + (ulp_num * MEM_DESCR_OFFSET); dq_vaddress = mem_descr->mem_array[idx].virtual_address; ret = be_fill_queue(dq, mem_descr->mem_array[0].size / sizeof(struct phys_addr), sizeof(struct phys_addr), dq_vaddress); if (ret) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : be_fill_queue Failed for DEF PDU HDR on ULP : %d\n", ulp_num); return ret; } mem->dma = (unsigned long)mem_descr->mem_array[idx]. 
bus_address.u.a64.address; ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq, def_pdu_ring_sz, phba->params.defpdu_hdr_sz, BEISCSI_DEFQ_HDR, ulp_num); if (ret) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : be_cmd_create_default_pdu_queue Failed DEFHDR on ULP : %d\n", ulp_num); return ret; } beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, "BM_%d : iscsi hdr def pdu id for ULP : %d is %d\n", ulp_num, phwi_context->be_def_hdrq[ulp_num].id); return 0; } static int beiscsi_create_def_data(struct beiscsi_hba *phba, struct hwi_context_memory *phwi_context, struct hwi_controller *phwi_ctrlr, unsigned int def_pdu_ring_sz, uint8_t ulp_num) { unsigned int idx; int ret; struct be_queue_info *dataq, *cq; struct be_dma_mem *mem; struct be_mem_descriptor *mem_descr; void *dq_vaddress; idx = 0; dataq = &phwi_context->be_def_dataq[ulp_num]; cq = &phwi_context->be_cq[0]; mem = &dataq->dma_mem; mem_descr = phba->init_mem; mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 + (ulp_num * MEM_DESCR_OFFSET); dq_vaddress = mem_descr->mem_array[idx].virtual_address; ret = be_fill_queue(dataq, mem_descr->mem_array[0].size / sizeof(struct phys_addr), sizeof(struct phys_addr), dq_vaddress); if (ret) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : be_fill_queue Failed for DEF PDU " "DATA on ULP : %d\n", ulp_num); return ret; } mem->dma = (unsigned long)mem_descr->mem_array[idx]. bus_address.u.a64.address; ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq, def_pdu_ring_sz, phba->params.defpdu_data_sz, BEISCSI_DEFQ_DATA, ulp_num); if (ret) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d be_cmd_create_default_pdu_queue" " Failed for DEF PDU DATA on ULP : %d\n", ulp_num); return ret; } beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, "BM_%d : iscsi def data id on ULP : %d is %d\n", ulp_num, phwi_context->be_def_dataq[ulp_num].id); beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, "BM_%d : DEFAULT PDU DATA RING CREATED on ULP : %d\n", ulp_num); return 0; } static int beiscsi_post_template_hdr(struct beiscsi_hba *phba) { struct be_mem_descriptor *mem_descr; struct mem_array *pm_arr; struct be_dma_mem sgl; int status, ulp_num; for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { mem_descr = (struct be_mem_descriptor *)phba->init_mem; mem_descr += HWI_MEM_TEMPLATE_HDR_ULP0 + (ulp_num * MEM_DESCR_OFFSET); pm_arr = mem_descr->mem_array; hwi_build_be_sgl_arr(phba, pm_arr, &sgl); status = be_cmd_iscsi_post_template_hdr( &phba->ctrl, &sgl); if (status != 0) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : Post Template HDR Failed for " "ULP_%d\n", ulp_num); return status; } beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, "BM_%d : Template HDR Pages Posted for " "ULP_%d\n", ulp_num); } } return 0; } static int beiscsi_post_pages(struct beiscsi_hba *phba) { struct be_mem_descriptor *mem_descr; struct mem_array *pm_arr; unsigned int page_offset, i; struct be_dma_mem sgl; int status, ulp_num = 0; mem_descr = phba->init_mem; mem_descr += HWI_MEM_SGE; pm_arr = mem_descr->mem_array; for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) break; page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io * phba->fw_config.iscsi_icd_start[ulp_num]) / PAGE_SIZE; for (i = 0; i < mem_descr->num_elements; i++) { hwi_build_be_sgl_arr(phba, pm_arr, &sgl); status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl, page_offset, (pm_arr->size / PAGE_SIZE)); page_offset += pm_arr->size / 
PAGE_SIZE; if (status != 0) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : post sgl failed.\n"); return status; } pm_arr++; } beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, "BM_%d : POSTED PAGES\n"); return 0; } static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q) { struct be_dma_mem *mem = &q->dma_mem; if (mem->va) { dma_free_coherent(&phba->pcidev->dev, mem->size, mem->va, mem->dma); mem->va = NULL; } } static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q, u16 len, u16 entry_size) { struct be_dma_mem *mem = &q->dma_mem; memset(q, 0, sizeof(*q)); q->len = len; q->entry_size = entry_size; mem->size = len * entry_size; mem->va = dma_alloc_coherent(&phba->pcidev->dev, mem->size, &mem->dma, GFP_KERNEL); if (!mem->va) return -ENOMEM; return 0; } static int beiscsi_create_wrb_rings(struct beiscsi_hba *phba, struct hwi_context_memory *phwi_context, struct hwi_controller *phwi_ctrlr) { unsigned int num_wrb_rings; u64 pa_addr_lo; unsigned int idx, num, i, ulp_num; struct mem_array *pwrb_arr; void *wrb_vaddr; struct be_dma_mem sgl; struct be_mem_descriptor *mem_descr; struct hwi_wrb_context *pwrb_context; int status; uint8_t ulp_count = 0, ulp_base_num = 0; uint16_t cid_count_ulp[BEISCSI_ULP_COUNT] = { 0 }; idx = 0; mem_descr = phba->init_mem; mem_descr += HWI_MEM_WRB; pwrb_arr = kmalloc_array(phba->params.cxns_per_ctrl, sizeof(*pwrb_arr), GFP_KERNEL); if (!pwrb_arr) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : Memory alloc failed in create wrb ring.\n"); return -ENOMEM; } wrb_vaddr = mem_descr->mem_array[idx].virtual_address; pa_addr_lo = mem_descr->mem_array[idx].bus_address.u.a64.address; num_wrb_rings = mem_descr->mem_array[idx].size / (phba->params.wrbs_per_cxn * sizeof(struct iscsi_wrb)); for (num = 0; num < phba->params.cxns_per_ctrl; num++) { if (num_wrb_rings) { pwrb_arr[num].virtual_address = wrb_vaddr; pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo; pwrb_arr[num].size = phba->params.wrbs_per_cxn * sizeof(struct iscsi_wrb); wrb_vaddr += pwrb_arr[num].size; pa_addr_lo += pwrb_arr[num].size; num_wrb_rings--; } else { idx++; wrb_vaddr = mem_descr->mem_array[idx].virtual_address; pa_addr_lo = mem_descr->mem_array[idx]. 
bus_address.u.a64.address; num_wrb_rings = mem_descr->mem_array[idx].size / (phba->params.wrbs_per_cxn * sizeof(struct iscsi_wrb)); pwrb_arr[num].virtual_address = wrb_vaddr; pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo; pwrb_arr[num].size = phba->params.wrbs_per_cxn * sizeof(struct iscsi_wrb); wrb_vaddr += pwrb_arr[num].size; pa_addr_lo += pwrb_arr[num].size; num_wrb_rings--; } } /* Get the ULP Count */ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { ulp_count++; ulp_base_num = ulp_num; cid_count_ulp[ulp_num] = BEISCSI_GET_CID_COUNT(phba, ulp_num); } for (i = 0; i < phba->params.cxns_per_ctrl; i++) { if (ulp_count > 1) { ulp_base_num = (ulp_base_num + 1) % BEISCSI_ULP_COUNT; if (!cid_count_ulp[ulp_base_num]) ulp_base_num = (ulp_base_num + 1) % BEISCSI_ULP_COUNT; cid_count_ulp[ulp_base_num]--; } hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl); status = be_cmd_wrbq_create(&phba->ctrl, &sgl, &phwi_context->be_wrbq[i], &phwi_ctrlr->wrb_context[i], ulp_base_num); if (status != 0) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : wrbq create failed."); kfree(pwrb_arr); return status; } pwrb_context = &phwi_ctrlr->wrb_context[i]; BE_SET_CID_TO_CRI(i, pwrb_context->cid); } kfree(pwrb_arr); return 0; } static void free_wrb_handles(struct beiscsi_hba *phba) { unsigned int index; struct hwi_controller *phwi_ctrlr; struct hwi_wrb_context *pwrb_context; phwi_ctrlr = phba->phwi_ctrlr; for (index = 0; index < phba->params.cxns_per_ctrl; index++) { pwrb_context = &phwi_ctrlr->wrb_context[index]; kfree(pwrb_context->pwrb_handle_base); kfree(pwrb_context->pwrb_handle_basestd); } } static void be_mcc_queues_destroy(struct beiscsi_hba *phba) { struct be_ctrl_info *ctrl = &phba->ctrl; struct be_dma_mem *ptag_mem; struct be_queue_info *q; int i, tag; q = &phba->ctrl.mcc_obj.q; for (i = 0; i < MAX_MCC_CMD; i++) { tag = i + 1; if (!test_bit(MCC_TAG_STATE_RUNNING, &ctrl->ptag_state[tag].tag_state)) continue; if (test_bit(MCC_TAG_STATE_TIMEOUT, &ctrl->ptag_state[tag].tag_state)) { ptag_mem = &ctrl->ptag_state[tag].tag_mem_state; if (ptag_mem->size) { dma_free_coherent(&ctrl->pdev->dev, ptag_mem->size, ptag_mem->va, ptag_mem->dma); ptag_mem->size = 0; } continue; } /** * If MCC is still active and waiting then wake up the process. * We are here only because port is going offline. The process * sees that (BEISCSI_HBA_ONLINE is cleared) and EIO error is * returned for the operation and allocated memory cleaned up. */ if (waitqueue_active(&ctrl->mcc_wait[tag])) { ctrl->mcc_tag_status[tag] = MCC_STATUS_FAILED; ctrl->mcc_tag_status[tag] |= CQE_VALID_MASK; wake_up_interruptible(&ctrl->mcc_wait[tag]); /* * Control tag info gets reinitialized in enable * so wait for the process to clear running state. */ while (test_bit(MCC_TAG_STATE_RUNNING, &ctrl->ptag_state[tag].tag_state)) schedule_timeout_uninterruptible(HZ); } /** * For MCC with tag_states MCC_TAG_STATE_ASYNC and * MCC_TAG_STATE_IGNORE nothing needs to done. 
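* Once every tag has been settled, the MCC WRB queue and its completion
* queue are destroyed and their DMA memory freed below, if they were
* created.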
*/ } if (q->created) { beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ); be_queue_free(phba, q); } q = &phba->ctrl.mcc_obj.cq; if (q->created) { beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ); be_queue_free(phba, q); } } static int be_mcc_queues_create(struct beiscsi_hba *phba, struct hwi_context_memory *phwi_context) { struct be_queue_info *q, *cq; struct be_ctrl_info *ctrl = &phba->ctrl; /* Alloc MCC compl queue */ cq = &phba->ctrl.mcc_obj.cq; if (be_queue_alloc(phba, cq, MCC_CQ_LEN, sizeof(struct be_mcc_compl))) goto err; /* Ask BE to create MCC compl queue; */ if (phba->pcidev->msix_enabled) { if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[phba->num_cpus].q, false, true, 0)) goto mcc_cq_free; } else { if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q, false, true, 0)) goto mcc_cq_free; } /* Alloc MCC queue */ q = &phba->ctrl.mcc_obj.q; if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb))) goto mcc_cq_destroy; /* Ask BE to create MCC queue */ if (beiscsi_cmd_mccq_create(phba, q, cq)) goto mcc_q_free; return 0; mcc_q_free: be_queue_free(phba, q); mcc_cq_destroy: beiscsi_cmd_q_destroy(ctrl, cq, QTYPE_CQ); mcc_cq_free: be_queue_free(phba, cq); err: return -ENOMEM; } static void be2iscsi_enable_msix(struct beiscsi_hba *phba) { int nvec = 1; switch (phba->generation) { case BE_GEN2: case BE_GEN3: nvec = BEISCSI_MAX_NUM_CPUS + 1; break; case BE_GEN4: nvec = phba->fw_config.eqid_count; break; default: nvec = 2; break; } /* if eqid_count == 1 fall back to INTX */ if (enable_msix && nvec > 1) { struct irq_affinity desc = { .post_vectors = 1 }; if (pci_alloc_irq_vectors_affinity(phba->pcidev, 2, nvec, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &desc) < 0) { phba->num_cpus = nvec - 1; return; } } phba->num_cpus = 1; } static void hwi_purge_eq(struct beiscsi_hba *phba) { struct hwi_controller *phwi_ctrlr; struct hwi_context_memory *phwi_context; struct be_queue_info *eq; struct be_eq_entry *eqe = NULL; int i, eq_msix; unsigned int num_processed; if (beiscsi_hba_in_error(phba)) return; phwi_ctrlr = phba->phwi_ctrlr; phwi_context = phwi_ctrlr->phwi_ctxt; if (phba->pcidev->msix_enabled) eq_msix = 1; else eq_msix = 0; for (i = 0; i < (phba->num_cpus + eq_msix); i++) { eq = &phwi_context->be_eq[i].q; eqe = queue_tail_node(eq); num_processed = 0; while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] & EQE_VALID_MASK) { AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); queue_tail_inc(eq); eqe = queue_tail_node(eq); num_processed++; } if (num_processed) hwi_ring_eq_db(phba, eq->id, 1, num_processed, 1, 1); } } static void hwi_cleanup_port(struct beiscsi_hba *phba) { struct be_queue_info *q; struct be_ctrl_info *ctrl = &phba->ctrl; struct hwi_controller *phwi_ctrlr; struct hwi_context_memory *phwi_context; int i, eq_for_mcc, ulp_num; for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) beiscsi_cmd_iscsi_cleanup(phba, ulp_num); /** * Purge all EQ entries that may have been left out. This is to * workaround a problem we've seen occasionally where driver gets an * interrupt with EQ entry bit set after stopping the controller. 
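* hwi_purge_eq() below walks each EQ, clears the valid bit on any stale
* entries and rings the EQ doorbell to acknowledge them before the
* queues are torn down.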
*/ hwi_purge_eq(phba); phwi_ctrlr = phba->phwi_ctrlr; phwi_context = phwi_ctrlr->phwi_ctxt; be_cmd_iscsi_remove_template_hdr(ctrl); for (i = 0; i < phba->params.cxns_per_ctrl; i++) { q = &phwi_context->be_wrbq[i]; if (q->created) beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ); } kfree(phwi_context->be_wrbq); free_wrb_handles(phba); for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { q = &phwi_context->be_def_hdrq[ulp_num]; if (q->created) beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ); q = &phwi_context->be_def_dataq[ulp_num]; if (q->created) beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ); } } beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL); for (i = 0; i < (phba->num_cpus); i++) { q = &phwi_context->be_cq[i]; if (q->created) { be_queue_free(phba, q); beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ); } } be_mcc_queues_destroy(phba); if (phba->pcidev->msix_enabled) eq_for_mcc = 1; else eq_for_mcc = 0; for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) { q = &phwi_context->be_eq[i].q; if (q->created) { be_queue_free(phba, q); beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ); } } /* this ensures complete FW cleanup */ beiscsi_cmd_function_reset(phba); /* last communication, indicate driver is unloading */ beiscsi_cmd_special_wrb(&phba->ctrl, 0); } static int hwi_init_port(struct beiscsi_hba *phba) { struct hwi_controller *phwi_ctrlr; struct hwi_context_memory *phwi_context; unsigned int def_pdu_ring_sz; struct be_ctrl_info *ctrl = &phba->ctrl; int status, ulp_num; u16 nbufs; phwi_ctrlr = phba->phwi_ctrlr; phwi_context = phwi_ctrlr->phwi_ctxt; /* set port optic state to unknown */ phba->optic_state = 0xff; status = beiscsi_create_eqs(phba, phwi_context); if (status != 0) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : EQ not created\n"); goto error; } status = be_mcc_queues_create(phba, phwi_context); if (status != 0) goto error; status = beiscsi_check_supported_fw(ctrl, phba); if (status != 0) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : Unsupported fw version\n"); goto error; } status = beiscsi_create_cqs(phba, phwi_context); if (status != 0) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : CQ not created\n"); goto error; } for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { nbufs = phwi_context->pasync_ctx[ulp_num]->num_entries; def_pdu_ring_sz = nbufs * sizeof(struct phys_addr); status = beiscsi_create_def_hdr(phba, phwi_context, phwi_ctrlr, def_pdu_ring_sz, ulp_num); if (status != 0) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : Default Header not created for ULP : %d\n", ulp_num); goto error; } status = beiscsi_create_def_data(phba, phwi_context, phwi_ctrlr, def_pdu_ring_sz, ulp_num); if (status != 0) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : Default Data not created for ULP : %d\n", ulp_num); goto error; } /** * Now that the default PDU rings have been created, * let EP know about it. 
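* beiscsi_hdq_post_handles() below posts nbufs buffer handles for the
* header ring and again for the data ring of this ULP.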
*/ beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_HDR, ulp_num, nbufs); beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_DATA, ulp_num, nbufs); } } status = beiscsi_post_pages(phba); if (status != 0) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : Post SGL Pages Failed\n"); goto error; } status = beiscsi_post_template_hdr(phba); if (status != 0) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : Template HDR Posting for CXN Failed\n"); } status = beiscsi_create_wrb_rings(phba, phwi_context, phwi_ctrlr); if (status != 0) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : WRB Rings not created\n"); goto error; } for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { uint16_t async_arr_idx = 0; if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { uint16_t cri = 0; struct hd_async_context *pasync_ctx; pasync_ctx = HWI_GET_ASYNC_PDU_CTX( phwi_ctrlr, ulp_num); for (cri = 0; cri < phba->params.cxns_per_ctrl; cri++) { if (ulp_num == BEISCSI_GET_ULP_FROM_CRI (phwi_ctrlr, cri)) pasync_ctx->cid_to_async_cri_map[ phwi_ctrlr->wrb_context[cri].cid] = async_arr_idx++; } } } beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, "BM_%d : hwi_init_port success\n"); return 0; error: beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : hwi_init_port failed"); hwi_cleanup_port(phba); return status; } static int hwi_init_controller(struct beiscsi_hba *phba) { struct hwi_controller *phwi_ctrlr; phwi_ctrlr = phba->phwi_ctrlr; if (1 == phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements) { phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba-> init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address; beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, "BM_%d : phwi_ctrlr->phwi_ctxt=%p\n", phwi_ctrlr->phwi_ctxt); } else { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : HWI_MEM_ADDN_CONTEXT is more " "than one element.Failing to load\n"); return -ENOMEM; } iscsi_init_global_templates(phba); if (beiscsi_init_wrb_handle(phba)) return -ENOMEM; if (hwi_init_async_pdu_ctx(phba)) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : hwi_init_async_pdu_ctx failed\n"); return -ENOMEM; } if (hwi_init_port(phba) != 0) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : hwi_init_controller failed\n"); return -ENOMEM; } return 0; } static void beiscsi_free_mem(struct beiscsi_hba *phba) { struct be_mem_descriptor *mem_descr; int i, j; mem_descr = phba->init_mem; for (i = 0; i < SE_MEM_MAX; i++) { for (j = mem_descr->num_elements; j > 0; j--) { dma_free_coherent(&phba->pcidev->dev, mem_descr->mem_array[j - 1].size, mem_descr->mem_array[j - 1].virtual_address, (unsigned long)mem_descr->mem_array[j - 1]. bus_address.u.a64.address); } kfree(mem_descr->mem_array); mem_descr++; } kfree(phba->init_mem); kfree(phba->phwi_ctrlr->wrb_context); kfree(phba->phwi_ctrlr); } static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba) { struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg; struct sgl_handle *psgl_handle; struct iscsi_sge *pfrag; unsigned int arr_index, i, idx; unsigned int ulp_icd_start, ulp_num = 0; phba->io_sgl_hndl_avbl = 0; phba->eh_sgl_hndl_avbl = 0; mem_descr_sglh = phba->init_mem; mem_descr_sglh += HWI_MEM_SGLH; if (1 == mem_descr_sglh->num_elements) { phba->io_sgl_hndl_base = kcalloc(phba->params.ios_per_ctrl, sizeof(struct sgl_handle *), GFP_KERNEL); if (!phba->io_sgl_hndl_base) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : Mem Alloc Failed. 
Failing to load\n"); return -ENOMEM; } phba->eh_sgl_hndl_base = kcalloc(phba->params.icds_per_ctrl - phba->params.ios_per_ctrl, sizeof(struct sgl_handle *), GFP_KERNEL); if (!phba->eh_sgl_hndl_base) { kfree(phba->io_sgl_hndl_base); beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : Mem Alloc Failed. Failing to load\n"); return -ENOMEM; } } else { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : HWI_MEM_SGLH is more than one element." "Failing to load\n"); return -ENOMEM; } arr_index = 0; idx = 0; while (idx < mem_descr_sglh->num_elements) { psgl_handle = mem_descr_sglh->mem_array[idx].virtual_address; for (i = 0; i < (mem_descr_sglh->mem_array[idx].size / sizeof(struct sgl_handle)); i++) { if (arr_index < phba->params.ios_per_ctrl) { phba->io_sgl_hndl_base[arr_index] = psgl_handle; phba->io_sgl_hndl_avbl++; arr_index++; } else { phba->eh_sgl_hndl_base[arr_index - phba->params.ios_per_ctrl] = psgl_handle; arr_index++; phba->eh_sgl_hndl_avbl++; } psgl_handle++; } idx++; } beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, "BM_%d : phba->io_sgl_hndl_avbl=%d " "phba->eh_sgl_hndl_avbl=%d\n", phba->io_sgl_hndl_avbl, phba->eh_sgl_hndl_avbl); mem_descr_sg = phba->init_mem; mem_descr_sg += HWI_MEM_SGE; beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, "\n BM_%d : mem_descr_sg->num_elements=%d\n", mem_descr_sg->num_elements); for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) break; ulp_icd_start = phba->fw_config.iscsi_icd_start[ulp_num]; arr_index = 0; idx = 0; while (idx < mem_descr_sg->num_elements) { pfrag = mem_descr_sg->mem_array[idx].virtual_address; for (i = 0; i < (mem_descr_sg->mem_array[idx].size) / (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io); i++) { if (arr_index < phba->params.ios_per_ctrl) psgl_handle = phba->io_sgl_hndl_base[arr_index]; else psgl_handle = phba->eh_sgl_hndl_base[arr_index - phba->params.ios_per_ctrl]; psgl_handle->pfrag = pfrag; AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0); AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0); pfrag += phba->params.num_sge_per_io; psgl_handle->sgl_index = ulp_icd_start + arr_index++; } idx++; } phba->io_sgl_free_index = 0; phba->io_sgl_alloc_index = 0; phba->eh_sgl_free_index = 0; phba->eh_sgl_alloc_index = 0; return 0; } static int hba_setup_cid_tbls(struct beiscsi_hba *phba) { int ret; uint16_t i, ulp_num; struct ulp_cid_info *ptr_cid_info = NULL; for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { ptr_cid_info = kzalloc(sizeof(struct ulp_cid_info), GFP_KERNEL); if (!ptr_cid_info) { ret = -ENOMEM; goto free_memory; } /* Allocate memory for CID array */ ptr_cid_info->cid_array = kcalloc(BEISCSI_GET_CID_COUNT(phba, ulp_num), sizeof(*ptr_cid_info->cid_array), GFP_KERNEL); if (!ptr_cid_info->cid_array) { kfree(ptr_cid_info); ptr_cid_info = NULL; ret = -ENOMEM; goto free_memory; } ptr_cid_info->avlbl_cids = BEISCSI_GET_CID_COUNT( phba, ulp_num); /* Save the cid_info_array ptr */ phba->cid_array_info[ulp_num] = ptr_cid_info; } } phba->ep_array = kcalloc(phba->params.cxns_per_ctrl, sizeof(struct iscsi_endpoint *), GFP_KERNEL); if (!phba->ep_array) { ret = -ENOMEM; goto free_memory; } phba->conn_table = kcalloc(phba->params.cxns_per_ctrl, sizeof(struct beiscsi_conn *), GFP_KERNEL); if (!phba->conn_table) { kfree(phba->ep_array); phba->ep_array = NULL; ret = -ENOMEM; goto free_memory; } for (i = 0; i < phba->params.cxns_per_ctrl; i++) { ulp_num = 
phba->phwi_ctrlr->wrb_context[i].ulp_num; ptr_cid_info = phba->cid_array_info[ulp_num]; ptr_cid_info->cid_array[ptr_cid_info->cid_alloc++] = phba->phwi_ctrlr->wrb_context[i].cid; } for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { ptr_cid_info = phba->cid_array_info[ulp_num]; ptr_cid_info->cid_alloc = 0; ptr_cid_info->cid_free = 0; } } return 0; free_memory: for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { ptr_cid_info = phba->cid_array_info[ulp_num]; if (ptr_cid_info) { kfree(ptr_cid_info->cid_array); kfree(ptr_cid_info); phba->cid_array_info[ulp_num] = NULL; } } } return ret; } static void hwi_enable_intr(struct beiscsi_hba *phba) { struct be_ctrl_info *ctrl = &phba->ctrl; struct hwi_controller *phwi_ctrlr; struct hwi_context_memory *phwi_context; struct be_queue_info *eq; u8 __iomem *addr; u32 reg, i; u32 enabled; phwi_ctrlr = phba->phwi_ctrlr; phwi_context = phwi_ctrlr->phwi_ctxt; addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET); reg = ioread32(addr); enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; if (!enabled) { reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, "BM_%d : reg =x%08x addr=%p\n", reg, addr); iowrite32(reg, addr); } if (!phba->pcidev->msix_enabled) { eq = &phwi_context->be_eq[0].q; beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, "BM_%d : eq->id=%d\n", eq->id); hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1); } else { for (i = 0; i <= phba->num_cpus; i++) { eq = &phwi_context->be_eq[i].q; beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, "BM_%d : eq->id=%d\n", eq->id); hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1); } } } static void hwi_disable_intr(struct beiscsi_hba *phba) { struct be_ctrl_info *ctrl = &phba->ctrl; u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET; u32 reg = ioread32(addr); u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; if (enabled) { reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; iowrite32(reg, addr); } else beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, "BM_%d : In hwi_disable_intr, Already Disabled\n"); } static int beiscsi_init_port(struct beiscsi_hba *phba) { int ret; ret = hwi_init_controller(phba); if (ret < 0) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : init controller failed\n"); return ret; } ret = beiscsi_init_sgl_handle(phba); if (ret < 0) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : init sgl handles failed\n"); goto cleanup_port; } ret = hba_setup_cid_tbls(phba); if (ret < 0) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : setup CID table failed\n"); kfree(phba->io_sgl_hndl_base); kfree(phba->eh_sgl_hndl_base); goto cleanup_port; } return ret; cleanup_port: hwi_cleanup_port(phba); return ret; } static void beiscsi_cleanup_port(struct beiscsi_hba *phba) { struct ulp_cid_info *ptr_cid_info = NULL; int ulp_num; kfree(phba->io_sgl_hndl_base); kfree(phba->eh_sgl_hndl_base); kfree(phba->ep_array); kfree(phba->conn_table); for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { ptr_cid_info = phba->cid_array_info[ulp_num]; if (ptr_cid_info) { kfree(ptr_cid_info->cid_array); kfree(ptr_cid_info); phba->cid_array_info[ulp_num] = NULL; } } } } /** * beiscsi_free_mgmt_task_handles()- Free driver CXN resources * @beiscsi_conn: ptr to the conn to be cleaned up * @task: ptr to iscsi_task resource to be freed. 
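*
* Releases the task's WRB handle, its management SGL handle and unmaps
* the management task data (mtask_addr) if it was mapped.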
* * Free driver mgmt resources binded to CXN. **/ void beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn, struct iscsi_task *task) { struct beiscsi_io_task *io_task; struct beiscsi_hba *phba = beiscsi_conn->phba; struct hwi_wrb_context *pwrb_context; struct hwi_controller *phwi_ctrlr; uint16_t cri_index = BE_GET_CRI_FROM_CID( beiscsi_conn->beiscsi_conn_cid); phwi_ctrlr = phba->phwi_ctrlr; pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; io_task = task->dd_data; if (io_task->pwrb_handle) { free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle); io_task->pwrb_handle = NULL; } if (io_task->psgl_handle) { free_mgmt_sgl_handle(phba, io_task->psgl_handle); io_task->psgl_handle = NULL; } if (io_task->mtask_addr) { dma_unmap_single(&phba->pcidev->dev, io_task->mtask_addr, io_task->mtask_data_count, DMA_TO_DEVICE); io_task->mtask_addr = 0; } } /** * beiscsi_cleanup_task()- Free driver resources of the task * @task: ptr to the iscsi task * **/ static void beiscsi_cleanup_task(struct iscsi_task *task) { struct beiscsi_io_task *io_task = task->dd_data; struct iscsi_conn *conn = task->conn; struct beiscsi_conn *beiscsi_conn = conn->dd_data; struct beiscsi_hba *phba = beiscsi_conn->phba; struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess; struct hwi_wrb_context *pwrb_context; struct hwi_controller *phwi_ctrlr; uint16_t cri_index = BE_GET_CRI_FROM_CID( beiscsi_conn->beiscsi_conn_cid); phwi_ctrlr = phba->phwi_ctrlr; pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; if (io_task->cmd_bhs) { dma_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs, io_task->bhs_pa.u.a64.address); io_task->cmd_bhs = NULL; task->hdr = NULL; } if (task->sc) { if (io_task->pwrb_handle) { free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle); io_task->pwrb_handle = NULL; } if (io_task->psgl_handle) { free_io_sgl_handle(phba, io_task->psgl_handle); io_task->psgl_handle = NULL; } if (io_task->scsi_cmnd) { if (io_task->num_sg) scsi_dma_unmap(io_task->scsi_cmnd); io_task->scsi_cmnd = NULL; } } else { if (!beiscsi_conn->login_in_progress) beiscsi_free_mgmt_task_handles(beiscsi_conn, task); } } void beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn, struct beiscsi_offload_params *params) { struct wrb_handle *pwrb_handle; struct hwi_wrb_context *pwrb_context = NULL; struct beiscsi_hba *phba = beiscsi_conn->phba; struct iscsi_task *task = beiscsi_conn->task; struct iscsi_session *session = task->conn->session; u32 doorbell = 0; /* * We can always use 0 here because it is reserved by libiscsi for * login/startup related tasks. */ beiscsi_conn->login_in_progress = 0; spin_lock_bh(&session->back_lock); beiscsi_cleanup_task(task); spin_unlock_bh(&session->back_lock); pwrb_handle = alloc_wrb_handle(phba, beiscsi_conn->beiscsi_conn_cid, &pwrb_context); /* Check for the adapter family */ if (is_chip_be2_be3r(phba)) beiscsi_offload_cxn_v0(params, pwrb_handle, phba->init_mem, pwrb_context); else beiscsi_offload_cxn_v2(params, pwrb_handle, pwrb_context); be_dws_le_to_cpu(pwrb_handle->pwrb, sizeof(struct iscsi_target_context_update_wrb)); doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK; doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT; doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; iowrite32(doorbell, phba->db_va + beiscsi_conn->doorbell_offset); /* * There is no completion for CONTEXT_UPDATE. The completion of next * WRB posted guarantees FW's processing and DMA'ing of it. 
* Use beiscsi_put_wrb_handle to put it back in the pool which makes * sure zero'ing or reuse of the WRB only after wrbs_per_cxn. */ beiscsi_put_wrb_handle(pwrb_context, pwrb_handle, phba->params.wrbs_per_cxn); beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, "BM_%d : put CONTEXT_UPDATE pwrb_handle=%p free_index=0x%x wrb_handles_available=%d\n", pwrb_handle, pwrb_context->free_index, pwrb_context->wrb_handles_available); } static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt, int *index, int *age) { *index = (int)itt; if (age) *age = conn->session->age; } /** * beiscsi_alloc_pdu - allocates pdu and related resources * @task: libiscsi task * @opcode: opcode of pdu for task * * This is called with the session lock held. It will allocate * the wrb and sgl if needed for the command. And it will prep * the pdu's itt. beiscsi_parse_pdu will later translate * the pdu itt to the libiscsi task itt. */ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) { struct beiscsi_io_task *io_task = task->dd_data; struct iscsi_conn *conn = task->conn; struct beiscsi_conn *beiscsi_conn = conn->dd_data; struct beiscsi_hba *phba = beiscsi_conn->phba; struct hwi_wrb_context *pwrb_context; struct hwi_controller *phwi_ctrlr; itt_t itt; uint16_t cri_index = 0; struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess; dma_addr_t paddr; io_task->cmd_bhs = dma_pool_alloc(beiscsi_sess->bhs_pool, GFP_ATOMIC, &paddr); if (!io_task->cmd_bhs) return -ENOMEM; io_task->bhs_pa.u.a64.address = paddr; io_task->libiscsi_itt = (itt_t)task->itt; io_task->conn = beiscsi_conn; task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr; task->hdr_max = sizeof(struct be_cmd_bhs); io_task->psgl_handle = NULL; io_task->pwrb_handle = NULL; if (task->sc) { io_task->psgl_handle = alloc_io_sgl_handle(phba); if (!io_task->psgl_handle) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, "BM_%d : Alloc of IO_SGL_ICD Failed " "for the CID : %d\n", beiscsi_conn->beiscsi_conn_cid); goto free_hndls; } io_task->pwrb_handle = alloc_wrb_handle(phba, beiscsi_conn->beiscsi_conn_cid, &io_task->pwrb_context); if (!io_task->pwrb_handle) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, "BM_%d : Alloc of WRB_HANDLE Failed " "for the CID : %d\n", beiscsi_conn->beiscsi_conn_cid); goto free_io_hndls; } } else { io_task->scsi_cmnd = NULL; if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) { beiscsi_conn->task = task; if (!beiscsi_conn->login_in_progress) { io_task->psgl_handle = (struct sgl_handle *) alloc_mgmt_sgl_handle(phba); if (!io_task->psgl_handle) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, "BM_%d : Alloc of MGMT_SGL_ICD Failed " "for the CID : %d\n", beiscsi_conn->beiscsi_conn_cid); goto free_hndls; } beiscsi_conn->login_in_progress = 1; beiscsi_conn->plogin_sgl_handle = io_task->psgl_handle; io_task->pwrb_handle = alloc_wrb_handle(phba, beiscsi_conn->beiscsi_conn_cid, &io_task->pwrb_context); if (!io_task->pwrb_handle) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, "BM_%d : Alloc of WRB_HANDLE Failed " "for the CID : %d\n", beiscsi_conn->beiscsi_conn_cid); goto free_mgmt_hndls; } beiscsi_conn->plogin_wrb_handle = io_task->pwrb_handle; } else { io_task->psgl_handle = beiscsi_conn->plogin_sgl_handle; io_task->pwrb_handle = beiscsi_conn->plogin_wrb_handle; } } else { io_task->psgl_handle = alloc_mgmt_sgl_handle(phba); if (!io_task->psgl_handle) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, "BM_%d : Alloc 
of MGMT_SGL_ICD Failed " "for the CID : %d\n", beiscsi_conn->beiscsi_conn_cid); goto free_hndls; } io_task->pwrb_handle = alloc_wrb_handle(phba, beiscsi_conn->beiscsi_conn_cid, &io_task->pwrb_context); if (!io_task->pwrb_handle) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, "BM_%d : Alloc of WRB_HANDLE Failed " "for the CID : %d\n", beiscsi_conn->beiscsi_conn_cid); goto free_mgmt_hndls; } } } itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle-> wrb_index << 16) | (unsigned int) (io_task->psgl_handle->sgl_index)); io_task->pwrb_handle->pio_handle = task; io_task->cmd_bhs->iscsi_hdr.itt = itt; return 0; free_io_hndls: free_io_sgl_handle(phba, io_task->psgl_handle); goto free_hndls; free_mgmt_hndls: free_mgmt_sgl_handle(phba, io_task->psgl_handle); io_task->psgl_handle = NULL; free_hndls: phwi_ctrlr = phba->phwi_ctrlr; cri_index = BE_GET_CRI_FROM_CID( beiscsi_conn->beiscsi_conn_cid); pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; if (io_task->pwrb_handle) free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle); io_task->pwrb_handle = NULL; dma_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs, io_task->bhs_pa.u.a64.address); io_task->cmd_bhs = NULL; return -ENOMEM; } static int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg, unsigned int num_sg, unsigned int xferlen, unsigned int writedir) { struct beiscsi_io_task *io_task = task->dd_data; struct iscsi_conn *conn = task->conn; struct beiscsi_conn *beiscsi_conn = conn->dd_data; struct beiscsi_hba *phba = beiscsi_conn->phba; struct iscsi_wrb *pwrb = NULL; unsigned int doorbell = 0; pwrb = io_task->pwrb_handle->pwrb; io_task->bhs_len = sizeof(struct be_cmd_bhs); if (writedir) { AMAP_SET_BITS(struct amap_iscsi_wrb_v2, type, pwrb, INI_WR_CMD); AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, 1); } else { AMAP_SET_BITS(struct amap_iscsi_wrb_v2, type, pwrb, INI_RD_CMD); AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, 0); } io_task->wrb_type = AMAP_GET_BITS(struct amap_iscsi_wrb_v2, type, pwrb); AMAP_SET_BITS(struct amap_iscsi_wrb_v2, lun, pwrb, cpu_to_be16(*(unsigned short *) &io_task->cmd_bhs->iscsi_hdr.lun)); AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb, xferlen); AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb, io_task->pwrb_handle->wrb_index); AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb, be32_to_cpu(task->cmdsn)); AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb, io_task->psgl_handle->sgl_index); hwi_write_sgl_v2(pwrb, sg, num_sg, io_task); AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb, io_task->pwrb_handle->wrb_index); if (io_task->pwrb_context->plast_wrb) AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, io_task->pwrb_context->plast_wrb, io_task->pwrb_handle->wrb_index); io_task->pwrb_context->plast_wrb = pwrb; be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb)); doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK; doorbell |= (io_task->pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT; doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; iowrite32(doorbell, phba->db_va + beiscsi_conn->doorbell_offset); return 0; } static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg, unsigned int num_sg, unsigned int xferlen, unsigned int writedir) { struct beiscsi_io_task *io_task = task->dd_data; struct iscsi_conn *conn = task->conn; struct beiscsi_conn *beiscsi_conn = conn->dd_data; struct beiscsi_hba *phba = beiscsi_conn->phba; struct iscsi_wrb *pwrb = NULL; unsigned int doorbell = 0; 
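/*
 * BE2/BE3 (GEN2/GEN3) data-path WRB build; beiscsi_iotask_v2() above is
 * the GEN4/SKH variant of the same flow (see the iotask_fn selection in
 * beiscsi_dev_probe()). Going by the masks and shifts used below, the
 * doorbell written at the end packs:
 *   connection id             -> DB_WRB_POST_CID_MASK bits
 *   WRB index                 -> DB_DEF_PDU_WRB_INDEX field
 *   number of WRBs posted (1) -> DB_DEF_PDU_NUM_POSTED field
 */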
pwrb = io_task->pwrb_handle->pwrb; io_task->bhs_len = sizeof(struct be_cmd_bhs); if (writedir) { AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, INI_WR_CMD); AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1); } else { AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, INI_RD_CMD); AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0); } io_task->wrb_type = AMAP_GET_BITS(struct amap_iscsi_wrb, type, pwrb); AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb, cpu_to_be16(*(unsigned short *) &io_task->cmd_bhs->iscsi_hdr.lun)); AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen); AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb, io_task->pwrb_handle->wrb_index); AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, be32_to_cpu(task->cmdsn)); AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb, io_task->psgl_handle->sgl_index); hwi_write_sgl(pwrb, sg, num_sg, io_task); AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb, io_task->pwrb_handle->wrb_index); if (io_task->pwrb_context->plast_wrb) AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, io_task->pwrb_context->plast_wrb, io_task->pwrb_handle->wrb_index); io_task->pwrb_context->plast_wrb = pwrb; be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb)); doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK; doorbell |= (io_task->pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT; doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; iowrite32(doorbell, phba->db_va + beiscsi_conn->doorbell_offset); return 0; } static int beiscsi_mtask(struct iscsi_task *task) { struct beiscsi_io_task *io_task = task->dd_data; struct iscsi_conn *conn = task->conn; struct beiscsi_conn *beiscsi_conn = conn->dd_data; struct beiscsi_hba *phba = beiscsi_conn->phba; struct iscsi_wrb *pwrb = NULL; unsigned int doorbell = 0; unsigned int cid; unsigned int pwrb_typeoffset = 0; int ret = 0; cid = beiscsi_conn->beiscsi_conn_cid; pwrb = io_task->pwrb_handle->pwrb; if (is_chip_be2_be3r(phba)) { AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, be32_to_cpu(task->cmdsn)); AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb, io_task->pwrb_handle->wrb_index); AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb, io_task->psgl_handle->sgl_index); AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, task->data_count); AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb, io_task->pwrb_handle->wrb_index); if (io_task->pwrb_context->plast_wrb) AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, io_task->pwrb_context->plast_wrb, io_task->pwrb_handle->wrb_index); io_task->pwrb_context->plast_wrb = pwrb; pwrb_typeoffset = BE_WRB_TYPE_OFFSET; } else { AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb, be32_to_cpu(task->cmdsn)); AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb, io_task->pwrb_handle->wrb_index); AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb, io_task->psgl_handle->sgl_index); AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb, task->data_count); AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb, io_task->pwrb_handle->wrb_index); if (io_task->pwrb_context->plast_wrb) AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, io_task->pwrb_context->plast_wrb, io_task->pwrb_handle->wrb_index); io_task->pwrb_context->plast_wrb = pwrb; pwrb_typeoffset = SKH_WRB_TYPE_OFFSET; } switch (task->hdr->opcode & ISCSI_OPCODE_MASK) { case ISCSI_OP_LOGIN: AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1); ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset); ret = hwi_write_buffer(pwrb, task); break; 
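/*
 * A NOP-Out carrying a valid TTT answers a target NOP-In and expects no
 * reply, so it is posted as TGT_DM_CMD with dmsg set; a NOP-Out with the
 * reserved TTT is an initiator ping that does expect a NOP-In response,
 * hence INI_RD_CMD with dmsg cleared.
 */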
case ISCSI_OP_NOOP_OUT: if (task->hdr->ttt != ISCSI_RESERVED_TAG) { ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset); if (is_chip_be2_be3r(phba)) AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 1); else AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dmsg, pwrb, 1); } else { ADAPTER_SET_WRB_TYPE(pwrb, INI_RD_CMD, pwrb_typeoffset); if (is_chip_be2_be3r(phba)) AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0); else AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dmsg, pwrb, 0); } ret = hwi_write_buffer(pwrb, task); break; case ISCSI_OP_TEXT: ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset); ret = hwi_write_buffer(pwrb, task); break; case ISCSI_OP_SCSI_TMFUNC: ADAPTER_SET_WRB_TYPE(pwrb, INI_TMF_CMD, pwrb_typeoffset); ret = hwi_write_buffer(pwrb, task); break; case ISCSI_OP_LOGOUT: ADAPTER_SET_WRB_TYPE(pwrb, HWH_TYPE_LOGOUT, pwrb_typeoffset); ret = hwi_write_buffer(pwrb, task); break; default: beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, "BM_%d : opcode =%d Not supported\n", task->hdr->opcode & ISCSI_OPCODE_MASK); return -EINVAL; } if (ret) return ret; /* Set the task type */ io_task->wrb_type = (is_chip_be2_be3r(phba)) ? AMAP_GET_BITS(struct amap_iscsi_wrb, type, pwrb) : AMAP_GET_BITS(struct amap_iscsi_wrb_v2, type, pwrb); doorbell |= cid & DB_WRB_POST_CID_MASK; doorbell |= (io_task->pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT; doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; iowrite32(doorbell, phba->db_va + beiscsi_conn->doorbell_offset); return 0; } static int beiscsi_task_xmit(struct iscsi_task *task) { struct beiscsi_io_task *io_task = task->dd_data; struct scsi_cmnd *sc = task->sc; struct beiscsi_hba *phba; struct scatterlist *sg; int num_sg; unsigned int writedir = 0, xferlen = 0; phba = io_task->conn->phba; /** * HBA in error includes BEISCSI_HBA_FW_TIMEOUT. IO path might be * operational if FW still gets heartbeat from EP FW. Is management * path really needed to continue further? */ if (!beiscsi_hba_is_online(phba)) return -EIO; if (!io_task->conn->login_in_progress) task->hdr->exp_statsn = 0; if (!sc) return beiscsi_mtask(task); io_task->scsi_cmnd = sc; io_task->num_sg = 0; num_sg = scsi_dma_map(sc); if (num_sg < 0) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_IO | BEISCSI_LOG_ISCSI, "BM_%d : scsi_dma_map Failed " "Driver_ITT : 0x%x ITT : 0x%x Xferlen : 0x%x\n", be32_to_cpu(io_task->cmd_bhs->iscsi_hdr.itt), io_task->libiscsi_itt, scsi_bufflen(sc)); return num_sg; } /** * For scsi cmd task, check num_sg before unmapping in cleanup_task. * For management task, cleanup_task checks mtask_addr before unmapping. 
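* io_task->num_sg is set only after scsi_dma_map() has succeeded below,
* so a failed mapping is never unmapped again in cleanup.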
*/ io_task->num_sg = num_sg; xferlen = scsi_bufflen(sc); sg = scsi_sglist(sc); if (sc->sc_data_direction == DMA_TO_DEVICE) writedir = 1; else writedir = 0; return phba->iotask_fn(task, sg, num_sg, xferlen, writedir); } /** * beiscsi_bsg_request - handle bsg request from ISCSI transport * @job: job to handle */ static int beiscsi_bsg_request(struct bsg_job *job) { struct Scsi_Host *shost; struct beiscsi_hba *phba; struct iscsi_bsg_request *bsg_req = job->request; int rc = -EINVAL; unsigned int tag; struct be_dma_mem nonemb_cmd; struct be_cmd_resp_hdr *resp; struct iscsi_bsg_reply *bsg_reply = job->reply; unsigned short status, extd_status; shost = iscsi_job_to_shost(job); phba = iscsi_host_priv(shost); if (!beiscsi_hba_is_online(phba)) { beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, "BM_%d : HBA in error 0x%lx\n", phba->state); return -ENXIO; } switch (bsg_req->msgcode) { case ISCSI_BSG_HST_VENDOR: nonemb_cmd.va = dma_alloc_coherent(&phba->ctrl.pdev->dev, job->request_payload.payload_len, &nonemb_cmd.dma, GFP_KERNEL); if (nonemb_cmd.va == NULL) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, "BM_%d : Failed to allocate memory for " "beiscsi_bsg_request\n"); return -ENOMEM; } tag = mgmt_vendor_specific_fw_cmd(&phba->ctrl, phba, job, &nonemb_cmd); if (!tag) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, "BM_%d : MBX Tag Allocation Failed\n"); dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size, nonemb_cmd.va, nonemb_cmd.dma); return -EAGAIN; } rc = wait_event_interruptible_timeout( phba->ctrl.mcc_wait[tag], phba->ctrl.mcc_tag_status[tag], msecs_to_jiffies( BEISCSI_HOST_MBX_TIMEOUT)); if (!test_bit(BEISCSI_HBA_ONLINE, &phba->state)) { clear_bit(MCC_TAG_STATE_RUNNING, &phba->ctrl.ptag_state[tag].tag_state); dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size, nonemb_cmd.va, nonemb_cmd.dma); return -EIO; } extd_status = (phba->ctrl.mcc_tag_status[tag] & CQE_STATUS_ADDL_MASK) >> CQE_STATUS_ADDL_SHIFT; status = phba->ctrl.mcc_tag_status[tag] & CQE_STATUS_MASK; free_mcc_wrb(&phba->ctrl, tag); resp = (struct be_cmd_resp_hdr *)nonemb_cmd.va; sg_copy_from_buffer(job->reply_payload.sg_list, job->reply_payload.sg_cnt, nonemb_cmd.va, (resp->response_length + sizeof(*resp))); bsg_reply->reply_payload_rcv_len = resp->response_length; bsg_reply->result = status; bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size, nonemb_cmd.va, nonemb_cmd.dma); if (status || extd_status) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, "BM_%d : MBX Cmd Failed" " status = %d extd_status = %d\n", status, extd_status); return -EIO; } else { rc = 0; } break; default: beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, "BM_%d : Unsupported bsg command: 0x%x\n", bsg_req->msgcode); break; } return rc; } static void beiscsi_hba_attrs_init(struct beiscsi_hba *phba) { /* Set the logging parameter */ beiscsi_log_enable_init(phba, beiscsi_log_enable); } void beiscsi_start_boot_work(struct beiscsi_hba *phba, unsigned int s_handle) { if (phba->boot_struct.boot_kset) return; /* skip if boot work is already in progress */ if (test_and_set_bit(BEISCSI_HBA_BOOT_WORK, &phba->state)) return; phba->boot_struct.retry = 3; phba->boot_struct.tag = 0; phba->boot_struct.s_handle = s_handle; phba->boot_struct.action = BEISCSI_BOOT_GET_SHANDLE; schedule_work(&phba->boot_work); } #define BEISCSI_SYSFS_ISCSI_BOOT_FLAGS 3 /* * beiscsi_show_boot_tgt_info() * Boot flag info for iscsi-utilities * Bit 0 Block valid flag * Bit 1 Firmware booting selected */ static 
ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf) { struct beiscsi_hba *phba = data; struct mgmt_session_info *boot_sess = &phba->boot_struct.boot_sess; struct mgmt_conn_info *boot_conn = &boot_sess->conn_list[0]; char *str = buf; int rc = -EPERM; switch (type) { case ISCSI_BOOT_TGT_NAME: rc = sprintf(buf, "%.*s\n", (int)strlen(boot_sess->target_name), (char *)&boot_sess->target_name); break; case ISCSI_BOOT_TGT_IP_ADDR: if (boot_conn->dest_ipaddr.ip_type == BEISCSI_IP_TYPE_V4) rc = sprintf(buf, "%pI4\n", (char *)&boot_conn->dest_ipaddr.addr); else rc = sprintf(str, "%pI6\n", (char *)&boot_conn->dest_ipaddr.addr); break; case ISCSI_BOOT_TGT_PORT: rc = sprintf(str, "%d\n", boot_conn->dest_port); break; case ISCSI_BOOT_TGT_CHAP_NAME: rc = sprintf(str, "%.*s\n", boot_conn->negotiated_login_options.auth_data.chap. target_chap_name_length, (char *)&boot_conn->negotiated_login_options. auth_data.chap.target_chap_name); break; case ISCSI_BOOT_TGT_CHAP_SECRET: rc = sprintf(str, "%.*s\n", boot_conn->negotiated_login_options.auth_data.chap. target_secret_length, (char *)&boot_conn->negotiated_login_options. auth_data.chap.target_secret); break; case ISCSI_BOOT_TGT_REV_CHAP_NAME: rc = sprintf(str, "%.*s\n", boot_conn->negotiated_login_options.auth_data.chap. intr_chap_name_length, (char *)&boot_conn->negotiated_login_options. auth_data.chap.intr_chap_name); break; case ISCSI_BOOT_TGT_REV_CHAP_SECRET: rc = sprintf(str, "%.*s\n", boot_conn->negotiated_login_options.auth_data.chap. intr_secret_length, (char *)&boot_conn->negotiated_login_options. auth_data.chap.intr_secret); break; case ISCSI_BOOT_TGT_FLAGS: rc = sprintf(str, "%d\n", BEISCSI_SYSFS_ISCSI_BOOT_FLAGS); break; case ISCSI_BOOT_TGT_NIC_ASSOC: rc = sprintf(str, "0\n"); break; } return rc; } static ssize_t beiscsi_show_boot_ini_info(void *data, int type, char *buf) { struct beiscsi_hba *phba = data; char *str = buf; int rc = -EPERM; switch (type) { case ISCSI_BOOT_INI_INITIATOR_NAME: rc = sprintf(str, "%s\n", phba->boot_struct.boot_sess.initiator_iscsiname); break; } return rc; } static ssize_t beiscsi_show_boot_eth_info(void *data, int type, char *buf) { struct beiscsi_hba *phba = data; char *str = buf; int rc = -EPERM; switch (type) { case ISCSI_BOOT_ETH_FLAGS: rc = sprintf(str, "%d\n", BEISCSI_SYSFS_ISCSI_BOOT_FLAGS); break; case ISCSI_BOOT_ETH_INDEX: rc = sprintf(str, "0\n"); break; case ISCSI_BOOT_ETH_MAC: rc = beiscsi_get_macaddr(str, phba); break; } return rc; } static umode_t beiscsi_tgt_get_attr_visibility(void *data, int type) { umode_t rc = 0; switch (type) { case ISCSI_BOOT_TGT_NAME: case ISCSI_BOOT_TGT_IP_ADDR: case ISCSI_BOOT_TGT_PORT: case ISCSI_BOOT_TGT_CHAP_NAME: case ISCSI_BOOT_TGT_CHAP_SECRET: case ISCSI_BOOT_TGT_REV_CHAP_NAME: case ISCSI_BOOT_TGT_REV_CHAP_SECRET: case ISCSI_BOOT_TGT_NIC_ASSOC: case ISCSI_BOOT_TGT_FLAGS: rc = S_IRUGO; break; } return rc; } static umode_t beiscsi_ini_get_attr_visibility(void *data, int type) { umode_t rc = 0; switch (type) { case ISCSI_BOOT_INI_INITIATOR_NAME: rc = S_IRUGO; break; } return rc; } static umode_t beiscsi_eth_get_attr_visibility(void *data, int type) { umode_t rc = 0; switch (type) { case ISCSI_BOOT_ETH_FLAGS: case ISCSI_BOOT_ETH_MAC: case ISCSI_BOOT_ETH_INDEX: rc = S_IRUGO; break; } return rc; } static void beiscsi_boot_kobj_release(void *data) { struct beiscsi_hba *phba = data; scsi_host_put(phba->shost); } static int beiscsi_boot_create_kset(struct beiscsi_hba *phba) { struct boot_struct *bs = &phba->boot_struct; struct iscsi_boot_kobj *boot_kobj; if 
(bs->boot_kset) { __beiscsi_log(phba, KERN_ERR, "BM_%d: boot_kset already created\n"); return 0; } bs->boot_kset = iscsi_boot_create_host_kset(phba->shost->host_no); if (!bs->boot_kset) { __beiscsi_log(phba, KERN_ERR, "BM_%d: boot_kset alloc failed\n"); return -ENOMEM; } /* get shost ref because the show function will refer phba */ if (!scsi_host_get(phba->shost)) goto free_kset; boot_kobj = iscsi_boot_create_target(bs->boot_kset, 0, phba, beiscsi_show_boot_tgt_info, beiscsi_tgt_get_attr_visibility, beiscsi_boot_kobj_release); if (!boot_kobj) goto put_shost; if (!scsi_host_get(phba->shost)) goto free_kset; boot_kobj = iscsi_boot_create_initiator(bs->boot_kset, 0, phba, beiscsi_show_boot_ini_info, beiscsi_ini_get_attr_visibility, beiscsi_boot_kobj_release); if (!boot_kobj) goto put_shost; if (!scsi_host_get(phba->shost)) goto free_kset; boot_kobj = iscsi_boot_create_ethernet(bs->boot_kset, 0, phba, beiscsi_show_boot_eth_info, beiscsi_eth_get_attr_visibility, beiscsi_boot_kobj_release); if (!boot_kobj) goto put_shost; return 0; put_shost: scsi_host_put(phba->shost); free_kset: iscsi_boot_destroy_kset(bs->boot_kset); bs->boot_kset = NULL; return -ENOMEM; } static void beiscsi_boot_work(struct work_struct *work) { struct beiscsi_hba *phba = container_of(work, struct beiscsi_hba, boot_work); struct boot_struct *bs = &phba->boot_struct; unsigned int tag = 0; if (!beiscsi_hba_is_online(phba)) return; beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, "BM_%d : %s action %d\n", __func__, phba->boot_struct.action); switch (phba->boot_struct.action) { case BEISCSI_BOOT_REOPEN_SESS: tag = beiscsi_boot_reopen_sess(phba); break; case BEISCSI_BOOT_GET_SHANDLE: tag = __beiscsi_boot_get_shandle(phba, 1); break; case BEISCSI_BOOT_GET_SINFO: tag = beiscsi_boot_get_sinfo(phba); break; case BEISCSI_BOOT_LOGOUT_SESS: tag = beiscsi_boot_logout_sess(phba); break; case BEISCSI_BOOT_CREATE_KSET: beiscsi_boot_create_kset(phba); /** * updated boot_kset is made visible to all before * ending the boot work. 
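* The mb() below orders the boot_kset store before the
* BEISCSI_HBA_BOOT_WORK bit is cleared.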
*/ mb(); clear_bit(BEISCSI_HBA_BOOT_WORK, &phba->state); return; } if (!tag) { if (bs->retry--) schedule_work(&phba->boot_work); else clear_bit(BEISCSI_HBA_BOOT_WORK, &phba->state); } } static void beiscsi_eqd_update_work(struct work_struct *work) { struct hwi_context_memory *phwi_context; struct be_set_eqd set_eqd[MAX_CPUS]; struct hwi_controller *phwi_ctrlr; struct be_eq_obj *pbe_eq; struct beiscsi_hba *phba; unsigned int pps, delta; struct be_aic_obj *aic; int eqd, i, num = 0; unsigned long now; phba = container_of(work, struct beiscsi_hba, eqd_update.work); if (!beiscsi_hba_is_online(phba)) return; phwi_ctrlr = phba->phwi_ctrlr; phwi_context = phwi_ctrlr->phwi_ctxt; for (i = 0; i <= phba->num_cpus; i++) { aic = &phba->aic_obj[i]; pbe_eq = &phwi_context->be_eq[i]; now = jiffies; if (!aic->jiffies || time_before(now, aic->jiffies) || pbe_eq->cq_count < aic->eq_prev) { aic->jiffies = now; aic->eq_prev = pbe_eq->cq_count; continue; } delta = jiffies_to_msecs(now - aic->jiffies); pps = (((u32)(pbe_eq->cq_count - aic->eq_prev) * 1000) / delta); eqd = (pps / 1500) << 2; if (eqd < 8) eqd = 0; eqd = min_t(u32, eqd, BEISCSI_EQ_DELAY_MAX); eqd = max_t(u32, eqd, BEISCSI_EQ_DELAY_MIN); aic->jiffies = now; aic->eq_prev = pbe_eq->cq_count; if (eqd != aic->prev_eqd) { set_eqd[num].delay_multiplier = (eqd * 65)/100; set_eqd[num].eq_id = pbe_eq->q.id; aic->prev_eqd = eqd; num++; } } if (num) /* completion of this is ignored */ beiscsi_modify_eq_delay(phba, set_eqd, num); schedule_delayed_work(&phba->eqd_update, msecs_to_jiffies(BEISCSI_EQD_UPDATE_INTERVAL)); } static void beiscsi_hw_tpe_check(struct timer_list *t) { struct beiscsi_hba *phba = from_timer(phba, t, hw_check); u32 wait; /* if not TPE, do nothing */ if (!beiscsi_detect_tpe(phba)) return; /* wait default 4000ms before recovering */ wait = 4000; if (phba->ue2rp > BEISCSI_UE_DETECT_INTERVAL) wait = phba->ue2rp - BEISCSI_UE_DETECT_INTERVAL; queue_delayed_work(phba->wq, &phba->recover_port, msecs_to_jiffies(wait)); } static void beiscsi_hw_health_check(struct timer_list *t) { struct beiscsi_hba *phba = from_timer(phba, t, hw_check); beiscsi_detect_ue(phba); if (beiscsi_detect_ue(phba)) { __beiscsi_log(phba, KERN_ERR, "BM_%d : port in error: %lx\n", phba->state); /* sessions are no longer valid, so first fail the sessions */ queue_work(phba->wq, &phba->sess_work); /* detect UER supported */ if (!test_bit(BEISCSI_HBA_UER_SUPP, &phba->state)) return; /* modify this timer to check TPE */ phba->hw_check.function = beiscsi_hw_tpe_check; } mod_timer(&phba->hw_check, jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL)); } /* * beiscsi_enable_port()- Enables the disabled port. * Only port resources freed in disable function are reallocated. * This is called in HBA error handling path. * * @phba: Instance of driver private structure * **/ static int beiscsi_enable_port(struct beiscsi_hba *phba) { struct hwi_context_memory *phwi_context; struct hwi_controller *phwi_ctrlr; struct be_eq_obj *pbe_eq; int ret, i; if (test_bit(BEISCSI_HBA_ONLINE, &phba->state)) { __beiscsi_log(phba, KERN_ERR, "BM_%d : %s : port is online %lx\n", __func__, phba->state); return 0; } ret = beiscsi_init_sliport(phba); if (ret) return ret; be2iscsi_enable_msix(phba); beiscsi_get_params(phba); beiscsi_set_host_data(phba); /* Re-enable UER. If different TPE occurs then it is recoverable. 
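* The hw_check timer re-armed at the end of this function always starts
* out as beiscsi_hw_health_check(); it is switched to
* beiscsi_hw_tpe_check() only once a UE has been detected on a
* UER-capable port.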
*/ beiscsi_set_uer_feature(phba); phba->shost->max_id = phba->params.cxns_per_ctrl - 1; phba->shost->can_queue = phba->params.ios_per_ctrl; ret = beiscsi_init_port(phba); if (ret < 0) { __beiscsi_log(phba, KERN_ERR, "BM_%d : init port failed\n"); goto disable_msix; } for (i = 0; i < MAX_MCC_CMD; i++) { init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]); phba->ctrl.mcc_tag[i] = i + 1; phba->ctrl.mcc_tag_status[i + 1] = 0; phba->ctrl.mcc_tag_available++; } phwi_ctrlr = phba->phwi_ctrlr; phwi_context = phwi_ctrlr->phwi_ctxt; for (i = 0; i < phba->num_cpus; i++) { pbe_eq = &phwi_context->be_eq[i]; irq_poll_init(&pbe_eq->iopoll, be_iopoll_budget, be_iopoll); } i = (phba->pcidev->msix_enabled) ? i : 0; /* Work item for MCC handling */ pbe_eq = &phwi_context->be_eq[i]; INIT_WORK(&pbe_eq->mcc_work, beiscsi_mcc_work); ret = beiscsi_init_irqs(phba); if (ret < 0) { __beiscsi_log(phba, KERN_ERR, "BM_%d : setup IRQs failed %d\n", ret); goto cleanup_port; } hwi_enable_intr(phba); /* port operational: clear all error bits */ set_bit(BEISCSI_HBA_ONLINE, &phba->state); __beiscsi_log(phba, KERN_INFO, "BM_%d : port online: 0x%lx\n", phba->state); /* start hw_check timer and eqd_update work */ schedule_delayed_work(&phba->eqd_update, msecs_to_jiffies(BEISCSI_EQD_UPDATE_INTERVAL)); /** * Timer function gets modified for TPE detection. * Always reinit to do health check first. */ phba->hw_check.function = beiscsi_hw_health_check; mod_timer(&phba->hw_check, jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL)); return 0; cleanup_port: for (i = 0; i < phba->num_cpus; i++) { pbe_eq = &phwi_context->be_eq[i]; irq_poll_disable(&pbe_eq->iopoll); } hwi_cleanup_port(phba); disable_msix: pci_free_irq_vectors(phba->pcidev); return ret; } /* * beiscsi_disable_port()- Disable port and cleanup driver resources. * This is called in HBA error handling and driver removal. * @phba: Instance Priv structure * @unload: indicate driver is unloading * * Free the OS and HW resources held by the driver **/ static void beiscsi_disable_port(struct beiscsi_hba *phba, int unload) { struct hwi_context_memory *phwi_context; struct hwi_controller *phwi_ctrlr; struct be_eq_obj *pbe_eq; unsigned int i; if (!test_and_clear_bit(BEISCSI_HBA_ONLINE, &phba->state)) return; phwi_ctrlr = phba->phwi_ctrlr; phwi_context = phwi_ctrlr->phwi_ctxt; hwi_disable_intr(phba); beiscsi_free_irqs(phba); pci_free_irq_vectors(phba->pcidev); for (i = 0; i < phba->num_cpus; i++) { pbe_eq = &phwi_context->be_eq[i]; irq_poll_disable(&pbe_eq->iopoll); } cancel_delayed_work_sync(&phba->eqd_update); cancel_work_sync(&phba->boot_work); /* WQ might be running cancel queued mcc_work if we are not exiting */ if (!unload && beiscsi_hba_in_error(phba)) { pbe_eq = &phwi_context->be_eq[i]; cancel_work_sync(&pbe_eq->mcc_work); } hwi_cleanup_port(phba); beiscsi_cleanup_port(phba); } static void beiscsi_sess_work(struct work_struct *work) { struct beiscsi_hba *phba; phba = container_of(work, struct beiscsi_hba, sess_work); /* * This work gets scheduled only in case of HBA error. * Old sessions are gone so need to be re-established. * iscsi_session_failure needs process context hence this work. 
*/ iscsi_host_for_each_session(phba->shost, beiscsi_session_fail); } static void beiscsi_recover_port(struct work_struct *work) { struct beiscsi_hba *phba; phba = container_of(work, struct beiscsi_hba, recover_port.work); beiscsi_disable_port(phba, 0); beiscsi_enable_port(phba); } static pci_ers_result_t beiscsi_eeh_err_detected(struct pci_dev *pdev, pci_channel_state_t state) { struct beiscsi_hba *phba = NULL; phba = (struct beiscsi_hba *)pci_get_drvdata(pdev); set_bit(BEISCSI_HBA_PCI_ERR, &phba->state); beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : EEH error detected\n"); /* first stop UE detection when PCI error detected */ del_timer_sync(&phba->hw_check); cancel_delayed_work_sync(&phba->recover_port); /* sessions are no longer valid, so first fail the sessions */ iscsi_host_for_each_session(phba->shost, beiscsi_session_fail); beiscsi_disable_port(phba, 0); if (state == pci_channel_io_perm_failure) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : EEH : State PERM Failure"); return PCI_ERS_RESULT_DISCONNECT; } pci_disable_device(pdev); /* The error could cause the FW to trigger a flash debug dump. * Resetting the card while flash dump is in progress * can cause it not to recover; wait for it to finish. * Wait only for first function as it is needed only once per * adapter. **/ if (pdev->devfn == 0) ssleep(30); return PCI_ERS_RESULT_NEED_RESET; } static pci_ers_result_t beiscsi_eeh_reset(struct pci_dev *pdev) { struct beiscsi_hba *phba = NULL; int status = 0; phba = (struct beiscsi_hba *)pci_get_drvdata(pdev); beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : EEH Reset\n"); status = pci_enable_device(pdev); if (status) return PCI_ERS_RESULT_DISCONNECT; pci_set_master(pdev); pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); status = beiscsi_check_fw_rdy(phba); if (status) { beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, "BM_%d : EEH Reset Completed\n"); } else { beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, "BM_%d : EEH Reset Completion Failure\n"); return PCI_ERS_RESULT_DISCONNECT; } return PCI_ERS_RESULT_RECOVERED; } static void beiscsi_eeh_resume(struct pci_dev *pdev) { struct beiscsi_hba *phba; int ret; phba = (struct beiscsi_hba *)pci_get_drvdata(pdev); pci_save_state(pdev); ret = beiscsi_enable_port(phba); if (ret) __beiscsi_log(phba, KERN_ERR, "BM_%d : AER EEH resume failed\n"); } static int beiscsi_dev_probe(struct pci_dev *pcidev, const struct pci_device_id *id) { struct hwi_context_memory *phwi_context; struct hwi_controller *phwi_ctrlr; struct beiscsi_hba *phba = NULL; struct be_eq_obj *pbe_eq; unsigned int s_handle; char wq_name[20]; int ret, i; ret = beiscsi_enable_pci(pcidev); if (ret < 0) { dev_err(&pcidev->dev, "beiscsi_dev_probe - Failed to enable pci device\n"); return ret; } phba = beiscsi_hba_alloc(pcidev); if (!phba) { dev_err(&pcidev->dev, "beiscsi_dev_probe - Failed in beiscsi_hba_alloc\n"); ret = -ENOMEM; goto disable_pci; } pci_save_state(pcidev); /* Initialize Driver configuration Paramters */ beiscsi_hba_attrs_init(phba); phba->mac_addr_set = false; switch (pcidev->device) { case BE_DEVICE_ID1: case OC_DEVICE_ID1: case OC_DEVICE_ID2: phba->generation = BE_GEN2; phba->iotask_fn = beiscsi_iotask; dev_warn(&pcidev->dev, "Obsolete/Unsupported BE2 Adapter Family\n"); break; case BE_DEVICE_ID2: case OC_DEVICE_ID3: phba->generation = BE_GEN3; phba->iotask_fn = beiscsi_iotask; break; case OC_SKH_ID1: phba->generation = BE_GEN4; phba->iotask_fn = beiscsi_iotask_v2; break; default: phba->generation = 0; } ret = be_ctrl_init(phba, 
pcidev); if (ret) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : be_ctrl_init failed\n"); goto free_hba; } ret = beiscsi_init_sliport(phba); if (ret) goto free_hba; spin_lock_init(&phba->io_sgl_lock); spin_lock_init(&phba->mgmt_sgl_lock); spin_lock_init(&phba->async_pdu_lock); ret = beiscsi_get_fw_config(&phba->ctrl, phba); if (ret != 0) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : Error getting fw config\n"); goto free_port; } beiscsi_get_port_name(&phba->ctrl, phba); beiscsi_get_params(phba); beiscsi_set_host_data(phba); beiscsi_set_uer_feature(phba); be2iscsi_enable_msix(phba); beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, "BM_%d : num_cpus = %d\n", phba->num_cpus); phba->shost->max_id = phba->params.cxns_per_ctrl; phba->shost->can_queue = phba->params.ios_per_ctrl; ret = beiscsi_get_memory(phba); if (ret < 0) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : alloc host mem failed\n"); goto free_port; } ret = beiscsi_init_port(phba); if (ret < 0) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : init port failed\n"); beiscsi_free_mem(phba); goto free_port; } for (i = 0; i < MAX_MCC_CMD; i++) { init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]); phba->ctrl.mcc_tag[i] = i + 1; phba->ctrl.mcc_tag_status[i + 1] = 0; phba->ctrl.mcc_tag_available++; memset(&phba->ctrl.ptag_state[i].tag_mem_state, 0, sizeof(struct be_dma_mem)); } phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0; snprintf(wq_name, sizeof(wq_name), "beiscsi_%02x_wq", phba->shost->host_no); phba->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, wq_name); if (!phba->wq) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : beiscsi_dev_probe-" "Failed to allocate work queue\n"); ret = -ENOMEM; goto free_twq; } INIT_DELAYED_WORK(&phba->eqd_update, beiscsi_eqd_update_work); phwi_ctrlr = phba->phwi_ctrlr; phwi_context = phwi_ctrlr->phwi_ctxt; for (i = 0; i < phba->num_cpus; i++) { pbe_eq = &phwi_context->be_eq[i]; irq_poll_init(&pbe_eq->iopoll, be_iopoll_budget, be_iopoll); } i = (phba->pcidev->msix_enabled) ? i : 0; /* Work item for MCC handling */ pbe_eq = &phwi_context->be_eq[i]; INIT_WORK(&pbe_eq->mcc_work, beiscsi_mcc_work); ret = beiscsi_init_irqs(phba); if (ret < 0) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : beiscsi_dev_probe-" "Failed to beiscsi_init_irqs\n"); goto disable_iopoll; } hwi_enable_intr(phba); ret = iscsi_host_add(phba->shost, &phba->pcidev->dev); if (ret) goto free_irqs; /* set online bit after port is operational */ set_bit(BEISCSI_HBA_ONLINE, &phba->state); __beiscsi_log(phba, KERN_INFO, "BM_%d : port online: 0x%lx\n", phba->state); INIT_WORK(&phba->boot_work, beiscsi_boot_work); ret = beiscsi_boot_get_shandle(phba, &s_handle); if (ret > 0) { beiscsi_start_boot_work(phba, s_handle); /** * Set this bit after starting the work to let * probe handle it first. * ASYNC event can too schedule this work. */ set_bit(BEISCSI_HBA_BOOT_FOUND, &phba->state); } beiscsi_iface_create_default(phba); schedule_delayed_work(&phba->eqd_update, msecs_to_jiffies(BEISCSI_EQD_UPDATE_INTERVAL)); INIT_WORK(&phba->sess_work, beiscsi_sess_work); INIT_DELAYED_WORK(&phba->recover_port, beiscsi_recover_port); /** * Start UE detection here. UE before this will cause stall in probe * and eventually fail the probe. 
*/ timer_setup(&phba->hw_check, beiscsi_hw_health_check, 0); mod_timer(&phba->hw_check, jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL)); beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, "\n\n\n BM_%d : SUCCESS - DRIVER LOADED\n\n\n"); return 0; free_irqs: hwi_disable_intr(phba); beiscsi_free_irqs(phba); disable_iopoll: for (i = 0; i < phba->num_cpus; i++) { pbe_eq = &phwi_context->be_eq[i]; irq_poll_disable(&pbe_eq->iopoll); } destroy_workqueue(phba->wq); free_twq: hwi_cleanup_port(phba); beiscsi_cleanup_port(phba); beiscsi_free_mem(phba); free_port: dma_free_coherent(&phba->pcidev->dev, phba->ctrl.mbox_mem_alloced.size, phba->ctrl.mbox_mem_alloced.va, phba->ctrl.mbox_mem_alloced.dma); beiscsi_unmap_pci_function(phba); free_hba: pci_disable_msix(phba->pcidev); pci_dev_put(phba->pcidev); iscsi_host_free(phba->shost); pci_set_drvdata(pcidev, NULL); disable_pci: pci_release_regions(pcidev); pci_disable_device(pcidev); return ret; } static void beiscsi_remove(struct pci_dev *pcidev) { struct beiscsi_hba *phba = NULL; phba = pci_get_drvdata(pcidev); if (!phba) { dev_err(&pcidev->dev, "beiscsi_remove called with no phba\n"); return; } /* first stop UE detection before unloading */ del_timer_sync(&phba->hw_check); cancel_delayed_work_sync(&phba->recover_port); cancel_work_sync(&phba->sess_work); beiscsi_iface_destroy_default(phba); iscsi_host_remove(phba->shost, false); beiscsi_disable_port(phba, 1); /* after cancelling boot_work */ iscsi_boot_destroy_kset(phba->boot_struct.boot_kset); /* free all resources */ destroy_workqueue(phba->wq); beiscsi_free_mem(phba); /* ctrl uninit */ beiscsi_unmap_pci_function(phba); dma_free_coherent(&phba->pcidev->dev, phba->ctrl.mbox_mem_alloced.size, phba->ctrl.mbox_mem_alloced.va, phba->ctrl.mbox_mem_alloced.dma); pci_dev_put(phba->pcidev); iscsi_host_free(phba->shost); pci_set_drvdata(pcidev, NULL); pci_release_regions(pcidev); pci_disable_device(pcidev); } static struct pci_error_handlers beiscsi_eeh_handlers = { .error_detected = beiscsi_eeh_err_detected, .slot_reset = beiscsi_eeh_reset, .resume = beiscsi_eeh_resume, }; struct iscsi_transport beiscsi_iscsi_transport = { .owner = THIS_MODULE, .name = DRV_NAME, .caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_TEXT_NEGO | CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD, .create_session = beiscsi_session_create, .destroy_session = beiscsi_session_destroy, .create_conn = beiscsi_conn_create, .bind_conn = beiscsi_conn_bind, .unbind_conn = iscsi_conn_unbind, .destroy_conn = iscsi_conn_teardown, .attr_is_visible = beiscsi_attr_is_visible, .set_iface_param = beiscsi_iface_set_param, .get_iface_param = beiscsi_iface_get_param, .set_param = beiscsi_set_param, .get_conn_param = iscsi_conn_get_param, .get_session_param = iscsi_session_get_param, .get_host_param = beiscsi_get_host_param, .start_conn = beiscsi_conn_start, .stop_conn = iscsi_conn_stop, .send_pdu = iscsi_conn_send_pdu, .xmit_task = beiscsi_task_xmit, .cleanup_task = beiscsi_cleanup_task, .alloc_pdu = beiscsi_alloc_pdu, .parse_pdu_itt = beiscsi_parse_pdu, .get_stats = beiscsi_conn_get_stats, .get_ep_param = beiscsi_ep_get_param, .ep_connect = beiscsi_ep_connect, .ep_poll = beiscsi_ep_poll, .ep_disconnect = beiscsi_ep_disconnect, .session_recovery_timedout = iscsi_session_recovery_timedout, .bsg_request = beiscsi_bsg_request, }; static struct pci_driver beiscsi_pci_driver = { .name = DRV_NAME, .probe = beiscsi_dev_probe, .remove = beiscsi_remove, .id_table = beiscsi_pci_id_table, .err_handler = &beiscsi_eeh_handlers }; static int __init 
beiscsi_module_init(void) { int ret; beiscsi_scsi_transport = iscsi_register_transport(&beiscsi_iscsi_transport); if (!beiscsi_scsi_transport) { printk(KERN_ERR "beiscsi_module_init - Unable to register beiscsi transport.\n"); return -ENOMEM; } printk(KERN_INFO "In beiscsi_module_init, tt=%p\n", &beiscsi_iscsi_transport); ret = pci_register_driver(&beiscsi_pci_driver); if (ret) { printk(KERN_ERR "beiscsi_module_init - Unable to register beiscsi pci driver.\n"); goto unregister_iscsi_transport; } return 0; unregister_iscsi_transport: iscsi_unregister_transport(&beiscsi_iscsi_transport); return ret; } static void __exit beiscsi_module_exit(void) { pci_unregister_driver(&beiscsi_pci_driver); iscsi_unregister_transport(&beiscsi_iscsi_transport); } module_init(beiscsi_module_init); module_exit(beiscsi_module_exit);
linux-master
drivers/scsi/be2iscsi/be_main.c
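Illustrative note (not part of the dataset record above): the adaptive interrupt-coalescing math inside beiscsi_eqd_update_work() is easy to lose in the flattened source, so the standalone user-space sketch below restates just that calculation. The file name eq_delay_sketch.c and the EQ_DELAY_MIN/EQ_DELAY_MAX values are placeholders chosen for illustration; the driver's real bounds are the BEISCSI_EQ_DELAY_* constants defined elsewhere in the driver headers.

/* eq_delay_sketch.c - hedged, standalone sketch; not driver code. */
#include <stdio.h>
#include <stdint.h>

/* Placeholder bounds; the driver uses BEISCSI_EQ_DELAY_MIN/MAX. */
#define EQ_DELAY_MIN 0
#define EQ_DELAY_MAX 128

/*
 * Mirror of the math in beiscsi_eqd_update_work(): the completion rate
 * (completions per second) is scaled down to an EQ delay value, clamped,
 * and later converted to the 65/100 delay multiplier handed to
 * beiscsi_modify_eq_delay().
 */
static uint32_t eq_delay_from_rate(uint32_t cq_delta, uint32_t delta_ms)
{
	uint32_t pps, eqd;

	if (!delta_ms)
		return EQ_DELAY_MIN;

	pps = (cq_delta * 1000) / delta_ms;	/* completions per second */
	eqd = (pps / 1500) << 2;		/* scale into delay units */
	if (eqd < 8)
		eqd = 0;			/* low rate: no coalescing */
	if (eqd > EQ_DELAY_MAX)
		eqd = EQ_DELAY_MAX;
	if (eqd < EQ_DELAY_MIN)
		eqd = EQ_DELAY_MIN;
	return eqd;
}

int main(void)
{
	uint32_t eqd = eq_delay_from_rate(120000, 1000);

	printf("eqd=%u delay_multiplier=%u\n", eqd, (eqd * 65) / 100);
	return 0;
}

Built with any C compiler, the sketch prints the clamped delay value and the delay_multiplier the work handler would queue for the firmware; it is only meant to make the per-EQ rate-to-delay mapping readable.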
/* * This file is part of the Emulex Linux Device Driver for Enterprise iSCSI * Host Bus Adapters. Refer to the README file included with this package * for driver version and adapter compatibility. * * Copyright (c) 2018 Broadcom. All Rights Reserved. * The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as published * by the Free Software Foundation. * * This program is distributed in the hope that it will be useful. ALL EXPRESS * OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, INCLUDING ANY * IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, * OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH * DISCLAIMERS ARE HELD TO BE LEGALLY INVALID. * See the GNU General Public License for more details, a copy of which * can be found in the file COPYING included with this package. * * Contact Information: * [email protected] * */ #include <linux/bsg-lib.h> #include <scsi/scsi_transport_iscsi.h> #include <scsi/scsi_bsg_iscsi.h> #include "be_mgmt.h" #include "be_iscsi.h" #include "be_main.h" unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl, struct beiscsi_hba *phba, struct bsg_job *job, struct be_dma_mem *nonemb_cmd) { struct be_mcc_wrb *wrb; struct be_sge *mcc_sge; unsigned int tag = 0; struct iscsi_bsg_request *bsg_req = job->request; struct be_bsg_vendor_cmd *req = nonemb_cmd->va; unsigned short region, sector_size, sector, offset; nonemb_cmd->size = job->request_payload.payload_len; memset(nonemb_cmd->va, 0, nonemb_cmd->size); region = bsg_req->rqst_data.h_vendor.vendor_cmd[1]; sector_size = bsg_req->rqst_data.h_vendor.vendor_cmd[2]; sector = bsg_req->rqst_data.h_vendor.vendor_cmd[3]; offset = bsg_req->rqst_data.h_vendor.vendor_cmd[4]; req->region = region; req->sector = sector; req->offset = offset; if (mutex_lock_interruptible(&ctrl->mbox_lock)) return 0; switch (bsg_req->rqst_data.h_vendor.vendor_cmd[0]) { case BEISCSI_WRITE_FLASH: offset = sector * sector_size + offset; be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, OPCODE_COMMON_WRITE_FLASH, sizeof(*req)); sg_copy_to_buffer(job->request_payload.sg_list, job->request_payload.sg_cnt, nonemb_cmd->va + offset, job->request_len); break; case BEISCSI_READ_FLASH: be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, OPCODE_COMMON_READ_FLASH, sizeof(*req)); break; default: beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, "BG_%d : Unsupported cmd = 0x%x\n\n", bsg_req->rqst_data.h_vendor.vendor_cmd[0]); mutex_unlock(&ctrl->mbox_lock); return -EPERM; } wrb = alloc_mcc_wrb(phba, &tag); if (!wrb) { mutex_unlock(&ctrl->mbox_lock); return 0; } mcc_sge = nonembedded_sgl(wrb); be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, job->request_payload.sg_cnt); mcc_sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma)); mcc_sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF); mcc_sge->len = cpu_to_le32(nonemb_cmd->size); be_mcc_notify(phba, tag); mutex_unlock(&ctrl->mbox_lock); return tag; } /** * mgmt_open_connection()- Establish a TCP CXN * @phba: driver priv structure * @dst_addr: Destination Address * @beiscsi_ep: ptr to device endpoint struct * @nonemb_cmd: ptr to memory allocated for command * * return * Success: Tag number of the MBX Command issued * Failure: Error code **/ int mgmt_open_connection(struct beiscsi_hba *phba, struct sockaddr *dst_addr, struct beiscsi_endpoint *beiscsi_ep, struct be_dma_mem *nonemb_cmd) { struct 
hwi_controller *phwi_ctrlr; struct hwi_context_memory *phwi_context; struct sockaddr_in *daddr_in = (struct sockaddr_in *)dst_addr; struct sockaddr_in6 *daddr_in6 = (struct sockaddr_in6 *)dst_addr; struct be_ctrl_info *ctrl = &phba->ctrl; struct be_mcc_wrb *wrb; struct tcp_connect_and_offload_in_v1 *req; unsigned short def_hdr_id; unsigned short def_data_id; struct phys_addr template_address = { 0, 0 }; struct phys_addr *ptemplate_address; unsigned int tag = 0; unsigned int i, ulp_num; unsigned short cid = beiscsi_ep->ep_cid; struct be_sge *sge; if (dst_addr->sa_family != PF_INET && dst_addr->sa_family != PF_INET6) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, "BG_%d : unknown addr family %d\n", dst_addr->sa_family); return 0; } phwi_ctrlr = phba->phwi_ctrlr; phwi_context = phwi_ctrlr->phwi_ctxt; ulp_num = phwi_ctrlr->wrb_context[BE_GET_CRI_FROM_CID(cid)].ulp_num; def_hdr_id = (unsigned short)HWI_GET_DEF_HDRQ_ID(phba, ulp_num); def_data_id = (unsigned short)HWI_GET_DEF_BUFQ_ID(phba, ulp_num); ptemplate_address = &template_address; ISCSI_GET_PDU_TEMPLATE_ADDRESS(phba, ptemplate_address); if (mutex_lock_interruptible(&ctrl->mbox_lock)) return 0; wrb = alloc_mcc_wrb(phba, &tag); if (!wrb) { mutex_unlock(&ctrl->mbox_lock); return 0; } sge = nonembedded_sgl(wrb); req = nonemb_cmd->va; memset(req, 0, sizeof(*req)); be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, 1); be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, OPCODE_COMMON_ISCSI_TCP_CONNECT_AND_OFFLOAD, nonemb_cmd->size); if (dst_addr->sa_family == PF_INET) { __be32 s_addr = daddr_in->sin_addr.s_addr; req->ip_address.ip_type = BEISCSI_IP_TYPE_V4; req->ip_address.addr[0] = s_addr & 0x000000ff; req->ip_address.addr[1] = (s_addr & 0x0000ff00) >> 8; req->ip_address.addr[2] = (s_addr & 0x00ff0000) >> 16; req->ip_address.addr[3] = (s_addr & 0xff000000) >> 24; req->tcp_port = ntohs(daddr_in->sin_port); beiscsi_ep->dst_addr = daddr_in->sin_addr.s_addr; beiscsi_ep->dst_tcpport = ntohs(daddr_in->sin_port); beiscsi_ep->ip_type = BEISCSI_IP_TYPE_V4; } else { /* else its PF_INET6 family */ req->ip_address.ip_type = BEISCSI_IP_TYPE_V6; memcpy(&req->ip_address.addr, &daddr_in6->sin6_addr.in6_u.u6_addr8, 16); req->tcp_port = ntohs(daddr_in6->sin6_port); beiscsi_ep->dst_tcpport = ntohs(daddr_in6->sin6_port); memcpy(&beiscsi_ep->dst6_addr, &daddr_in6->sin6_addr.in6_u.u6_addr8, 16); beiscsi_ep->ip_type = BEISCSI_IP_TYPE_V6; } req->cid = cid; i = phba->nxt_cqid++; if (phba->nxt_cqid == phba->num_cpus) phba->nxt_cqid = 0; req->cq_id = phwi_context->be_cq[i].id; beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, "BG_%d : i=%d cq_id=%d\n", i, req->cq_id); req->defq_id = def_hdr_id; req->hdr_ring_id = def_hdr_id; req->data_ring_id = def_data_id; req->do_offload = 1; req->dataout_template_pa.lo = ptemplate_address->lo; req->dataout_template_pa.hi = ptemplate_address->hi; sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma)); sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF); sge->len = cpu_to_le32(nonemb_cmd->size); if (!is_chip_be2_be3r(phba)) { req->hdr.version = MBX_CMD_VER1; req->tcp_window_size = 0x8000; req->tcp_window_scale_count = 2; } be_mcc_notify(phba, tag); mutex_unlock(&ctrl->mbox_lock); return tag; } /** * beiscsi_exec_nemb_cmd()- execute non-embedded MBX cmd * @phba: driver priv structure * @nonemb_cmd: DMA address of the MBX command to be issued * @cbfn: callback func on MCC completion * @resp_buf: buffer to copy the MBX cmd response * @resp_buf_len: response length to be copied * **/ static int beiscsi_exec_nemb_cmd(struct beiscsi_hba 
*phba, struct be_dma_mem *nonemb_cmd, void (*cbfn)(struct beiscsi_hba *, unsigned int), void *resp_buf, u32 resp_buf_len) { struct be_ctrl_info *ctrl = &phba->ctrl; struct be_mcc_wrb *wrb; struct be_sge *sge; unsigned int tag; int rc = 0; mutex_lock(&ctrl->mbox_lock); wrb = alloc_mcc_wrb(phba, &tag); if (!wrb) { mutex_unlock(&ctrl->mbox_lock); return -ENOMEM; } sge = nonembedded_sgl(wrb); be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, 1); sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma)); sge->pa_lo = cpu_to_le32(lower_32_bits(nonemb_cmd->dma)); sge->len = cpu_to_le32(nonemb_cmd->size); if (cbfn) { struct be_dma_mem *tag_mem; set_bit(MCC_TAG_STATE_ASYNC, &ctrl->ptag_state[tag].tag_state); ctrl->ptag_state[tag].cbfn = cbfn; tag_mem = &phba->ctrl.ptag_state[tag].tag_mem_state; /* store DMA mem to be freed in callback */ tag_mem->size = nonemb_cmd->size; tag_mem->va = nonemb_cmd->va; tag_mem->dma = nonemb_cmd->dma; } be_mcc_notify(phba, tag); mutex_unlock(&ctrl->mbox_lock); /* with cbfn set, its async cmd, don't wait */ if (cbfn) return 0; rc = beiscsi_mccq_compl_wait(phba, tag, NULL, nonemb_cmd); /* copy the response, if any */ if (resp_buf) memcpy(resp_buf, nonemb_cmd->va, resp_buf_len); return rc; } static int beiscsi_prep_nemb_cmd(struct beiscsi_hba *phba, struct be_dma_mem *cmd, u8 subsystem, u8 opcode, u32 size) { cmd->va = dma_alloc_coherent(&phba->ctrl.pdev->dev, size, &cmd->dma, GFP_KERNEL); if (!cmd->va) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, "BG_%d : Failed to allocate memory for if info\n"); return -ENOMEM; } cmd->size = size; be_cmd_hdr_prepare(cmd->va, subsystem, opcode, size); beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, "BG_%d : subsystem %u cmd %u size %u\n", subsystem, opcode, size); return 0; } static void beiscsi_free_nemb_cmd(struct beiscsi_hba *phba, struct be_dma_mem *cmd, int rc) { /* * If FW is busy the DMA buffer is saved with the tag. When the cmd * completes this buffer is freed. */ if (rc == -EBUSY) return; dma_free_coherent(&phba->ctrl.pdev->dev, cmd->size, cmd->va, cmd->dma); } static void __beiscsi_eq_delay_compl(struct beiscsi_hba *phba, unsigned int tag) { struct be_dma_mem *tag_mem; /* status is ignored */ __beiscsi_mcc_compl_status(phba, tag, NULL, NULL); tag_mem = &phba->ctrl.ptag_state[tag].tag_mem_state; if (tag_mem->size) { dma_free_coherent(&phba->pcidev->dev, tag_mem->size, tag_mem->va, tag_mem->dma); tag_mem->size = 0; } } int beiscsi_modify_eq_delay(struct beiscsi_hba *phba, struct be_set_eqd *set_eqd, int num) { struct be_cmd_req_modify_eq_delay *req; struct be_dma_mem nonemb_cmd; int i, rc; rc = beiscsi_prep_nemb_cmd(phba, &nonemb_cmd, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req)); if (rc) return rc; req = nonemb_cmd.va; req->num_eq = cpu_to_le32(num); for (i = 0; i < num; i++) { req->delay[i].eq_id = cpu_to_le32(set_eqd[i].eq_id); req->delay[i].phase = 0; req->delay[i].delay_multiplier = cpu_to_le32(set_eqd[i].delay_multiplier); } rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, __beiscsi_eq_delay_compl, NULL, 0); if (rc) { /* * Only free on failure. Async cmds are handled like -EBUSY * where it's handled for us. 
*/ beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc); } return rc; } /** * beiscsi_get_initiator_name - read initiator name from flash * @phba: device priv structure * @name: buffer pointer * @cfg: fetch user configured * */ int beiscsi_get_initiator_name(struct beiscsi_hba *phba, char *name, bool cfg) { struct be_dma_mem nonemb_cmd; struct be_cmd_hba_name resp; struct be_cmd_hba_name *req; int rc; rc = beiscsi_prep_nemb_cmd(phba, &nonemb_cmd, CMD_SUBSYSTEM_ISCSI_INI, OPCODE_ISCSI_INI_CFG_GET_HBA_NAME, sizeof(resp)); if (rc) return rc; req = nonemb_cmd.va; if (cfg) req->hdr.version = 1; rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, &resp, sizeof(resp)); beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc); if (rc) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, "BS_%d : Initiator Name MBX Failed\n"); return rc; } rc = sprintf(name, "%s\n", resp.initiator_name); return rc; } unsigned int beiscsi_if_get_handle(struct beiscsi_hba *phba) { struct be_ctrl_info *ctrl = &phba->ctrl; struct be_mcc_wrb *wrb; struct be_cmd_get_all_if_id_req *req; struct be_cmd_get_all_if_id_req *pbe_allid; unsigned int tag; int status = 0; if (mutex_lock_interruptible(&ctrl->mbox_lock)) return -EINTR; wrb = alloc_mcc_wrb(phba, &tag); if (!wrb) { mutex_unlock(&ctrl->mbox_lock); return -ENOMEM; } req = embedded_payload(wrb); be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, OPCODE_COMMON_ISCSI_NTWK_GET_ALL_IF_ID, sizeof(*req)); be_mcc_notify(phba, tag); mutex_unlock(&ctrl->mbox_lock); status = beiscsi_mccq_compl_wait(phba, tag, &wrb, NULL); if (status) { beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, "BG_%d : %s failed: %d\n", __func__, status); return -EBUSY; } pbe_allid = embedded_payload(wrb); /* we now support only one interface per function */ phba->interface_handle = pbe_allid->if_hndl_list[0]; return status; } static inline bool beiscsi_if_zero_ip(u8 *ip, u32 ip_type) { u32 len; len = (ip_type < BEISCSI_IP_TYPE_V6) ? IP_V4_LEN : IP_V6_LEN; while (len && !ip[len - 1]) len--; return (len == 0); } static int beiscsi_if_mod_gw(struct beiscsi_hba *phba, u32 action, u32 ip_type, u8 *gw) { struct be_cmd_set_def_gateway_req *req; struct be_dma_mem nonemb_cmd; int rt_val; rt_val = beiscsi_prep_nemb_cmd(phba, &nonemb_cmd, CMD_SUBSYSTEM_ISCSI, OPCODE_COMMON_ISCSI_NTWK_MODIFY_DEFAULT_GATEWAY, sizeof(*req)); if (rt_val) return rt_val; req = nonemb_cmd.va; req->action = action; req->ip_addr.ip_type = ip_type; memcpy(req->ip_addr.addr, gw, (ip_type < BEISCSI_IP_TYPE_V6) ? 
IP_V4_LEN : IP_V6_LEN); rt_val = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, NULL, 0); beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rt_val); return rt_val; } int beiscsi_if_set_gw(struct beiscsi_hba *phba, u32 ip_type, u8 *gw) { struct be_cmd_get_def_gateway_resp gw_resp; int rt_val; memset(&gw_resp, 0, sizeof(gw_resp)); rt_val = beiscsi_if_get_gw(phba, ip_type, &gw_resp); if (rt_val) { beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, "BG_%d : Failed to Get Gateway Addr\n"); return rt_val; } if (!beiscsi_if_zero_ip(gw_resp.ip_addr.addr, ip_type)) { rt_val = beiscsi_if_mod_gw(phba, IP_ACTION_DEL, ip_type, gw_resp.ip_addr.addr); if (rt_val) { beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, "BG_%d : Failed to clear Gateway Addr Set\n"); return rt_val; } } rt_val = beiscsi_if_mod_gw(phba, IP_ACTION_ADD, ip_type, gw); if (rt_val) beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, "BG_%d : Failed to Set Gateway Addr\n"); return rt_val; } int beiscsi_if_get_gw(struct beiscsi_hba *phba, u32 ip_type, struct be_cmd_get_def_gateway_resp *resp) { struct be_cmd_get_def_gateway_req *req; struct be_dma_mem nonemb_cmd; int rc; rc = beiscsi_prep_nemb_cmd(phba, &nonemb_cmd, CMD_SUBSYSTEM_ISCSI, OPCODE_COMMON_ISCSI_NTWK_GET_DEFAULT_GATEWAY, sizeof(*resp)); if (rc) return rc; req = nonemb_cmd.va; req->ip_type = ip_type; rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, resp, sizeof(*resp)); beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc); return rc; } static int beiscsi_if_clr_ip(struct beiscsi_hba *phba, struct be_cmd_get_if_info_resp *if_info) { struct be_cmd_set_ip_addr_req *req; struct be_dma_mem nonemb_cmd; int rc; rc = beiscsi_prep_nemb_cmd(phba, &nonemb_cmd, CMD_SUBSYSTEM_ISCSI, OPCODE_COMMON_ISCSI_NTWK_MODIFY_IP_ADDR, sizeof(*req)); if (rc) return rc; req = nonemb_cmd.va; req->ip_params.record_entry_count = 1; req->ip_params.ip_record.action = IP_ACTION_DEL; req->ip_params.ip_record.interface_hndl = phba->interface_handle; req->ip_params.ip_record.ip_addr.size_of_structure = sizeof(struct be_ip_addr_subnet_format); req->ip_params.ip_record.ip_addr.ip_type = if_info->ip_addr.ip_type; memcpy(req->ip_params.ip_record.ip_addr.addr, if_info->ip_addr.addr, sizeof(if_info->ip_addr.addr)); memcpy(req->ip_params.ip_record.ip_addr.subnet_mask, if_info->ip_addr.subnet_mask, sizeof(if_info->ip_addr.subnet_mask)); rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, NULL, 0); if (rc < 0 || req->ip_params.ip_record.status) { beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, "BG_%d : failed to clear IP: rc %d status %d\n", rc, req->ip_params.ip_record.status); } beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc); return rc; } static int beiscsi_if_set_ip(struct beiscsi_hba *phba, u8 *ip, u8 *subnet, u32 ip_type) { struct be_cmd_set_ip_addr_req *req; struct be_dma_mem nonemb_cmd; uint32_t ip_len; int rc; rc = beiscsi_prep_nemb_cmd(phba, &nonemb_cmd, CMD_SUBSYSTEM_ISCSI, OPCODE_COMMON_ISCSI_NTWK_MODIFY_IP_ADDR, sizeof(*req)); if (rc) return rc; req = nonemb_cmd.va; req->ip_params.record_entry_count = 1; req->ip_params.ip_record.action = IP_ACTION_ADD; req->ip_params.ip_record.interface_hndl = phba->interface_handle; req->ip_params.ip_record.ip_addr.size_of_structure = sizeof(struct be_ip_addr_subnet_format); req->ip_params.ip_record.ip_addr.ip_type = ip_type; ip_len = (ip_type < BEISCSI_IP_TYPE_V6) ? 
IP_V4_LEN : IP_V6_LEN; memcpy(req->ip_params.ip_record.ip_addr.addr, ip, ip_len); if (subnet) memcpy(req->ip_params.ip_record.ip_addr.subnet_mask, subnet, ip_len); rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, NULL, 0); /** * In some cases, host needs to look into individual record status * even though FW reported success for that IOCTL. */ if (rc < 0 || req->ip_params.ip_record.status) { __beiscsi_log(phba, KERN_ERR, "BG_%d : failed to set IP: rc %d status %d\n", rc, req->ip_params.ip_record.status); if (req->ip_params.ip_record.status) rc = -EINVAL; } beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc); return rc; } int beiscsi_if_en_static(struct beiscsi_hba *phba, u32 ip_type, u8 *ip, u8 *subnet) { struct be_cmd_get_if_info_resp *if_info; struct be_cmd_rel_dhcp_req *reldhcp; struct be_dma_mem nonemb_cmd; int rc; rc = beiscsi_if_get_info(phba, ip_type, &if_info); if (rc) return rc; if (if_info->dhcp_state) { rc = beiscsi_prep_nemb_cmd(phba, &nonemb_cmd, CMD_SUBSYSTEM_ISCSI, OPCODE_COMMON_ISCSI_NTWK_REL_STATELESS_IP_ADDR, sizeof(*reldhcp)); if (rc) goto exit; reldhcp = nonemb_cmd.va; reldhcp->interface_hndl = phba->interface_handle; reldhcp->ip_type = ip_type; rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, NULL, 0); beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc); if (rc < 0) { beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, "BG_%d : failed to release existing DHCP: %d\n", rc); goto exit; } } /* first delete any IP set */ if (!beiscsi_if_zero_ip(if_info->ip_addr.addr, ip_type)) { rc = beiscsi_if_clr_ip(phba, if_info); if (rc) goto exit; } /* if ip == NULL then this is called just to release DHCP IP */ if (ip) rc = beiscsi_if_set_ip(phba, ip, subnet, ip_type); exit: kfree(if_info); return rc; } int beiscsi_if_en_dhcp(struct beiscsi_hba *phba, u32 ip_type) { struct be_cmd_get_def_gateway_resp gw_resp; struct be_cmd_get_if_info_resp *if_info; struct be_cmd_set_dhcp_req *dhcpreq; struct be_dma_mem nonemb_cmd; u8 *gw; int rc; rc = beiscsi_if_get_info(phba, ip_type, &if_info); if (rc) return rc; if (if_info->dhcp_state) { beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, "BG_%d : DHCP Already Enabled\n"); goto exit; } /* first delete any IP set */ if (!beiscsi_if_zero_ip(if_info->ip_addr.addr, ip_type)) { rc = beiscsi_if_clr_ip(phba, if_info); if (rc) goto exit; } /* delete gateway settings if mode change is to DHCP */ memset(&gw_resp, 0, sizeof(gw_resp)); /* use ip_type provided in if_info */ rc = beiscsi_if_get_gw(phba, if_info->ip_addr.ip_type, &gw_resp); if (rc) { beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, "BG_%d : Failed to Get Gateway Addr\n"); goto exit; } gw = (u8 *)&gw_resp.ip_addr.addr; if (!beiscsi_if_zero_ip(gw, if_info->ip_addr.ip_type)) { rc = beiscsi_if_mod_gw(phba, IP_ACTION_DEL, if_info->ip_addr.ip_type, gw); if (rc) { beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, "BG_%d : Failed to clear Gateway Addr Set\n"); goto exit; } } rc = beiscsi_prep_nemb_cmd(phba, &nonemb_cmd, CMD_SUBSYSTEM_ISCSI, OPCODE_COMMON_ISCSI_NTWK_CONFIG_STATELESS_IP_ADDR, sizeof(*dhcpreq)); if (rc) goto exit; dhcpreq = nonemb_cmd.va; dhcpreq->flags = 1; /* 1 - blocking; 0 - non-blocking */ dhcpreq->retry_count = 1; dhcpreq->interface_hndl = phba->interface_handle; dhcpreq->ip_type = ip_type; rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, NULL, 0); beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc); exit: kfree(if_info); return rc; } /** * beiscsi_if_set_vlan()- Issue and wait for CMD completion * @phba: device private structure instance * @vlan_tag: VLAN tag * * Issue the MBX Cmd and 
wait for the completion of the * command. * * returns * Success: 0 * Failure: Non-Xero Value **/ int beiscsi_if_set_vlan(struct beiscsi_hba *phba, uint16_t vlan_tag) { int rc; unsigned int tag; tag = be_cmd_set_vlan(phba, vlan_tag); if (!tag) { beiscsi_log(phba, KERN_ERR, (BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX), "BG_%d : VLAN Setting Failed\n"); return -EBUSY; } rc = beiscsi_mccq_compl_wait(phba, tag, NULL, NULL); if (rc) { beiscsi_log(phba, KERN_ERR, (BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX), "BS_%d : VLAN MBX Cmd Failed\n"); return rc; } return rc; } int beiscsi_if_get_info(struct beiscsi_hba *phba, int ip_type, struct be_cmd_get_if_info_resp **if_info) { struct be_cmd_get_if_info_req *req; struct be_dma_mem nonemb_cmd; uint32_t ioctl_size = sizeof(struct be_cmd_get_if_info_resp); int rc; rc = beiscsi_if_get_handle(phba); if (rc) return rc; do { rc = beiscsi_prep_nemb_cmd(phba, &nonemb_cmd, CMD_SUBSYSTEM_ISCSI, OPCODE_COMMON_ISCSI_NTWK_GET_IF_INFO, ioctl_size); if (rc) return rc; req = nonemb_cmd.va; req->interface_hndl = phba->interface_handle; req->ip_type = ip_type; /* Allocate memory for if_info */ *if_info = kzalloc(ioctl_size, GFP_KERNEL); if (!*if_info) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG, "BG_%d : Memory Allocation Failure\n"); beiscsi_free_nemb_cmd(phba, &nonemb_cmd, -ENOMEM); return -ENOMEM; } rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, *if_info, ioctl_size); /* Check if the error is because of Insufficent_Buffer */ if (rc == -EAGAIN) { /* Get the new memory size */ ioctl_size = ((struct be_cmd_resp_hdr *) nonemb_cmd.va)->actual_resp_len; ioctl_size += sizeof(struct be_cmd_req_hdr); beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc); /* Free the virtual memory */ kfree(*if_info); } else { beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc); break; } } while (true); return rc; } int mgmt_get_nic_conf(struct beiscsi_hba *phba, struct be_cmd_get_nic_conf_resp *nic) { struct be_dma_mem nonemb_cmd; int rc; rc = beiscsi_prep_nemb_cmd(phba, &nonemb_cmd, CMD_SUBSYSTEM_ISCSI, OPCODE_COMMON_ISCSI_NTWK_GET_NIC_CONFIG, sizeof(*nic)); if (rc) return rc; rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, nic, sizeof(*nic)); beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc); return rc; } static void beiscsi_boot_process_compl(struct beiscsi_hba *phba, unsigned int tag) { struct be_cmd_get_boot_target_resp *boot_resp; struct be_cmd_resp_logout_fw_sess *logo_resp; struct be_cmd_get_session_resp *sess_resp; struct be_mcc_wrb *wrb; struct boot_struct *bs; int boot_work, status; if (!test_bit(BEISCSI_HBA_BOOT_WORK, &phba->state)) { __beiscsi_log(phba, KERN_ERR, "BG_%d : %s no boot work %lx\n", __func__, phba->state); return; } if (phba->boot_struct.tag != tag) { __beiscsi_log(phba, KERN_ERR, "BG_%d : %s tag mismatch %d:%d\n", __func__, tag, phba->boot_struct.tag); return; } bs = &phba->boot_struct; boot_work = 1; status = 0; switch (bs->action) { case BEISCSI_BOOT_REOPEN_SESS: status = __beiscsi_mcc_compl_status(phba, tag, NULL, NULL); if (!status) bs->action = BEISCSI_BOOT_GET_SHANDLE; else bs->retry--; break; case BEISCSI_BOOT_GET_SHANDLE: status = __beiscsi_mcc_compl_status(phba, tag, &wrb, NULL); if (!status) { boot_resp = embedded_payload(wrb); bs->s_handle = boot_resp->boot_session_handle; } if (bs->s_handle == BE_BOOT_INVALID_SHANDLE) { bs->action = BEISCSI_BOOT_REOPEN_SESS; bs->retry--; } else { bs->action = BEISCSI_BOOT_GET_SINFO; } break; case BEISCSI_BOOT_GET_SINFO: status = __beiscsi_mcc_compl_status(phba, tag, NULL, &bs->nonemb_cmd); if (!status) { sess_resp = 
bs->nonemb_cmd.va; memcpy(&bs->boot_sess, &sess_resp->session_info, sizeof(struct mgmt_session_info)); bs->action = BEISCSI_BOOT_LOGOUT_SESS; } else { __beiscsi_log(phba, KERN_ERR, "BG_%d : get boot session info error : 0x%x\n", status); boot_work = 0; } dma_free_coherent(&phba->ctrl.pdev->dev, bs->nonemb_cmd.size, bs->nonemb_cmd.va, bs->nonemb_cmd.dma); bs->nonemb_cmd.va = NULL; break; case BEISCSI_BOOT_LOGOUT_SESS: status = __beiscsi_mcc_compl_status(phba, tag, &wrb, NULL); if (!status) { logo_resp = embedded_payload(wrb); if (logo_resp->session_status != BE_SESS_STATUS_CLOSE) { __beiscsi_log(phba, KERN_ERR, "BG_%d : FW boot session logout error : 0x%x\n", logo_resp->session_status); } } /* continue to create boot_kset even if logout failed? */ bs->action = BEISCSI_BOOT_CREATE_KSET; break; default: break; } /* clear the tag so no other completion matches this tag */ bs->tag = 0; if (!bs->retry) { boot_work = 0; __beiscsi_log(phba, KERN_ERR, "BG_%d : failed to setup boot target: status %d action %d\n", status, bs->action); } if (!boot_work) { /* wait for next event to start boot_work */ clear_bit(BEISCSI_HBA_BOOT_WORK, &phba->state); return; } schedule_work(&phba->boot_work); } /** * beiscsi_boot_logout_sess()- Logout from boot FW session * @phba: Device priv structure instance * * return * the TAG used for MBOX Command * */ unsigned int beiscsi_boot_logout_sess(struct beiscsi_hba *phba) { struct be_ctrl_info *ctrl = &phba->ctrl; struct be_mcc_wrb *wrb; struct be_cmd_req_logout_fw_sess *req; unsigned int tag; mutex_lock(&ctrl->mbox_lock); wrb = alloc_mcc_wrb(phba, &tag); if (!wrb) { mutex_unlock(&ctrl->mbox_lock); return 0; } req = embedded_payload(wrb); be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI, OPCODE_ISCSI_INI_SESSION_LOGOUT_TARGET, sizeof(struct be_cmd_req_logout_fw_sess)); /* Use the session handle copied into boot_sess */ req->session_handle = phba->boot_struct.boot_sess.session_handle; phba->boot_struct.tag = tag; set_bit(MCC_TAG_STATE_ASYNC, &ctrl->ptag_state[tag].tag_state); ctrl->ptag_state[tag].cbfn = beiscsi_boot_process_compl; be_mcc_notify(phba, tag); mutex_unlock(&ctrl->mbox_lock); return tag; } /** * beiscsi_boot_reopen_sess()- Reopen boot session * @phba: Device priv structure instance * * return * the TAG used for MBOX Command * **/ unsigned int beiscsi_boot_reopen_sess(struct beiscsi_hba *phba) { struct be_ctrl_info *ctrl = &phba->ctrl; struct be_mcc_wrb *wrb; struct be_cmd_reopen_session_req *req; unsigned int tag; mutex_lock(&ctrl->mbox_lock); wrb = alloc_mcc_wrb(phba, &tag); if (!wrb) { mutex_unlock(&ctrl->mbox_lock); return 0; } req = embedded_payload(wrb); be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI, OPCODE_ISCSI_INI_DRIVER_REOPEN_ALL_SESSIONS, sizeof(struct be_cmd_reopen_session_resp)); req->reopen_type = BE_REOPEN_BOOT_SESSIONS; req->session_handle = BE_BOOT_INVALID_SHANDLE; phba->boot_struct.tag = tag; set_bit(MCC_TAG_STATE_ASYNC, &ctrl->ptag_state[tag].tag_state); ctrl->ptag_state[tag].cbfn = beiscsi_boot_process_compl; be_mcc_notify(phba, tag); mutex_unlock(&ctrl->mbox_lock); return tag; } /** * beiscsi_boot_get_sinfo()- Get boot session info * @phba: device priv structure instance * * Fetches the boot_struct.s_handle info from FW. 
* return * the TAG used for MBOX Command * **/ unsigned int beiscsi_boot_get_sinfo(struct beiscsi_hba *phba) { struct be_ctrl_info *ctrl = &phba->ctrl; struct be_cmd_get_session_req *req; struct be_dma_mem *nonemb_cmd; struct be_mcc_wrb *wrb; struct be_sge *sge; unsigned int tag; mutex_lock(&ctrl->mbox_lock); wrb = alloc_mcc_wrb(phba, &tag); if (!wrb) { mutex_unlock(&ctrl->mbox_lock); return 0; } nonemb_cmd = &phba->boot_struct.nonemb_cmd; nonemb_cmd->size = sizeof(struct be_cmd_get_session_resp); nonemb_cmd->va = dma_alloc_coherent(&phba->ctrl.pdev->dev, nonemb_cmd->size, &nonemb_cmd->dma, GFP_KERNEL); if (!nonemb_cmd->va) { mutex_unlock(&ctrl->mbox_lock); return 0; } req = nonemb_cmd->va; memset(req, 0, sizeof(*req)); sge = nonembedded_sgl(wrb); be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1); be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI, OPCODE_ISCSI_INI_SESSION_GET_A_SESSION, sizeof(struct be_cmd_get_session_resp)); req->session_handle = phba->boot_struct.s_handle; sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma)); sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF); sge->len = cpu_to_le32(nonemb_cmd->size); phba->boot_struct.tag = tag; set_bit(MCC_TAG_STATE_ASYNC, &ctrl->ptag_state[tag].tag_state); ctrl->ptag_state[tag].cbfn = beiscsi_boot_process_compl; be_mcc_notify(phba, tag); mutex_unlock(&ctrl->mbox_lock); return tag; } unsigned int __beiscsi_boot_get_shandle(struct beiscsi_hba *phba, int async) { struct be_ctrl_info *ctrl = &phba->ctrl; struct be_mcc_wrb *wrb; struct be_cmd_get_boot_target_req *req; unsigned int tag; mutex_lock(&ctrl->mbox_lock); wrb = alloc_mcc_wrb(phba, &tag); if (!wrb) { mutex_unlock(&ctrl->mbox_lock); return 0; } req = embedded_payload(wrb); be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI, OPCODE_ISCSI_INI_BOOT_GET_BOOT_TARGET, sizeof(struct be_cmd_get_boot_target_resp)); if (async) { phba->boot_struct.tag = tag; set_bit(MCC_TAG_STATE_ASYNC, &ctrl->ptag_state[tag].tag_state); ctrl->ptag_state[tag].cbfn = beiscsi_boot_process_compl; } be_mcc_notify(phba, tag); mutex_unlock(&ctrl->mbox_lock); return tag; } /** * beiscsi_boot_get_shandle()- Get boot session handle * @phba: device priv structure instance * @s_handle: session handle returned for boot session. * * return * Success: 1 * Failure: negative * **/ int beiscsi_boot_get_shandle(struct beiscsi_hba *phba, unsigned int *s_handle) { struct be_cmd_get_boot_target_resp *boot_resp; struct be_mcc_wrb *wrb; unsigned int tag; int rc; *s_handle = BE_BOOT_INVALID_SHANDLE; /* get configured boot session count and handle */ tag = __beiscsi_boot_get_shandle(phba, 0); if (!tag) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT, "BG_%d : Getting Boot Target Info Failed\n"); return -EAGAIN; } rc = beiscsi_mccq_compl_wait(phba, tag, &wrb, NULL); if (rc) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG, "BG_%d : MBX CMD get_boot_target Failed\n"); return -EBUSY; } boot_resp = embedded_payload(wrb); /* check if there are any boot targets configured */ if (!boot_resp->boot_session_count) { __beiscsi_log(phba, KERN_INFO, "BG_%d : No boot targets configured\n"); return -ENXIO; } /* only if FW has logged in to the boot target, s_handle is valid */ *s_handle = boot_resp->boot_session_handle; return 1; } /** * beiscsi_drvr_ver_disp()- Display the driver Name and Version * @dev: ptr to device not used. * @attr: device attribute, not used. 
* @buf: contains formatted text driver name and version * * return * size of the formatted string **/ ssize_t beiscsi_drvr_ver_disp(struct device *dev, struct device_attribute *attr, char *buf) { return snprintf(buf, PAGE_SIZE, BE_NAME "\n"); } /** * beiscsi_fw_ver_disp()- Display Firmware Version * @dev: ptr to device not used. * @attr: device attribute, not used. * @buf: contains formatted text Firmware version * * return * size of the formatted string **/ ssize_t beiscsi_fw_ver_disp(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct beiscsi_hba *phba = iscsi_host_priv(shost); return snprintf(buf, PAGE_SIZE, "%s\n", phba->fw_ver_str); } /** * beiscsi_active_session_disp()- Display Sessions Active * @dev: ptr to device not used. * @attr: device attribute, not used. * @buf: contains formatted text Session Count * * return * size of the formatted string **/ ssize_t beiscsi_active_session_disp(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct beiscsi_hba *phba = iscsi_host_priv(shost); uint16_t avlbl_cids = 0, ulp_num, len = 0, total_cids = 0; for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { avlbl_cids = BEISCSI_ULP_AVLBL_CID(phba, ulp_num); total_cids = BEISCSI_GET_CID_COUNT(phba, ulp_num); len += scnprintf(buf+len, PAGE_SIZE - len, "ULP%d : %d\n", ulp_num, (total_cids - avlbl_cids)); } else len += scnprintf(buf+len, PAGE_SIZE - len, "ULP%d : %d\n", ulp_num, 0); } return len; } /** * beiscsi_free_session_disp()- Display Avaliable Session * @dev: ptr to device not used. * @attr: device attribute, not used. * @buf: contains formatted text Session Count * * return * size of the formatted string **/ ssize_t beiscsi_free_session_disp(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct beiscsi_hba *phba = iscsi_host_priv(shost); uint16_t ulp_num, len = 0; for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) len += scnprintf(buf+len, PAGE_SIZE - len, "ULP%d : %d\n", ulp_num, BEISCSI_ULP_AVLBL_CID(phba, ulp_num)); else len += scnprintf(buf+len, PAGE_SIZE - len, "ULP%d : %d\n", ulp_num, 0); } return len; } /** * beiscsi_adap_family_disp()- Display adapter family. * @dev: ptr to device to get priv structure * @attr: device attribute, not used. * @buf: contains formatted text driver name and version * * return * size of the formatted string **/ ssize_t beiscsi_adap_family_disp(struct device *dev, struct device_attribute *attr, char *buf) { uint16_t dev_id = 0; struct Scsi_Host *shost = class_to_shost(dev); struct beiscsi_hba *phba = iscsi_host_priv(shost); dev_id = phba->pcidev->device; switch (dev_id) { case BE_DEVICE_ID1: case OC_DEVICE_ID1: case OC_DEVICE_ID2: return snprintf(buf, PAGE_SIZE, "Obsolete/Unsupported BE2 Adapter Family\n"); case BE_DEVICE_ID2: case OC_DEVICE_ID3: return snprintf(buf, PAGE_SIZE, "BE3-R Adapter Family\n"); case OC_SKH_ID1: return snprintf(buf, PAGE_SIZE, "Skyhawk-R Adapter Family\n"); default: return snprintf(buf, PAGE_SIZE, "Unknown Adapter Family: 0x%x\n", dev_id); } } /** * beiscsi_phys_port_disp()- Display Physical Port Identifier * @dev: ptr to device not used. * @attr: device attribute, not used. 
* @buf: contains formatted text port identifier * * return * size of the formatted string **/ ssize_t beiscsi_phys_port_disp(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct beiscsi_hba *phba = iscsi_host_priv(shost); return snprintf(buf, PAGE_SIZE, "Port Identifier : %u\n", phba->fw_config.phys_port); } void beiscsi_offload_cxn_v0(struct beiscsi_offload_params *params, struct wrb_handle *pwrb_handle, struct be_mem_descriptor *mem_descr, struct hwi_wrb_context *pwrb_context) { struct iscsi_wrb *pwrb = pwrb_handle->pwrb; AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, max_send_data_segment_length, pwrb, params->dw[offsetof(struct amap_beiscsi_offload_params, max_send_data_segment_length) / 32]); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, type, pwrb, BE_TGT_CTX_UPDT_CMD); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, first_burst_length, pwrb, params->dw[offsetof(struct amap_beiscsi_offload_params, first_burst_length) / 32]); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, erl, pwrb, (params->dw[offsetof(struct amap_beiscsi_offload_params, erl) / 32] & OFFLD_PARAMS_ERL)); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, dde, pwrb, (params->dw[offsetof(struct amap_beiscsi_offload_params, dde) / 32] & OFFLD_PARAMS_DDE) >> 2); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, hde, pwrb, (params->dw[offsetof(struct amap_beiscsi_offload_params, hde) / 32] & OFFLD_PARAMS_HDE) >> 3); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ir2t, pwrb, (params->dw[offsetof(struct amap_beiscsi_offload_params, ir2t) / 32] & OFFLD_PARAMS_IR2T) >> 4); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, imd, pwrb, (params->dw[offsetof(struct amap_beiscsi_offload_params, imd) / 32] & OFFLD_PARAMS_IMD) >> 5); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, stat_sn, pwrb, (params->dw[offsetof(struct amap_beiscsi_offload_params, exp_statsn) / 32] + 1)); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, wrb_idx, pwrb, pwrb_handle->wrb_index); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, max_burst_length, pwrb, params->dw[offsetof (struct amap_beiscsi_offload_params, max_burst_length) / 32]); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ptr2nextwrb, pwrb, pwrb_handle->wrb_index); if (pwrb_context->plast_wrb) AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ptr2nextwrb, pwrb_context->plast_wrb, pwrb_handle->wrb_index); pwrb_context->plast_wrb = pwrb; AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, session_state, pwrb, 0); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, compltonack, pwrb, 1); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, notpredblq, pwrb, 0); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, mode, pwrb, 0); mem_descr += ISCSI_MEM_GLOBAL_HEADER; AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, pad_buffer_addr_hi, pwrb, mem_descr->mem_array[0].bus_address.u.a32.address_hi); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, pad_buffer_addr_lo, pwrb, mem_descr->mem_array[0].bus_address.u.a32.address_lo); } void beiscsi_offload_cxn_v2(struct beiscsi_offload_params *params, struct wrb_handle *pwrb_handle, struct hwi_wrb_context *pwrb_context) { struct iscsi_wrb *pwrb = pwrb_handle->pwrb; AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, max_burst_length, pwrb, params->dw[offsetof (struct amap_beiscsi_offload_params, 
max_burst_length) / 32]); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, type, pwrb, BE_TGT_CTX_UPDT_CMD); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, ptr2nextwrb, pwrb, pwrb_handle->wrb_index); if (pwrb_context->plast_wrb) AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, ptr2nextwrb, pwrb_context->plast_wrb, pwrb_handle->wrb_index); pwrb_context->plast_wrb = pwrb; AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, wrb_idx, pwrb, pwrb_handle->wrb_index); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, max_send_data_segment_length, pwrb, params->dw[offsetof(struct amap_beiscsi_offload_params, max_send_data_segment_length) / 32]); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, first_burst_length, pwrb, params->dw[offsetof(struct amap_beiscsi_offload_params, first_burst_length) / 32]); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, max_recv_dataseg_len, pwrb, params->dw[offsetof(struct amap_beiscsi_offload_params, max_recv_data_segment_length) / 32]); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, max_cxns, pwrb, BEISCSI_MAX_CXNS); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, erl, pwrb, (params->dw[offsetof(struct amap_beiscsi_offload_params, erl) / 32] & OFFLD_PARAMS_ERL)); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, dde, pwrb, (params->dw[offsetof(struct amap_beiscsi_offload_params, dde) / 32] & OFFLD_PARAMS_DDE) >> 2); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, hde, pwrb, (params->dw[offsetof(struct amap_beiscsi_offload_params, hde) / 32] & OFFLD_PARAMS_HDE) >> 3); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, ir2t, pwrb, (params->dw[offsetof(struct amap_beiscsi_offload_params, ir2t) / 32] & OFFLD_PARAMS_IR2T) >> 4); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, imd, pwrb, (params->dw[offsetof(struct amap_beiscsi_offload_params, imd) / 32] & OFFLD_PARAMS_IMD) >> 5); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, data_seq_inorder, pwrb, (params->dw[offsetof(struct amap_beiscsi_offload_params, data_seq_inorder) / 32] & OFFLD_PARAMS_DATA_SEQ_INORDER) >> 6); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, pdu_seq_inorder, pwrb, (params->dw[offsetof(struct amap_beiscsi_offload_params, pdu_seq_inorder) / 32] & OFFLD_PARAMS_PDU_SEQ_INORDER) >> 7); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, max_r2t, pwrb, (params->dw[offsetof(struct amap_beiscsi_offload_params, max_r2t) / 32] & OFFLD_PARAMS_MAX_R2T) >> 8); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, stat_sn, pwrb, (params->dw[offsetof(struct amap_beiscsi_offload_params, exp_statsn) / 32] + 1)); } unsigned int beiscsi_invalidate_cxn(struct beiscsi_hba *phba, struct beiscsi_endpoint *beiscsi_ep) { struct be_invalidate_connection_params_in *req; struct be_ctrl_info *ctrl = &phba->ctrl; struct be_mcc_wrb *wrb; unsigned int tag = 0; mutex_lock(&ctrl->mbox_lock); wrb = alloc_mcc_wrb(phba, &tag); if (!wrb) { mutex_unlock(&ctrl->mbox_lock); return 0; } req = embedded_payload(wrb); be_wrb_hdr_prepare(wrb, sizeof(union be_invalidate_connection_params), true, 0); be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI, OPCODE_ISCSI_INI_DRIVER_INVALIDATE_CONNECTION, sizeof(*req)); req->session_handle = beiscsi_ep->fw_handle; req->cid = beiscsi_ep->ep_cid; if (beiscsi_ep->conn) req->cleanup_type = BE_CLEANUP_TYPE_INVALIDATE; else req->cleanup_type = BE_CLEANUP_TYPE_ISSUE_TCP_RST; /** * 0 - 
non-persistent targets * 1 - save session info on flash */ req->save_cfg = 0; be_mcc_notify(phba, tag); mutex_unlock(&ctrl->mbox_lock); return tag; } unsigned int beiscsi_upload_cxn(struct beiscsi_hba *phba, struct beiscsi_endpoint *beiscsi_ep) { struct be_ctrl_info *ctrl = &phba->ctrl; struct be_mcc_wrb *wrb; struct be_tcp_upload_params_in *req; unsigned int tag; mutex_lock(&ctrl->mbox_lock); wrb = alloc_mcc_wrb(phba, &tag); if (!wrb) { mutex_unlock(&ctrl->mbox_lock); return 0; } req = embedded_payload(wrb); be_wrb_hdr_prepare(wrb, sizeof(union be_tcp_upload_params), true, 0); be_cmd_hdr_prepare(&req->hdr, CMD_COMMON_TCP_UPLOAD, OPCODE_COMMON_TCP_UPLOAD, sizeof(*req)); req->id = beiscsi_ep->ep_cid; if (beiscsi_ep->conn) req->upload_type = BE_UPLOAD_TYPE_GRACEFUL; else req->upload_type = BE_UPLOAD_TYPE_ABORT; be_mcc_notify(phba, tag); mutex_unlock(&ctrl->mbox_lock); return tag; } int beiscsi_mgmt_invalidate_icds(struct beiscsi_hba *phba, struct invldt_cmd_tbl *inv_tbl, unsigned int nents) { struct be_ctrl_info *ctrl = &phba->ctrl; struct invldt_cmds_params_in *req; struct be_dma_mem nonemb_cmd; struct be_mcc_wrb *wrb; unsigned int i, tag; struct be_sge *sge; int rc; if (!nents || nents > BE_INVLDT_CMD_TBL_SZ) return -EINVAL; nonemb_cmd.size = sizeof(union be_invldt_cmds_params); nonemb_cmd.va = dma_alloc_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size, &nonemb_cmd.dma, GFP_KERNEL); if (!nonemb_cmd.va) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH, "BM_%d : invldt_cmds_params alloc failed\n"); return -ENOMEM; } mutex_lock(&ctrl->mbox_lock); wrb = alloc_mcc_wrb(phba, &tag); if (!wrb) { mutex_unlock(&ctrl->mbox_lock); dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size, nonemb_cmd.va, nonemb_cmd.dma); return -ENOMEM; } req = nonemb_cmd.va; be_wrb_hdr_prepare(wrb, nonemb_cmd.size, false, 1); be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, OPCODE_COMMON_ISCSI_ERROR_RECOVERY_INVALIDATE_COMMANDS, sizeof(*req)); req->ref_handle = 0; req->cleanup_type = CMD_ISCSI_COMMAND_INVALIDATE; for (i = 0; i < nents; i++) { req->table[i].icd = inv_tbl[i].icd; req->table[i].cid = inv_tbl[i].cid; req->icd_count++; } sge = nonembedded_sgl(wrb); sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd.dma)); sge->pa_lo = cpu_to_le32(lower_32_bits(nonemb_cmd.dma)); sge->len = cpu_to_le32(nonemb_cmd.size); be_mcc_notify(phba, tag); mutex_unlock(&ctrl->mbox_lock); rc = beiscsi_mccq_compl_wait(phba, tag, NULL, &nonemb_cmd); if (rc != -EBUSY) dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size, nonemb_cmd.va, nonemb_cmd.dma); return rc; }
linux-master
drivers/scsi/be2iscsi/be_mgmt.c
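Illustrative note (not part of the dataset record above): mgmt_open_connection() fills the firmware's IPv4 address field one octet at a time by masking and shifting the 32-bit network-order sin_addr value. The standalone sketch below mirrors that masking so the byte layout is explicit. The file name ip_unpack_sketch.c and the sample address are assumptions; as in the driver, the resulting octet order depends on the host being little-endian.

/* ip_unpack_sketch.c - hedged, standalone sketch; not driver code. */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

/*
 * Mirror of the masking in mgmt_open_connection(): the four octets of the
 * big-endian s_addr are extracted starting from the value's low-order
 * byte. On a little-endian host this places the leading octet of the
 * dotted-quad address in addr[0].
 */
static void unpack_ipv4(uint32_t s_addr_be, uint8_t addr[4])
{
	addr[0] = s_addr_be & 0x000000ff;
	addr[1] = (s_addr_be & 0x0000ff00) >> 8;
	addr[2] = (s_addr_be & 0x00ff0000) >> 16;
	addr[3] = (s_addr_be & 0xff000000) >> 24;
}

int main(void)
{
	uint8_t addr[4];

	/* inet_addr() returns the address in network byte order. */
	unpack_ipv4(inet_addr("192.168.10.20"), addr);
	printf("%u.%u.%u.%u\n", (unsigned)addr[0], (unsigned)addr[1],
	       (unsigned)addr[2], (unsigned)addr[3]);
	return 0;
}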
// SPDX-License-Identifier: GPL-2.0-only /* * This file is part of the Emulex Linux Device Driver for Enterprise iSCSI * Host Bus Adapters. Refer to the README file included with this package * for driver version and adapter compatibility. * * Copyright (c) 2018 Broadcom. All Rights Reserved. * The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries. * * Contact Information: * [email protected] */ #include <scsi/libiscsi.h> #include <scsi/scsi_transport_iscsi.h> #include <scsi/scsi_transport.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_netlink.h> #include <net/netlink.h> #include <scsi/scsi.h> #include "be_iscsi.h" extern struct iscsi_transport beiscsi_iscsi_transport; /** * beiscsi_session_create - creates a new iscsi session * @ep: pointer to iscsi ep * @cmds_max: max commands supported * @qdepth: max queue depth supported * @initial_cmdsn: initial iscsi CMDSN */ struct iscsi_cls_session *beiscsi_session_create(struct iscsi_endpoint *ep, u16 cmds_max, u16 qdepth, u32 initial_cmdsn) { struct Scsi_Host *shost; struct beiscsi_endpoint *beiscsi_ep; struct iscsi_cls_session *cls_session; struct beiscsi_hba *phba; struct iscsi_session *sess; struct beiscsi_session *beiscsi_sess; struct beiscsi_io_task *io_task; if (!ep) { pr_err("beiscsi_session_create: invalid ep\n"); return NULL; } beiscsi_ep = ep->dd_data; phba = beiscsi_ep->phba; if (!beiscsi_hba_is_online(phba)) { beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, "BS_%d : HBA in error 0x%lx\n", phba->state); return NULL; } beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, "BS_%d : In beiscsi_session_create\n"); if (cmds_max > beiscsi_ep->phba->params.wrbs_per_cxn) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, "BS_%d : Cannot handle %d cmds." "Max cmds per session supported is %d. Using %d." "\n", cmds_max, beiscsi_ep->phba->params.wrbs_per_cxn, beiscsi_ep->phba->params.wrbs_per_cxn); cmds_max = beiscsi_ep->phba->params.wrbs_per_cxn; } shost = phba->shost; cls_session = iscsi_session_setup(&beiscsi_iscsi_transport, shost, cmds_max, sizeof(*beiscsi_sess), sizeof(*io_task), initial_cmdsn, ISCSI_MAX_TARGET); if (!cls_session) return NULL; sess = cls_session->dd_data; beiscsi_sess = sess->dd_data; beiscsi_sess->bhs_pool = dma_pool_create("beiscsi_bhs_pool", &phba->pcidev->dev, sizeof(struct be_cmd_bhs), 64, 0); if (!beiscsi_sess->bhs_pool) goto destroy_sess; return cls_session; destroy_sess: iscsi_session_teardown(cls_session); return NULL; } /** * beiscsi_session_destroy - destroys iscsi session * @cls_session: pointer to iscsi cls session * * Destroys iSCSI session instance and releases * resources allocated for it. 
*/ void beiscsi_session_destroy(struct iscsi_cls_session *cls_session) { struct iscsi_session *sess = cls_session->dd_data; struct beiscsi_session *beiscsi_sess = sess->dd_data; printk(KERN_INFO "In beiscsi_session_destroy\n"); dma_pool_destroy(beiscsi_sess->bhs_pool); iscsi_session_teardown(cls_session); } /** * beiscsi_session_fail(): Closing session with appropriate error * @cls_session: ptr to session **/ void beiscsi_session_fail(struct iscsi_cls_session *cls_session) { iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED); } /** * beiscsi_conn_create - create an instance of iscsi connection * @cls_session: ptr to iscsi_cls_session * @cid: iscsi cid */ struct iscsi_cls_conn * beiscsi_conn_create(struct iscsi_cls_session *cls_session, u32 cid) { struct beiscsi_hba *phba; struct Scsi_Host *shost; struct iscsi_cls_conn *cls_conn; struct beiscsi_conn *beiscsi_conn; struct iscsi_conn *conn; struct iscsi_session *sess; struct beiscsi_session *beiscsi_sess; shost = iscsi_session_to_shost(cls_session); phba = iscsi_host_priv(shost); beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, "BS_%d : In beiscsi_conn_create ,cid" "from iscsi layer=%d\n", cid); cls_conn = iscsi_conn_setup(cls_session, sizeof(*beiscsi_conn), cid); if (!cls_conn) return NULL; conn = cls_conn->dd_data; beiscsi_conn = conn->dd_data; beiscsi_conn->ep = NULL; beiscsi_conn->phba = phba; beiscsi_conn->conn = conn; sess = cls_session->dd_data; beiscsi_sess = sess->dd_data; beiscsi_conn->beiscsi_sess = beiscsi_sess; return cls_conn; } /** * beiscsi_conn_bind - Binds iscsi session/connection with TCP connection * @cls_session: pointer to iscsi cls session * @cls_conn: pointer to iscsi cls conn * @transport_fd: EP handle(64 bit) * @is_leading: indicate if this is the session leading connection (MCS) * * This function binds the TCP Conn with iSCSI Connection and Session. */ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session, struct iscsi_cls_conn *cls_conn, u64 transport_fd, int is_leading) { struct iscsi_conn *conn = cls_conn->dd_data; struct beiscsi_conn *beiscsi_conn = conn->dd_data; struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); struct beiscsi_hba *phba = iscsi_host_priv(shost); struct hwi_controller *phwi_ctrlr = phba->phwi_ctrlr; struct hwi_wrb_context *pwrb_context; struct beiscsi_endpoint *beiscsi_ep; struct iscsi_endpoint *ep; uint16_t cri_index; int rc = 0; ep = iscsi_lookup_endpoint(transport_fd); if (!ep) return -EINVAL; beiscsi_ep = ep->dd_data; if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) { rc = -EINVAL; goto put_ep; } if (beiscsi_ep->phba != phba) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, "BS_%d : beiscsi_ep->hba=%p not equal to phba=%p\n", beiscsi_ep->phba, phba); rc = -EEXIST; goto put_ep; } cri_index = BE_GET_CRI_FROM_CID(beiscsi_ep->ep_cid); if (phba->conn_table[cri_index]) { if (beiscsi_conn != phba->conn_table[cri_index] || beiscsi_ep != phba->conn_table[cri_index]->ep) { __beiscsi_log(phba, KERN_ERR, "BS_%d : conn_table not empty at %u: cid %u conn %p:%p\n", cri_index, beiscsi_ep->ep_cid, beiscsi_conn, phba->conn_table[cri_index]); rc = -EINVAL; goto put_ep; } } beiscsi_conn->beiscsi_conn_cid = beiscsi_ep->ep_cid; beiscsi_conn->ep = beiscsi_ep; beiscsi_ep->conn = beiscsi_conn; /** * Each connection is associated with a WRBQ kept in wrb_context. * Store doorbell offset for transmit path. 
*/ pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; beiscsi_conn->doorbell_offset = pwrb_context->doorbell_offset; beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, "BS_%d : cid %d phba->conn_table[%u]=%p\n", beiscsi_ep->ep_cid, cri_index, beiscsi_conn); phba->conn_table[cri_index] = beiscsi_conn; put_ep: iscsi_put_endpoint(ep); return rc; } static int beiscsi_iface_create_ipv4(struct beiscsi_hba *phba) { if (phba->ipv4_iface) return 0; phba->ipv4_iface = iscsi_create_iface(phba->shost, &beiscsi_iscsi_transport, ISCSI_IFACE_TYPE_IPV4, 0, 0); if (!phba->ipv4_iface) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, "BS_%d : Could not " "create default IPv4 address.\n"); return -ENODEV; } return 0; } static int beiscsi_iface_create_ipv6(struct beiscsi_hba *phba) { if (phba->ipv6_iface) return 0; phba->ipv6_iface = iscsi_create_iface(phba->shost, &beiscsi_iscsi_transport, ISCSI_IFACE_TYPE_IPV6, 0, 0); if (!phba->ipv6_iface) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, "BS_%d : Could not " "create default IPv6 address.\n"); return -ENODEV; } return 0; } void beiscsi_iface_create_default(struct beiscsi_hba *phba) { struct be_cmd_get_if_info_resp *if_info; if (!beiscsi_if_get_info(phba, BEISCSI_IP_TYPE_V4, &if_info)) { beiscsi_iface_create_ipv4(phba); kfree(if_info); } if (!beiscsi_if_get_info(phba, BEISCSI_IP_TYPE_V6, &if_info)) { beiscsi_iface_create_ipv6(phba); kfree(if_info); } } void beiscsi_iface_destroy_default(struct beiscsi_hba *phba) { if (phba->ipv6_iface) { iscsi_destroy_iface(phba->ipv6_iface); phba->ipv6_iface = NULL; } if (phba->ipv4_iface) { iscsi_destroy_iface(phba->ipv4_iface); phba->ipv4_iface = NULL; } } /** * beiscsi_iface_config_vlan()- Set the VLAN TAG * @shost: Scsi Host for the driver instance * @iface_param: Interface paramters * * Set the VLAN TAG for the adapter or disable * the VLAN config * * returns * Success: 0 * Failure: Non-Zero Value **/ static int beiscsi_iface_config_vlan(struct Scsi_Host *shost, struct iscsi_iface_param_info *iface_param) { struct beiscsi_hba *phba = iscsi_host_priv(shost); int ret = -EPERM; switch (iface_param->param) { case ISCSI_NET_PARAM_VLAN_ENABLED: ret = 0; if (iface_param->value[0] != ISCSI_VLAN_ENABLE) ret = beiscsi_if_set_vlan(phba, BEISCSI_VLAN_DISABLE); break; case ISCSI_NET_PARAM_VLAN_TAG: ret = beiscsi_if_set_vlan(phba, *((uint16_t *)iface_param->value)); break; } return ret; } static int beiscsi_iface_config_ipv4(struct Scsi_Host *shost, struct iscsi_iface_param_info *info, void *data, uint32_t dt_len) { struct beiscsi_hba *phba = iscsi_host_priv(shost); u8 *ip = NULL, *subnet = NULL, *gw; struct nlattr *nla; int ret = -EPERM; /* Check the param */ switch (info->param) { case ISCSI_NET_PARAM_IFACE_ENABLE: if (info->value[0] == ISCSI_IFACE_ENABLE) ret = beiscsi_iface_create_ipv4(phba); else { iscsi_destroy_iface(phba->ipv4_iface); phba->ipv4_iface = NULL; } break; case ISCSI_NET_PARAM_IPV4_GW: gw = info->value; ret = beiscsi_if_set_gw(phba, BEISCSI_IP_TYPE_V4, gw); break; case ISCSI_NET_PARAM_IPV4_BOOTPROTO: if (info->value[0] == ISCSI_BOOTPROTO_DHCP) ret = beiscsi_if_en_dhcp(phba, BEISCSI_IP_TYPE_V4); else if (info->value[0] == ISCSI_BOOTPROTO_STATIC) /* release DHCP IP address */ ret = beiscsi_if_en_static(phba, BEISCSI_IP_TYPE_V4, NULL, NULL); else beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, "BS_%d : Invalid BOOTPROTO: %d\n", info->value[0]); break; case ISCSI_NET_PARAM_IPV4_ADDR: ip = info->value; nla = nla_find(data, dt_len, ISCSI_NET_PARAM_IPV4_SUBNET); if (nla) { info = nla_data(nla); subnet = info->value; } 
ret = beiscsi_if_en_static(phba, BEISCSI_IP_TYPE_V4, ip, subnet); break; case ISCSI_NET_PARAM_IPV4_SUBNET: /* * OPCODE_COMMON_ISCSI_NTWK_MODIFY_IP_ADDR ioctl needs IP * and subnet both. Find IP to be applied for this subnet. */ subnet = info->value; nla = nla_find(data, dt_len, ISCSI_NET_PARAM_IPV4_ADDR); if (nla) { info = nla_data(nla); ip = info->value; } ret = beiscsi_if_en_static(phba, BEISCSI_IP_TYPE_V4, ip, subnet); break; } return ret; } static int beiscsi_iface_config_ipv6(struct Scsi_Host *shost, struct iscsi_iface_param_info *iface_param, void *data, uint32_t dt_len) { struct beiscsi_hba *phba = iscsi_host_priv(shost); int ret = -EPERM; switch (iface_param->param) { case ISCSI_NET_PARAM_IFACE_ENABLE: if (iface_param->value[0] == ISCSI_IFACE_ENABLE) ret = beiscsi_iface_create_ipv6(phba); else { iscsi_destroy_iface(phba->ipv6_iface); phba->ipv6_iface = NULL; } break; case ISCSI_NET_PARAM_IPV6_ADDR: ret = beiscsi_if_en_static(phba, BEISCSI_IP_TYPE_V6, iface_param->value, NULL); break; } return ret; } int beiscsi_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t dt_len) { struct iscsi_iface_param_info *iface_param = NULL; struct beiscsi_hba *phba = iscsi_host_priv(shost); struct nlattr *attrib; uint32_t rm_len = dt_len; int ret; if (!beiscsi_hba_is_online(phba)) { beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, "BS_%d : HBA in error 0x%lx\n", phba->state); return -EBUSY; } /* update interface_handle */ ret = beiscsi_if_get_handle(phba); if (ret) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, "BS_%d : Getting Interface Handle Failed\n"); return ret; } nla_for_each_attr(attrib, data, dt_len, rm_len) { /* ignore nla_type as it is never used */ if (nla_len(attrib) < sizeof(*iface_param)) return -EINVAL; iface_param = nla_data(attrib); if (iface_param->param_type != ISCSI_NET_PARAM) continue; /* * BE2ISCSI only supports 1 interface */ if (iface_param->iface_num) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, "BS_%d : Invalid iface_num %d." "Only iface_num 0 is supported.\n", iface_param->iface_num); return -EINVAL; } beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, "BS_%d : %s.0 set param %d", (iface_param->iface_type == ISCSI_IFACE_TYPE_IPV4) ? "ipv4" : "ipv6", iface_param->param); ret = -EPERM; switch (iface_param->param) { case ISCSI_NET_PARAM_VLAN_ENABLED: case ISCSI_NET_PARAM_VLAN_TAG: ret = beiscsi_iface_config_vlan(shost, iface_param); break; default: switch (iface_param->iface_type) { case ISCSI_IFACE_TYPE_IPV4: ret = beiscsi_iface_config_ipv4(shost, iface_param, data, dt_len); break; case ISCSI_IFACE_TYPE_IPV6: ret = beiscsi_iface_config_ipv6(shost, iface_param, data, dt_len); break; } } if (ret == -EPERM) { __beiscsi_log(phba, KERN_ERR, "BS_%d : %s.0 set param %d not permitted", (iface_param->iface_type == ISCSI_IFACE_TYPE_IPV4) ? 
"ipv4" : "ipv6", iface_param->param); ret = 0; } if (ret) break; } return ret; } static int __beiscsi_iface_get_param(struct beiscsi_hba *phba, struct iscsi_iface *iface, int param, char *buf) { struct be_cmd_get_if_info_resp *if_info; int len, ip_type = BEISCSI_IP_TYPE_V4; if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6) ip_type = BEISCSI_IP_TYPE_V6; len = beiscsi_if_get_info(phba, ip_type, &if_info); if (len) return len; switch (param) { case ISCSI_NET_PARAM_IPV4_ADDR: len = sprintf(buf, "%pI4\n", if_info->ip_addr.addr); break; case ISCSI_NET_PARAM_IPV6_ADDR: len = sprintf(buf, "%pI6\n", if_info->ip_addr.addr); break; case ISCSI_NET_PARAM_IPV4_BOOTPROTO: if (!if_info->dhcp_state) len = sprintf(buf, "static\n"); else len = sprintf(buf, "dhcp\n"); break; case ISCSI_NET_PARAM_IPV4_SUBNET: len = sprintf(buf, "%pI4\n", if_info->ip_addr.subnet_mask); break; case ISCSI_NET_PARAM_VLAN_ENABLED: len = sprintf(buf, "%s\n", (if_info->vlan_priority == BEISCSI_VLAN_DISABLE) ? "disable" : "enable"); break; case ISCSI_NET_PARAM_VLAN_ID: if (if_info->vlan_priority == BEISCSI_VLAN_DISABLE) len = -EINVAL; else len = sprintf(buf, "%d\n", (if_info->vlan_priority & ISCSI_MAX_VLAN_ID)); break; case ISCSI_NET_PARAM_VLAN_PRIORITY: if (if_info->vlan_priority == BEISCSI_VLAN_DISABLE) len = -EINVAL; else len = sprintf(buf, "%d\n", ((if_info->vlan_priority >> 13) & ISCSI_MAX_VLAN_PRIORITY)); break; default: WARN_ON(1); } kfree(if_info); return len; } int beiscsi_iface_get_param(struct iscsi_iface *iface, enum iscsi_param_type param_type, int param, char *buf) { struct Scsi_Host *shost = iscsi_iface_to_shost(iface); struct beiscsi_hba *phba = iscsi_host_priv(shost); struct be_cmd_get_def_gateway_resp gateway; int len = -EPERM; if (param_type != ISCSI_NET_PARAM) return 0; if (!beiscsi_hba_is_online(phba)) { beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, "BS_%d : HBA in error 0x%lx\n", phba->state); return -EBUSY; } switch (param) { case ISCSI_NET_PARAM_IPV4_ADDR: case ISCSI_NET_PARAM_IPV4_SUBNET: case ISCSI_NET_PARAM_IPV4_BOOTPROTO: case ISCSI_NET_PARAM_IPV6_ADDR: case ISCSI_NET_PARAM_VLAN_ENABLED: case ISCSI_NET_PARAM_VLAN_ID: case ISCSI_NET_PARAM_VLAN_PRIORITY: len = __beiscsi_iface_get_param(phba, iface, param, buf); break; case ISCSI_NET_PARAM_IFACE_ENABLE: if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) len = sprintf(buf, "%s\n", phba->ipv4_iface ? "enable" : "disable"); else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6) len = sprintf(buf, "%s\n", phba->ipv6_iface ? 
"enable" : "disable"); break; case ISCSI_NET_PARAM_IPV4_GW: memset(&gateway, 0, sizeof(gateway)); len = beiscsi_if_get_gw(phba, BEISCSI_IP_TYPE_V4, &gateway); if (!len) len = sprintf(buf, "%pI4\n", &gateway.ip_addr.addr); break; } return len; } /** * beiscsi_ep_get_param - get the iscsi parameter * @ep: pointer to iscsi ep * @param: parameter type identifier * @buf: buffer pointer * * returns iscsi parameter */ int beiscsi_ep_get_param(struct iscsi_endpoint *ep, enum iscsi_param param, char *buf) { struct beiscsi_endpoint *beiscsi_ep = ep->dd_data; int len; beiscsi_log(beiscsi_ep->phba, KERN_INFO, BEISCSI_LOG_CONFIG, "BS_%d : In beiscsi_ep_get_param," " param= %d\n", param); switch (param) { case ISCSI_PARAM_CONN_PORT: len = sprintf(buf, "%hu\n", beiscsi_ep->dst_tcpport); break; case ISCSI_PARAM_CONN_ADDRESS: if (beiscsi_ep->ip_type == BEISCSI_IP_TYPE_V4) len = sprintf(buf, "%pI4\n", &beiscsi_ep->dst_addr); else len = sprintf(buf, "%pI6\n", &beiscsi_ep->dst6_addr); break; default: len = -EPERM; } return len; } int beiscsi_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param, char *buf, int buflen) { struct iscsi_conn *conn = cls_conn->dd_data; struct iscsi_session *session = conn->session; struct beiscsi_hba *phba = NULL; int ret; phba = ((struct beiscsi_conn *)conn->dd_data)->phba; beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, "BS_%d : In beiscsi_conn_set_param," " param= %d\n", param); ret = iscsi_set_param(cls_conn, param, buf, buflen); if (ret) return ret; /* * If userspace tried to set the value to higher than we can * support override here. */ switch (param) { case ISCSI_PARAM_FIRST_BURST: if (session->first_burst > 8192) session->first_burst = 8192; break; case ISCSI_PARAM_MAX_RECV_DLENGTH: if (conn->max_recv_dlength > 65536) conn->max_recv_dlength = 65536; break; case ISCSI_PARAM_MAX_BURST: if (session->max_burst > 262144) session->max_burst = 262144; break; case ISCSI_PARAM_MAX_XMIT_DLENGTH: if (conn->max_xmit_dlength > 65536) conn->max_xmit_dlength = 65536; fallthrough; default: return 0; } return 0; } /** * beiscsi_get_port_state - Get the Port State * @shost : pointer to scsi_host structure * */ static void beiscsi_get_port_state(struct Scsi_Host *shost) { struct beiscsi_hba *phba = iscsi_host_priv(shost); struct iscsi_cls_host *ihost = shost->shost_data; ihost->port_state = test_bit(BEISCSI_HBA_LINK_UP, &phba->state) ? 
ISCSI_PORT_STATE_UP : ISCSI_PORT_STATE_DOWN; } /** * beiscsi_get_port_speed - Get the Port Speed from Adapter * @shost : pointer to scsi_host structure * */ static void beiscsi_get_port_speed(struct Scsi_Host *shost) { struct beiscsi_hba *phba = iscsi_host_priv(shost); struct iscsi_cls_host *ihost = shost->shost_data; switch (phba->port_speed) { case BE2ISCSI_LINK_SPEED_10MBPS: ihost->port_speed = ISCSI_PORT_SPEED_10MBPS; break; case BE2ISCSI_LINK_SPEED_100MBPS: ihost->port_speed = ISCSI_PORT_SPEED_100MBPS; break; case BE2ISCSI_LINK_SPEED_1GBPS: ihost->port_speed = ISCSI_PORT_SPEED_1GBPS; break; case BE2ISCSI_LINK_SPEED_10GBPS: ihost->port_speed = ISCSI_PORT_SPEED_10GBPS; break; case BE2ISCSI_LINK_SPEED_25GBPS: ihost->port_speed = ISCSI_PORT_SPEED_25GBPS; break; case BE2ISCSI_LINK_SPEED_40GBPS: ihost->port_speed = ISCSI_PORT_SPEED_40GBPS; break; default: ihost->port_speed = ISCSI_PORT_SPEED_UNKNOWN; } } /** * beiscsi_get_host_param - get the iscsi parameter * @shost: pointer to scsi_host structure * @param: parameter type identifier * @buf: buffer pointer * */ int beiscsi_get_host_param(struct Scsi_Host *shost, enum iscsi_host_param param, char *buf) { struct beiscsi_hba *phba = iscsi_host_priv(shost); int status = 0; if (!beiscsi_hba_is_online(phba)) { beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, "BS_%d : HBA in error 0x%lx\n", phba->state); return 0; } beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, "BS_%d : In beiscsi_get_host_param, param = %d\n", param); switch (param) { case ISCSI_HOST_PARAM_HWADDRESS: status = beiscsi_get_macaddr(buf, phba); if (status < 0) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, "BS_%d : beiscsi_get_macaddr Failed\n"); return 0; } break; case ISCSI_HOST_PARAM_INITIATOR_NAME: /* try fetching user configured name first */ status = beiscsi_get_initiator_name(phba, buf, true); if (status < 0) { status = beiscsi_get_initiator_name(phba, buf, false); if (status < 0) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, "BS_%d : Retrieving Initiator Name Failed\n"); status = 0; } } break; case ISCSI_HOST_PARAM_PORT_STATE: beiscsi_get_port_state(shost); status = sprintf(buf, "%s\n", iscsi_get_port_state_name(shost)); break; case ISCSI_HOST_PARAM_PORT_SPEED: beiscsi_get_port_speed(shost); status = sprintf(buf, "%s\n", iscsi_get_port_speed_name(shost)); break; default: return iscsi_host_get_param(shost, param, buf); } return status; } int beiscsi_get_macaddr(char *buf, struct beiscsi_hba *phba) { struct be_cmd_get_nic_conf_resp resp; int rc; if (phba->mac_addr_set) return sysfs_format_mac(buf, phba->mac_address, ETH_ALEN); memset(&resp, 0, sizeof(resp)); rc = mgmt_get_nic_conf(phba, &resp); if (rc) return rc; phba->mac_addr_set = true; memcpy(phba->mac_address, resp.mac_address, ETH_ALEN); return sysfs_format_mac(buf, phba->mac_address, ETH_ALEN); } /** * beiscsi_conn_get_stats - get the iscsi stats * @cls_conn: pointer to iscsi cls conn * @stats: pointer to iscsi_stats structure * * returns iscsi stats */ void beiscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats) { struct iscsi_conn *conn = cls_conn->dd_data; struct beiscsi_hba *phba = NULL; phba = ((struct beiscsi_conn *)conn->dd_data)->phba; beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, "BS_%d : In beiscsi_conn_get_stats\n"); stats->txdata_octets = conn->txdata_octets; stats->rxdata_octets = conn->rxdata_octets; stats->dataout_pdus = conn->dataout_pdus_cnt; stats->scsirsp_pdus = conn->scsirsp_pdus_cnt; stats->scsicmd_pdus = conn->scsicmd_pdus_cnt; stats->datain_pdus = 
conn->datain_pdus_cnt; stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt; stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt; stats->r2t_pdus = conn->r2t_pdus_cnt; stats->digest_err = 0; stats->timeout_err = 0; stats->custom_length = 1; strcpy(stats->custom[0].desc, "eh_abort_cnt"); stats->custom[0].value = conn->eh_abort_cnt; } /** * beiscsi_set_params_for_offld - get the parameters for offload * @beiscsi_conn: pointer to beiscsi_conn * @params: pointer to offload_params structure */ static void beiscsi_set_params_for_offld(struct beiscsi_conn *beiscsi_conn, struct beiscsi_offload_params *params) { struct iscsi_conn *conn = beiscsi_conn->conn; struct iscsi_session *session = conn->session; AMAP_SET_BITS(struct amap_beiscsi_offload_params, max_burst_length, params, session->max_burst); AMAP_SET_BITS(struct amap_beiscsi_offload_params, max_send_data_segment_length, params, conn->max_xmit_dlength); AMAP_SET_BITS(struct amap_beiscsi_offload_params, first_burst_length, params, session->first_burst); AMAP_SET_BITS(struct amap_beiscsi_offload_params, erl, params, session->erl); AMAP_SET_BITS(struct amap_beiscsi_offload_params, dde, params, conn->datadgst_en); AMAP_SET_BITS(struct amap_beiscsi_offload_params, hde, params, conn->hdrdgst_en); AMAP_SET_BITS(struct amap_beiscsi_offload_params, ir2t, params, session->initial_r2t_en); AMAP_SET_BITS(struct amap_beiscsi_offload_params, imd, params, session->imm_data_en); AMAP_SET_BITS(struct amap_beiscsi_offload_params, data_seq_inorder, params, session->dataseq_inorder_en); AMAP_SET_BITS(struct amap_beiscsi_offload_params, pdu_seq_inorder, params, session->pdu_inorder_en); AMAP_SET_BITS(struct amap_beiscsi_offload_params, max_r2t, params, session->max_r2t); AMAP_SET_BITS(struct amap_beiscsi_offload_params, exp_statsn, params, (conn->exp_statsn - 1)); AMAP_SET_BITS(struct amap_beiscsi_offload_params, max_recv_data_segment_length, params, conn->max_recv_dlength); } /** * beiscsi_conn_start - offload of session to chip * @cls_conn: pointer to beiscsi_conn */ int beiscsi_conn_start(struct iscsi_cls_conn *cls_conn) { struct iscsi_conn *conn = cls_conn->dd_data; struct beiscsi_conn *beiscsi_conn = conn->dd_data; struct beiscsi_endpoint *beiscsi_ep; struct beiscsi_offload_params params; struct beiscsi_hba *phba; phba = ((struct beiscsi_conn *)conn->dd_data)->phba; if (!beiscsi_hba_is_online(phba)) { beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, "BS_%d : HBA in error 0x%lx\n", phba->state); return -EBUSY; } beiscsi_log(beiscsi_conn->phba, KERN_INFO, BEISCSI_LOG_CONFIG, "BS_%d : In beiscsi_conn_start\n"); memset(&params, 0, sizeof(struct beiscsi_offload_params)); beiscsi_ep = beiscsi_conn->ep; if (!beiscsi_ep) beiscsi_log(beiscsi_conn->phba, KERN_ERR, BEISCSI_LOG_CONFIG, "BS_%d : In beiscsi_conn_start , no beiscsi_ep\n"); beiscsi_conn->login_in_progress = 0; beiscsi_set_params_for_offld(beiscsi_conn, &params); beiscsi_offload_connection(beiscsi_conn, &params); iscsi_conn_start(cls_conn); return 0; } /** * beiscsi_get_cid - Allocate a cid * @phba: The phba instance */ static int beiscsi_get_cid(struct beiscsi_hba *phba) { uint16_t cid_avlbl_ulp0, cid_avlbl_ulp1; unsigned short cid, cid_from_ulp; struct ulp_cid_info *cid_info; /* Find the ULP which has more CID available */ cid_avlbl_ulp0 = (phba->cid_array_info[BEISCSI_ULP0]) ? BEISCSI_ULP0_AVLBL_CID(phba) : 0; cid_avlbl_ulp1 = (phba->cid_array_info[BEISCSI_ULP1]) ? BEISCSI_ULP1_AVLBL_CID(phba) : 0; cid_from_ulp = (cid_avlbl_ulp0 > cid_avlbl_ulp1) ? 
BEISCSI_ULP0 : BEISCSI_ULP1; /** * If iSCSI protocol is loaded only on ULP 0, and when cid_avlbl_ulp * is ZERO for both, ULP 1 is returned. * Check if ULP is loaded before getting new CID. */ if (!test_bit(cid_from_ulp, (void *)&phba->fw_config.ulp_supported)) return BE_INVALID_CID; cid_info = phba->cid_array_info[cid_from_ulp]; cid = cid_info->cid_array[cid_info->cid_alloc]; if (!cid_info->avlbl_cids || cid == BE_INVALID_CID) { __beiscsi_log(phba, KERN_ERR, "BS_%d : failed to get cid: available %u:%u\n", cid_info->avlbl_cids, cid_info->cid_free); return BE_INVALID_CID; } /* empty the slot */ cid_info->cid_array[cid_info->cid_alloc++] = BE_INVALID_CID; if (cid_info->cid_alloc == BEISCSI_GET_CID_COUNT(phba, cid_from_ulp)) cid_info->cid_alloc = 0; cid_info->avlbl_cids--; return cid; } /** * beiscsi_put_cid - Free the cid * @phba: The phba for which the cid is being freed * @cid: The cid to free */ static void beiscsi_put_cid(struct beiscsi_hba *phba, unsigned short cid) { uint16_t cri_index = BE_GET_CRI_FROM_CID(cid); struct hwi_wrb_context *pwrb_context; struct hwi_controller *phwi_ctrlr; struct ulp_cid_info *cid_info; uint16_t cid_post_ulp; phwi_ctrlr = phba->phwi_ctrlr; pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; cid_post_ulp = pwrb_context->ulp_num; cid_info = phba->cid_array_info[cid_post_ulp]; /* fill only in empty slot */ if (cid_info->cid_array[cid_info->cid_free] != BE_INVALID_CID) { __beiscsi_log(phba, KERN_ERR, "BS_%d : failed to put cid %u: available %u:%u\n", cid, cid_info->avlbl_cids, cid_info->cid_free); return; } cid_info->cid_array[cid_info->cid_free++] = cid; if (cid_info->cid_free == BEISCSI_GET_CID_COUNT(phba, cid_post_ulp)) cid_info->cid_free = 0; cid_info->avlbl_cids++; } /** * beiscsi_free_ep - free endpoint * @beiscsi_ep: pointer to device endpoint struct */ static void beiscsi_free_ep(struct beiscsi_endpoint *beiscsi_ep) { struct beiscsi_hba *phba = beiscsi_ep->phba; struct beiscsi_conn *beiscsi_conn; beiscsi_put_cid(phba, beiscsi_ep->ep_cid); beiscsi_ep->phba = NULL; /* clear this to track freeing in beiscsi_ep_disconnect */ phba->ep_array[BE_GET_CRI_FROM_CID(beiscsi_ep->ep_cid)] = NULL; /** * Check if any connection resource allocated by driver * is to be freed.This case occurs when target redirection * or connection retry is done. **/ if (!beiscsi_ep->conn) return; beiscsi_conn = beiscsi_ep->conn; /** * Break ep->conn link here so that completions after * this are ignored. 
*/ beiscsi_ep->conn = NULL; if (beiscsi_conn->login_in_progress) { beiscsi_free_mgmt_task_handles(beiscsi_conn, beiscsi_conn->task); beiscsi_conn->login_in_progress = 0; } } /** * beiscsi_open_conn - Ask FW to open a TCP connection * @ep: pointer to device endpoint struct * @src_addr: The source IP address * @dst_addr: The Destination IP address * @non_blocking: blocking or non-blocking call * * Asks the FW to open a TCP connection */ static int beiscsi_open_conn(struct iscsi_endpoint *ep, struct sockaddr *src_addr, struct sockaddr *dst_addr, int non_blocking) { struct beiscsi_endpoint *beiscsi_ep = ep->dd_data; struct beiscsi_hba *phba = beiscsi_ep->phba; struct tcp_connect_and_offload_out *ptcpcnct_out; struct be_dma_mem nonemb_cmd; unsigned int tag, req_memsize; int ret = -ENOMEM; beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, "BS_%d : In beiscsi_open_conn\n"); beiscsi_ep->ep_cid = beiscsi_get_cid(phba); if (beiscsi_ep->ep_cid == BE_INVALID_CID) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, "BS_%d : No free cid available\n"); return ret; } beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, "BS_%d : In beiscsi_open_conn, ep_cid=%d\n", beiscsi_ep->ep_cid); phba->ep_array[BE_GET_CRI_FROM_CID (beiscsi_ep->ep_cid)] = ep; beiscsi_ep->cid_vld = 0; if (is_chip_be2_be3r(phba)) req_memsize = sizeof(struct tcp_connect_and_offload_in); else req_memsize = sizeof(struct tcp_connect_and_offload_in_v1); nonemb_cmd.va = dma_alloc_coherent(&phba->ctrl.pdev->dev, req_memsize, &nonemb_cmd.dma, GFP_KERNEL); if (nonemb_cmd.va == NULL) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, "BS_%d : Failed to allocate memory for" " mgmt_open_connection\n"); beiscsi_free_ep(beiscsi_ep); return -ENOMEM; } nonemb_cmd.size = req_memsize; memset(nonemb_cmd.va, 0, nonemb_cmd.size); tag = mgmt_open_connection(phba, dst_addr, beiscsi_ep, &nonemb_cmd); if (!tag) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, "BS_%d : mgmt_open_connection Failed for cid=%d\n", beiscsi_ep->ep_cid); dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size, nonemb_cmd.va, nonemb_cmd.dma); beiscsi_free_ep(beiscsi_ep); return -EAGAIN; } ret = beiscsi_mccq_compl_wait(phba, tag, NULL, &nonemb_cmd); if (ret) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, "BS_%d : mgmt_open_connection Failed"); if (ret != -EBUSY) dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size, nonemb_cmd.va, nonemb_cmd.dma); beiscsi_free_ep(beiscsi_ep); return ret; } ptcpcnct_out = (struct tcp_connect_and_offload_out *)nonemb_cmd.va; beiscsi_ep = ep->dd_data; beiscsi_ep->fw_handle = ptcpcnct_out->connection_handle; beiscsi_ep->cid_vld = 1; beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, "BS_%d : mgmt_open_connection Success\n"); dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size, nonemb_cmd.va, nonemb_cmd.dma); return 0; } /** * beiscsi_ep_connect - Ask chip to create TCP Conn * @shost: Pointer to scsi_host structure * @dst_addr: The IP address of Target * @non_blocking: blocking or non-blocking call * * This routines first asks chip to create a connection and then allocates an EP */ struct iscsi_endpoint * beiscsi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, int non_blocking) { struct beiscsi_hba *phba; struct beiscsi_endpoint *beiscsi_ep; struct iscsi_endpoint *ep; int ret; if (!shost) { ret = -ENXIO; pr_err("beiscsi_ep_connect shost is NULL\n"); return ERR_PTR(ret); } phba = iscsi_host_priv(shost); if (!beiscsi_hba_is_online(phba)) { ret = -EIO; beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, "BS_%d : HBA in error 
0x%lx\n", phba->state); return ERR_PTR(ret); } if (!test_bit(BEISCSI_HBA_LINK_UP, &phba->state)) { ret = -EBUSY; beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, "BS_%d : The Adapter Port state is Down!!!\n"); return ERR_PTR(ret); } ep = iscsi_create_endpoint(sizeof(struct beiscsi_endpoint)); if (!ep) { ret = -ENOMEM; return ERR_PTR(ret); } beiscsi_ep = ep->dd_data; beiscsi_ep->phba = phba; beiscsi_ep->openiscsi_ep = ep; ret = beiscsi_open_conn(ep, NULL, dst_addr, non_blocking); if (ret) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, "BS_%d : Failed in beiscsi_open_conn\n"); goto free_ep; } return ep; free_ep: iscsi_destroy_endpoint(ep); return ERR_PTR(ret); } /** * beiscsi_ep_poll - Poll to see if connection is established * @ep: endpoint to be used * @timeout_ms: timeout specified in millisecs * * Poll to see if the TCP connection is established */ int beiscsi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) { struct beiscsi_endpoint *beiscsi_ep = ep->dd_data; beiscsi_log(beiscsi_ep->phba, KERN_INFO, BEISCSI_LOG_CONFIG, "BS_%d : In beiscsi_ep_poll\n"); if (beiscsi_ep->cid_vld == 1) return 1; else return 0; } /** * beiscsi_flush_cq()- Flush the CQ created. * @phba: ptr to device priv structure. * * Before the connection resources are freed, flush * all the CQ entries **/ static void beiscsi_flush_cq(struct beiscsi_hba *phba) { uint16_t i; struct be_eq_obj *pbe_eq; struct hwi_controller *phwi_ctrlr; struct hwi_context_memory *phwi_context; phwi_ctrlr = phba->phwi_ctrlr; phwi_context = phwi_ctrlr->phwi_ctxt; for (i = 0; i < phba->num_cpus; i++) { pbe_eq = &phwi_context->be_eq[i]; irq_poll_disable(&pbe_eq->iopoll); beiscsi_process_cq(pbe_eq, BE2_MAX_NUM_CQ_PROC); irq_poll_enable(&pbe_eq->iopoll); } } /** * beiscsi_conn_close - Invalidate and upload connection * @beiscsi_ep: pointer to device endpoint struct * * Returns 0 on success, -1 on failure. */ static int beiscsi_conn_close(struct beiscsi_endpoint *beiscsi_ep) { struct beiscsi_hba *phba = beiscsi_ep->phba; unsigned int tag, attempts; int ret; /** * Without successfully invalidating and uploading the connection, the * driver can't reuse the CID, so attempt more than once. 
*/ attempts = 0; while (attempts++ < 3) { tag = beiscsi_invalidate_cxn(phba, beiscsi_ep); if (tag) { ret = beiscsi_mccq_compl_wait(phba, tag, NULL, NULL); if (!ret) break; beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, "BS_%d : invalidate conn failed cid %d\n", beiscsi_ep->ep_cid); } } /* wait for all completions to arrive, then process them */ msleep(250); /* flush CQ entries */ beiscsi_flush_cq(phba); if (attempts > 3) return -1; attempts = 0; while (attempts++ < 3) { tag = beiscsi_upload_cxn(phba, beiscsi_ep); if (tag) { ret = beiscsi_mccq_compl_wait(phba, tag, NULL, NULL); if (!ret) break; beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, "BS_%d : upload conn failed cid %d\n", beiscsi_ep->ep_cid); } } if (attempts > 3) return -1; return 0; } /** * beiscsi_ep_disconnect - Tears down the TCP connection * @ep: endpoint to be used * * Tears down the TCP connection */ void beiscsi_ep_disconnect(struct iscsi_endpoint *ep) { struct beiscsi_endpoint *beiscsi_ep; struct beiscsi_hba *phba; uint16_t cri_index; beiscsi_ep = ep->dd_data; phba = beiscsi_ep->phba; beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, "BS_%d : In beiscsi_ep_disconnect for ep_cid = %u\n", beiscsi_ep->ep_cid); cri_index = BE_GET_CRI_FROM_CID(beiscsi_ep->ep_cid); if (!phba->ep_array[cri_index]) { __beiscsi_log(phba, KERN_ERR, "BS_%d : ep_array at %u cid %u empty\n", cri_index, beiscsi_ep->ep_cid); return; } if (!beiscsi_hba_is_online(phba)) { beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, "BS_%d : HBA in error 0x%lx\n", phba->state); } else { /** * Make CID available even if close fails. * If not freed, FW might fail open using the CID. */ if (beiscsi_conn_close(beiscsi_ep) < 0) __beiscsi_log(phba, KERN_ERR, "BS_%d : close conn failed cid %d\n", beiscsi_ep->ep_cid); } beiscsi_free_ep(beiscsi_ep); if (!phba->conn_table[cri_index]) __beiscsi_log(phba, KERN_ERR, "BS_%d : conn_table empty at %u: cid %u\n", cri_index, beiscsi_ep->ep_cid); phba->conn_table[cri_index] = NULL; iscsi_destroy_endpoint(beiscsi_ep->openiscsi_ep); } umode_t beiscsi_attr_is_visible(int param_type, int param) { switch (param_type) { case ISCSI_NET_PARAM: switch (param) { case ISCSI_NET_PARAM_IFACE_ENABLE: case ISCSI_NET_PARAM_IPV4_ADDR: case ISCSI_NET_PARAM_IPV4_SUBNET: case ISCSI_NET_PARAM_IPV4_BOOTPROTO: case ISCSI_NET_PARAM_IPV4_GW: case ISCSI_NET_PARAM_IPV6_ADDR: case ISCSI_NET_PARAM_VLAN_ID: case ISCSI_NET_PARAM_VLAN_PRIORITY: case ISCSI_NET_PARAM_VLAN_ENABLED: return S_IRUGO; default: return 0; } case ISCSI_HOST_PARAM: switch (param) { case ISCSI_HOST_PARAM_HWADDRESS: case ISCSI_HOST_PARAM_INITIATOR_NAME: case ISCSI_HOST_PARAM_PORT_STATE: case ISCSI_HOST_PARAM_PORT_SPEED: return S_IRUGO; default: return 0; } case ISCSI_PARAM: switch (param) { case ISCSI_PARAM_MAX_RECV_DLENGTH: case ISCSI_PARAM_MAX_XMIT_DLENGTH: case ISCSI_PARAM_HDRDGST_EN: case ISCSI_PARAM_DATADGST_EN: case ISCSI_PARAM_CONN_ADDRESS: case ISCSI_PARAM_CONN_PORT: case ISCSI_PARAM_EXP_STATSN: case ISCSI_PARAM_PERSISTENT_ADDRESS: case ISCSI_PARAM_PERSISTENT_PORT: case ISCSI_PARAM_PING_TMO: case ISCSI_PARAM_RECV_TMO: case ISCSI_PARAM_INITIAL_R2T_EN: case ISCSI_PARAM_MAX_R2T: case ISCSI_PARAM_IMM_DATA_EN: case ISCSI_PARAM_FIRST_BURST: case ISCSI_PARAM_MAX_BURST: case ISCSI_PARAM_PDU_INORDER_EN: case ISCSI_PARAM_DATASEQ_INORDER_EN: case ISCSI_PARAM_ERL: case ISCSI_PARAM_TARGET_NAME: case ISCSI_PARAM_TPGT: case ISCSI_PARAM_USERNAME: case ISCSI_PARAM_PASSWORD: case ISCSI_PARAM_USERNAME_IN: case ISCSI_PARAM_PASSWORD_IN: case ISCSI_PARAM_FAST_ABORT: case ISCSI_PARAM_ABORT_TMO: case 
ISCSI_PARAM_LU_RESET_TMO: case ISCSI_PARAM_IFACE_NAME: case ISCSI_PARAM_INITIATOR_NAME: return S_IRUGO; default: return 0; } } return 0; }
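/*
 * Editor's note: illustrative sketch, not part of be_iscsi.c.
 * beiscsi_get_cid()/beiscsi_put_cid() above manage connection IDs with a
 * circular free list: an array pre-filled with valid CIDs, an alloc cursor
 * that empties slots by writing an "invalid" sentinel, and a free cursor
 * that refills empty slots.  A minimal userspace model of that scheme, with
 * a small hypothetical pool size and sentinel value:
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_INVALID_CID 0xffff	/* stand-in for BE_INVALID_CID */
#define SKETCH_POOL_SZ     4

struct sketch_cid_info {
	uint16_t cid_array[SKETCH_POOL_SZ];
	unsigned int cid_alloc;		/* next slot to allocate from */
	unsigned int cid_free;		/* next slot to release into */
	unsigned int avlbl_cids;
};

static uint16_t sketch_get_cid(struct sketch_cid_info *ci)
{
	uint16_t cid = ci->cid_array[ci->cid_alloc];

	if (!ci->avlbl_cids || cid == SKETCH_INVALID_CID)
		return SKETCH_INVALID_CID;

	ci->cid_array[ci->cid_alloc] = SKETCH_INVALID_CID;	/* empty the slot */
	if (++ci->cid_alloc == SKETCH_POOL_SZ)
		ci->cid_alloc = 0;
	ci->avlbl_cids--;
	return cid;
}

static void sketch_put_cid(struct sketch_cid_info *ci, uint16_t cid)
{
	if (ci->cid_array[ci->cid_free] != SKETCH_INVALID_CID)
		return;					/* fill only an empty slot */
	ci->cid_array[ci->cid_free] = cid;
	if (++ci->cid_free == SKETCH_POOL_SZ)
		ci->cid_free = 0;
	ci->avlbl_cids++;
}

int main(void)
{
	struct sketch_cid_info ci = {
		.cid_array = { 24, 25, 26, 27 },
		.avlbl_cids = SKETCH_POOL_SZ,
	};
	unsigned int a = sketch_get_cid(&ci);
	unsigned int b = sketch_get_cid(&ci);

	sketch_put_cid(&ci, (uint16_t)a);
	printf("got %u and %u, %u CIDs still available\n", a, b, ci.avlbl_cids);
	return 0;
}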
linux-master
drivers/scsi/be2iscsi/be_iscsi.c
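/*
 * Editor's note: illustrative sketch, not part of either driver file.
 * __beiscsi_iface_get_param() in the be_iscsi.c record above reports the
 * VLAN ID and priority from a single 16-bit field laid out like an 802.1Q
 * TCI: the low 12 bits carry the VLAN ID and the top 3 bits carry the
 * priority (assuming the usual 4095/7 values of ISCSI_MAX_VLAN_ID and
 * ISCSI_MAX_VLAN_PRIORITY).  A standalone decode of that layout:
 */
#include <stdint.h>
#include <stdio.h>

static unsigned int sketch_tci_vlan_id(uint16_t tci)
{
	return tci & 0x0fff;		/* low 12 bits: VLAN ID */
}

static unsigned int sketch_tci_priority(uint16_t tci)
{
	return (tci >> 13) & 0x7;	/* top 3 bits: priority, same shift/mask as the driver */
}

int main(void)
{
	uint16_t tci = (uint16_t)((5u << 13) | 1002);	/* priority 5, VLAN 1002 */

	printf("vlan %u priority %u\n",
	       sketch_tci_vlan_id(tci), sketch_tci_priority(tci));
	return 0;
}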
// SPDX-License-Identifier: GPL-2.0-only /* * QLogic FCoE Offload Driver * Copyright (c) 2016-2018 Cavium Inc. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/device.h> #include <linux/highmem.h> #include <linux/crc32.h> #include <linux/interrupt.h> #include <linux/list.h> #include <linux/kthread.h> #include <linux/phylink.h> #include <scsi/libfc.h> #include <scsi/scsi_host.h> #include <scsi/fc_frame.h> #include <linux/if_ether.h> #include <linux/if_vlan.h> #include <linux/cpu.h> #include "qedf.h" #include "qedf_dbg.h" #include <uapi/linux/pci_regs.h> const struct qed_fcoe_ops *qed_ops; static int qedf_probe(struct pci_dev *pdev, const struct pci_device_id *id); static void qedf_remove(struct pci_dev *pdev); static void qedf_shutdown(struct pci_dev *pdev); static void qedf_schedule_recovery_handler(void *dev); static void qedf_recovery_handler(struct work_struct *work); static int qedf_suspend(struct pci_dev *pdev, pm_message_t state); /* * Driver module parameters. */ static unsigned int qedf_dev_loss_tmo = 60; module_param_named(dev_loss_tmo, qedf_dev_loss_tmo, int, S_IRUGO); MODULE_PARM_DESC(dev_loss_tmo, " dev_loss_tmo setting for attached " "remote ports (default 60)"); uint qedf_debug = QEDF_LOG_INFO; module_param_named(debug, qedf_debug, uint, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(debug, " Debug mask. Pass '1' to enable default debugging" " mask"); static uint qedf_fipvlan_retries = 60; module_param_named(fipvlan_retries, qedf_fipvlan_retries, int, S_IRUGO); MODULE_PARM_DESC(fipvlan_retries, " Number of FIP VLAN requests to attempt " "before giving up (default 60)"); static uint qedf_fallback_vlan = QEDF_FALLBACK_VLAN; module_param_named(fallback_vlan, qedf_fallback_vlan, int, S_IRUGO); MODULE_PARM_DESC(fallback_vlan, " VLAN ID to try if fip vlan request fails " "(default 1002)."); static int qedf_default_prio = -1; module_param_named(default_prio, qedf_default_prio, int, S_IRUGO); MODULE_PARM_DESC(default_prio, " Override 802.1q priority for FIP and FCoE" " traffic (value between 0 and 7, default 3)."); uint qedf_dump_frames; module_param_named(dump_frames, qedf_dump_frames, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(dump_frames, " Print the skb data of FIP and FCoE frames " "(default off)"); static uint qedf_queue_depth; module_param_named(queue_depth, qedf_queue_depth, int, S_IRUGO); MODULE_PARM_DESC(queue_depth, " Sets the queue depth for all LUNs discovered " "by the qedf driver. Default is 0 (use OS default)."); uint qedf_io_tracing; module_param_named(io_tracing, qedf_io_tracing, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(io_tracing, " Enable logging of SCSI requests/completions " "into trace buffer. (default off)."); static uint qedf_max_lun = MAX_FIBRE_LUNS; module_param_named(max_lun, qedf_max_lun, int, S_IRUGO); MODULE_PARM_DESC(max_lun, " Sets the maximum luns per target that the driver " "supports. 
(default 0xffffffff)"); uint qedf_link_down_tmo; module_param_named(link_down_tmo, qedf_link_down_tmo, int, S_IRUGO); MODULE_PARM_DESC(link_down_tmo, " Delays informing the fcoe transport that the " "link is down by N seconds."); bool qedf_retry_delay; module_param_named(retry_delay, qedf_retry_delay, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(retry_delay, " Enable/disable handling of FCP_RSP IU retry " "delay handling (default off)."); static bool qedf_dcbx_no_wait; module_param_named(dcbx_no_wait, qedf_dcbx_no_wait, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(dcbx_no_wait, " Do not wait for DCBX convergence to start " "sending FIP VLAN requests on link up (Default: off)."); static uint qedf_dp_module; module_param_named(dp_module, qedf_dp_module, uint, S_IRUGO); MODULE_PARM_DESC(dp_module, " bit flags control for verbose printk passed " "qed module during probe."); static uint qedf_dp_level = QED_LEVEL_NOTICE; module_param_named(dp_level, qedf_dp_level, uint, S_IRUGO); MODULE_PARM_DESC(dp_level, " printk verbosity control passed to qed module " "during probe (0-3: 0 more verbose)."); static bool qedf_enable_recovery = true; module_param_named(enable_recovery, qedf_enable_recovery, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(enable_recovery, "Enable/disable recovery on driver/firmware " "interface level errors 0 = Disabled, 1 = Enabled (Default: 1)."); struct workqueue_struct *qedf_io_wq; static struct fcoe_percpu_s qedf_global; static DEFINE_SPINLOCK(qedf_global_lock); static struct kmem_cache *qedf_io_work_cache; void qedf_set_vlan_id(struct qedf_ctx *qedf, int vlan_id) { int vlan_id_tmp = 0; vlan_id_tmp = vlan_id | (qedf->prio << VLAN_PRIO_SHIFT); qedf->vlan_id = vlan_id_tmp; QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Setting vlan_id=0x%04x prio=%d.\n", vlan_id_tmp, qedf->prio); } /* Returns true if we have a valid vlan, false otherwise */ static bool qedf_initiate_fipvlan_req(struct qedf_ctx *qedf) { while (qedf->fipvlan_retries--) { /* This is to catch if link goes down during fipvlan retries */ if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) { QEDF_ERR(&qedf->dbg_ctx, "Link not up.\n"); return false; } if (test_bit(QEDF_UNLOADING, &qedf->flags)) { QEDF_ERR(&qedf->dbg_ctx, "Driver unloading.\n"); return false; } if (qedf->vlan_id > 0) { QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "vlan = 0x%x already set, calling ctlr_link_up.\n", qedf->vlan_id); if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) fcoe_ctlr_link_up(&qedf->ctlr); return true; } QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Retry %d.\n", qedf->fipvlan_retries); init_completion(&qedf->fipvlan_compl); qedf_fcoe_send_vlan_req(qedf); wait_for_completion_timeout(&qedf->fipvlan_compl, 1 * HZ); } return false; } static void qedf_handle_link_update(struct work_struct *work) { struct qedf_ctx *qedf = container_of(work, struct qedf_ctx, link_update.work); int rc; QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Entered. 
link_state=%d.\n", atomic_read(&qedf->link_state)); if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) { rc = qedf_initiate_fipvlan_req(qedf); if (rc) return; if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) { QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Link is down, resetting vlan_id.\n"); qedf->vlan_id = 0; return; } /* * If we get here then we never received a response to our * fip vlan request so set the vlan_id to the default and * tell FCoE that the link is up */ QEDF_WARN(&(qedf->dbg_ctx), "Did not receive FIP VLAN " "response, falling back to default VLAN %d.\n", qedf_fallback_vlan); qedf_set_vlan_id(qedf, qedf_fallback_vlan); /* * Zero out data_src_addr so we'll update it with the new * lport port_id */ eth_zero_addr(qedf->data_src_addr); fcoe_ctlr_link_up(&qedf->ctlr); } else if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) { /* * If we hit here and link_down_tmo_valid is still 1 it means * that link_down_tmo timed out so set it to 0 to make sure any * other readers have accurate state. */ atomic_set(&qedf->link_down_tmo_valid, 0); QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Calling fcoe_ctlr_link_down().\n"); fcoe_ctlr_link_down(&qedf->ctlr); if (qedf_wait_for_upload(qedf) == false) QEDF_ERR(&qedf->dbg_ctx, "Could not upload all sessions.\n"); /* Reset the number of FIP VLAN retries */ qedf->fipvlan_retries = qedf_fipvlan_retries; } } #define QEDF_FCOE_MAC_METHOD_GRANGED_MAC 1 #define QEDF_FCOE_MAC_METHOD_FCF_MAP 2 #define QEDF_FCOE_MAC_METHOD_FCOE_SET_MAC 3 static void qedf_set_data_src_addr(struct qedf_ctx *qedf, struct fc_frame *fp) { u8 *granted_mac; struct fc_frame_header *fh = fc_frame_header_get(fp); u8 fc_map[3]; int method = 0; /* Get granted MAC address from FIP FLOGI payload */ granted_mac = fr_cb(fp)->granted_mac; /* * We set the source MAC for FCoE traffic based on the Granted MAC * address from the switch. * * If granted_mac is non-zero, we use that. * If the granted_mac is zeroed out, create the FCoE MAC based on * the sel_fcf->fc_map and the d_id of the FLOGI frame. * If sel_fcf->fc_map is 0 then we use the default FCF-MAC plus the * d_id of the FLOGI frame. */ if (!is_zero_ether_addr(granted_mac)) { ether_addr_copy(qedf->data_src_addr, granted_mac); method = QEDF_FCOE_MAC_METHOD_GRANGED_MAC; } else if (qedf->ctlr.sel_fcf->fc_map != 0) { hton24(fc_map, qedf->ctlr.sel_fcf->fc_map); qedf->data_src_addr[0] = fc_map[0]; qedf->data_src_addr[1] = fc_map[1]; qedf->data_src_addr[2] = fc_map[2]; qedf->data_src_addr[3] = fh->fh_d_id[0]; qedf->data_src_addr[4] = fh->fh_d_id[1]; qedf->data_src_addr[5] = fh->fh_d_id[2]; method = QEDF_FCOE_MAC_METHOD_FCF_MAP; } else { fc_fcoe_set_mac(qedf->data_src_addr, fh->fh_d_id); method = QEDF_FCOE_MAC_METHOD_FCOE_SET_MAC; } QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "QEDF data_src_mac=%pM method=%d.\n", qedf->data_src_addr, method); } static void qedf_flogi_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) { struct fc_exch *exch = fc_seq_exch(seq); struct fc_lport *lport = exch->lp; struct qedf_ctx *qedf = lport_priv(lport); if (!qedf) { QEDF_ERR(NULL, "qedf is NULL.\n"); return; } /* * If ERR_PTR is set then don't try to stat anything as it will cause * a crash when we access fp. 
*/ if (IS_ERR(fp)) { QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "fp has IS_ERR() set.\n"); goto skip_stat; } /* Log stats for FLOGI reject */ if (fc_frame_payload_op(fp) == ELS_LS_RJT) qedf->flogi_failed++; else if (fc_frame_payload_op(fp) == ELS_LS_ACC) { /* Set the source MAC we will use for FCoE traffic */ qedf_set_data_src_addr(qedf, fp); qedf->flogi_pending = 0; } /* Complete flogi_compl so we can proceed to sending ADISCs */ complete(&qedf->flogi_compl); skip_stat: /* Report response to libfc */ fc_lport_flogi_resp(seq, fp, lport); } static struct fc_seq *qedf_elsct_send(struct fc_lport *lport, u32 did, struct fc_frame *fp, unsigned int op, void (*resp)(struct fc_seq *, struct fc_frame *, void *), void *arg, u32 timeout) { struct qedf_ctx *qedf = lport_priv(lport); /* * Intercept FLOGI for statistics purposes. Note we use the resp * callback to tell if this is really a flogi. */ if (resp == fc_lport_flogi_resp) { qedf->flogi_cnt++; if (qedf->flogi_pending >= QEDF_FLOGI_RETRY_CNT) { schedule_delayed_work(&qedf->stag_work, 2); return NULL; } qedf->flogi_pending++; return fc_elsct_send(lport, did, fp, op, qedf_flogi_resp, arg, timeout); } return fc_elsct_send(lport, did, fp, op, resp, arg, timeout); } int qedf_send_flogi(struct qedf_ctx *qedf) { struct fc_lport *lport; struct fc_frame *fp; lport = qedf->lport; if (!lport->tt.elsct_send) { QEDF_ERR(&qedf->dbg_ctx, "tt.elsct_send not set.\n"); return -EINVAL; } fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi)); if (!fp) { QEDF_ERR(&(qedf->dbg_ctx), "fc_frame_alloc failed.\n"); return -ENOMEM; } QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending FLOGI to reestablish session with switch.\n"); lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, ELS_FLOGI, qedf_flogi_resp, lport, lport->r_a_tov); init_completion(&qedf->flogi_compl); return 0; } /* * This function is called if link_down_tmo is in use. If we get a link up and * link_down_tmo has not expired then use just FLOGI/ADISC to recover our * sessions with targets. Otherwise, just call fcoe_ctlr_link_up(). */ static void qedf_link_recovery(struct work_struct *work) { struct qedf_ctx *qedf = container_of(work, struct qedf_ctx, link_recovery.work); struct fc_lport *lport = qedf->lport; struct fc_rport_priv *rdata; bool rc; int retries = 30; int rval, i; struct list_head rdata_login_list; INIT_LIST_HEAD(&rdata_login_list); QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Link down tmo did not expire.\n"); /* * Essentially reset the fcoe_ctlr here without affecting the state * of the libfc structs. */ qedf->ctlr.state = FIP_ST_LINK_WAIT; fcoe_ctlr_link_down(&qedf->ctlr); /* * Bring the link up before we send the fipvlan request so libfcoe * can select a new fcf in parallel */ fcoe_ctlr_link_up(&qedf->ctlr); /* The link went down and came back up, so verify which VLAN we're on */ qedf->fipvlan_retries = qedf_fipvlan_retries; rc = qedf_initiate_fipvlan_req(qedf); /* If getting the VLAN fails, set the VLAN to the fallback one */ if (!rc) qedf_set_vlan_id(qedf, qedf_fallback_vlan); /* * We need to wait for an FCF to be selected after the * fcoe_ctlr_link_up call, otherwise the FLOGI will be rejected. 
*/ while (retries > 0) { if (qedf->ctlr.sel_fcf) { QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "FCF reselected, proceeding with FLOGI.\n"); break; } msleep(500); retries--; } if (retries < 1) { QEDF_ERR(&(qedf->dbg_ctx), "Exhausted retries waiting for " "FCF selection.\n"); return; } rval = qedf_send_flogi(qedf); if (rval) return; /* Wait for FLOGI completion before proceeding with sending ADISCs */ i = wait_for_completion_timeout(&qedf->flogi_compl, qedf->lport->r_a_tov); if (i == 0) { QEDF_ERR(&(qedf->dbg_ctx), "FLOGI timed out.\n"); return; } /* * Call lport->tt.rport_login which will cause libfc to send an * ADISC since the rport is in state ready. */ mutex_lock(&lport->disc.disc_mutex); list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) { if (kref_get_unless_zero(&rdata->kref)) { fc_rport_login(rdata); kref_put(&rdata->kref, fc_rport_destroy); } } mutex_unlock(&lport->disc.disc_mutex); } static void qedf_update_link_speed(struct qedf_ctx *qedf, struct qed_link_output *link) { __ETHTOOL_DECLARE_LINK_MODE_MASK(sup_caps); struct fc_lport *lport = qedf->lport; lport->link_speed = FC_PORTSPEED_UNKNOWN; lport->link_supported_speeds = FC_PORTSPEED_UNKNOWN; /* Set fc_host link speed */ switch (link->speed) { case 10000: lport->link_speed = FC_PORTSPEED_10GBIT; break; case 25000: lport->link_speed = FC_PORTSPEED_25GBIT; break; case 40000: lport->link_speed = FC_PORTSPEED_40GBIT; break; case 50000: lport->link_speed = FC_PORTSPEED_50GBIT; break; case 100000: lport->link_speed = FC_PORTSPEED_100GBIT; break; case 20000: lport->link_speed = FC_PORTSPEED_20GBIT; break; default: lport->link_speed = FC_PORTSPEED_UNKNOWN; break; } /* * Set supported link speed by querying the supported * capabilities of the link. */ phylink_zero(sup_caps); phylink_set(sup_caps, 10000baseT_Full); phylink_set(sup_caps, 10000baseKX4_Full); phylink_set(sup_caps, 10000baseR_FEC); phylink_set(sup_caps, 10000baseCR_Full); phylink_set(sup_caps, 10000baseSR_Full); phylink_set(sup_caps, 10000baseLR_Full); phylink_set(sup_caps, 10000baseLRM_Full); phylink_set(sup_caps, 10000baseKR_Full); if (linkmode_intersects(link->supported_caps, sup_caps)) lport->link_supported_speeds |= FC_PORTSPEED_10GBIT; phylink_zero(sup_caps); phylink_set(sup_caps, 25000baseKR_Full); phylink_set(sup_caps, 25000baseCR_Full); phylink_set(sup_caps, 25000baseSR_Full); if (linkmode_intersects(link->supported_caps, sup_caps)) lport->link_supported_speeds |= FC_PORTSPEED_25GBIT; phylink_zero(sup_caps); phylink_set(sup_caps, 40000baseLR4_Full); phylink_set(sup_caps, 40000baseKR4_Full); phylink_set(sup_caps, 40000baseCR4_Full); phylink_set(sup_caps, 40000baseSR4_Full); if (linkmode_intersects(link->supported_caps, sup_caps)) lport->link_supported_speeds |= FC_PORTSPEED_40GBIT; phylink_zero(sup_caps); phylink_set(sup_caps, 50000baseKR2_Full); phylink_set(sup_caps, 50000baseCR2_Full); phylink_set(sup_caps, 50000baseSR2_Full); if (linkmode_intersects(link->supported_caps, sup_caps)) lport->link_supported_speeds |= FC_PORTSPEED_50GBIT; phylink_zero(sup_caps); phylink_set(sup_caps, 100000baseKR4_Full); phylink_set(sup_caps, 100000baseSR4_Full); phylink_set(sup_caps, 100000baseCR4_Full); phylink_set(sup_caps, 100000baseLR4_ER4_Full); if (linkmode_intersects(link->supported_caps, sup_caps)) lport->link_supported_speeds |= FC_PORTSPEED_100GBIT; phylink_zero(sup_caps); phylink_set(sup_caps, 20000baseKR2_Full); if (linkmode_intersects(link->supported_caps, sup_caps)) lport->link_supported_speeds |= FC_PORTSPEED_20GBIT; if (lport->host && lport->host->shost_data) 
fc_host_supported_speeds(lport->host) = lport->link_supported_speeds; } static void qedf_bw_update(void *dev) { struct qedf_ctx *qedf = (struct qedf_ctx *)dev; struct qed_link_output link; /* Get the latest status of the link */ qed_ops->common->get_link(qedf->cdev, &link); if (test_bit(QEDF_UNLOADING, &qedf->flags)) { QEDF_ERR(&qedf->dbg_ctx, "Ignore link update, driver getting unload.\n"); return; } if (link.link_up) { if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) qedf_update_link_speed(qedf, &link); else QEDF_ERR(&qedf->dbg_ctx, "Ignore bw update, link is down.\n"); } else { QEDF_ERR(&qedf->dbg_ctx, "link_up is not set.\n"); } } static void qedf_link_update(void *dev, struct qed_link_output *link) { struct qedf_ctx *qedf = (struct qedf_ctx *)dev; /* * Prevent race where we're removing the module and we get link update * for qed. */ if (test_bit(QEDF_UNLOADING, &qedf->flags)) { QEDF_ERR(&qedf->dbg_ctx, "Ignore link update, driver getting unload.\n"); return; } if (link->link_up) { if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) { QEDF_INFO((&qedf->dbg_ctx), QEDF_LOG_DISC, "Ignoring link up event as link is already up.\n"); return; } QEDF_ERR(&(qedf->dbg_ctx), "LINK UP (%d GB/s).\n", link->speed / 1000); /* Cancel any pending link down work */ cancel_delayed_work(&qedf->link_update); atomic_set(&qedf->link_state, QEDF_LINK_UP); qedf_update_link_speed(qedf, link); if (atomic_read(&qedf->dcbx) == QEDF_DCBX_DONE || qedf_dcbx_no_wait) { QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "DCBx done.\n"); if (atomic_read(&qedf->link_down_tmo_valid) > 0) queue_delayed_work(qedf->link_update_wq, &qedf->link_recovery, 0); else queue_delayed_work(qedf->link_update_wq, &qedf->link_update, 0); atomic_set(&qedf->link_down_tmo_valid, 0); } } else { QEDF_ERR(&(qedf->dbg_ctx), "LINK DOWN.\n"); atomic_set(&qedf->link_state, QEDF_LINK_DOWN); atomic_set(&qedf->dcbx, QEDF_DCBX_PENDING); /* * Flag that we're waiting for the link to come back up before * informing the fcoe layer of the event. */ if (qedf_link_down_tmo > 0) { QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Starting link down tmo.\n"); atomic_set(&qedf->link_down_tmo_valid, 1); } qedf->vlan_id = 0; qedf_update_link_speed(qedf, link); queue_delayed_work(qedf->link_update_wq, &qedf->link_update, qedf_link_down_tmo * HZ); } } static void qedf_dcbx_handler(void *dev, struct qed_dcbx_get *get, u32 mib_type) { struct qedf_ctx *qedf = (struct qedf_ctx *)dev; u8 tmp_prio; QEDF_ERR(&(qedf->dbg_ctx), "DCBx event valid=%d enabled=%d fcoe " "prio=%d.\n", get->operational.valid, get->operational.enabled, get->operational.app_prio.fcoe); if (get->operational.enabled && get->operational.valid) { /* If DCBX was already negotiated on link up then just exit */ if (atomic_read(&qedf->dcbx) == QEDF_DCBX_DONE) { QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "DCBX already set on link up.\n"); return; } atomic_set(&qedf->dcbx, QEDF_DCBX_DONE); /* * Set the 8021q priority in the following manner: * * 1. If a modparam is set use that * 2. If the value is not between 0..7 use the default * 3. 
Use the priority we get from the DCBX app tag */ tmp_prio = get->operational.app_prio.fcoe; if (qedf_default_prio > -1) qedf->prio = qedf_default_prio; else if (tmp_prio > 7) { QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "FIP/FCoE prio %d out of range, setting to %d.\n", tmp_prio, QEDF_DEFAULT_PRIO); qedf->prio = QEDF_DEFAULT_PRIO; } else qedf->prio = tmp_prio; if (atomic_read(&qedf->link_state) == QEDF_LINK_UP && !qedf_dcbx_no_wait) { if (atomic_read(&qedf->link_down_tmo_valid) > 0) queue_delayed_work(qedf->link_update_wq, &qedf->link_recovery, 0); else queue_delayed_work(qedf->link_update_wq, &qedf->link_update, 0); atomic_set(&qedf->link_down_tmo_valid, 0); } } } static u32 qedf_get_login_failures(void *cookie) { struct qedf_ctx *qedf; qedf = (struct qedf_ctx *)cookie; return qedf->flogi_failed; } static struct qed_fcoe_cb_ops qedf_cb_ops = { { .link_update = qedf_link_update, .bw_update = qedf_bw_update, .schedule_recovery_handler = qedf_schedule_recovery_handler, .dcbx_aen = qedf_dcbx_handler, .get_generic_tlv_data = qedf_get_generic_tlv_data, .get_protocol_tlv_data = qedf_get_protocol_tlv_data, .schedule_hw_err_handler = qedf_schedule_hw_err_handler, } }; /* * Various transport templates. */ static struct scsi_transport_template *qedf_fc_transport_template; static struct scsi_transport_template *qedf_fc_vport_transport_template; /* * SCSI EH handlers */ static int qedf_eh_abort(struct scsi_cmnd *sc_cmd) { struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); struct fc_lport *lport; struct qedf_ctx *qedf; struct qedf_ioreq *io_req; struct fc_rport_libfc_priv *rp = rport->dd_data; struct fc_rport_priv *rdata; struct qedf_rport *fcport = NULL; int rc = FAILED; int wait_count = 100; int refcount = 0; int rval; int got_ref = 0; lport = shost_priv(sc_cmd->device->host); qedf = (struct qedf_ctx *)lport_priv(lport); /* rport and tgt are allocated together, so tgt should be non-NULL */ fcport = (struct qedf_rport *)&rp[1]; rdata = fcport->rdata; if (!rdata || !kref_get_unless_zero(&rdata->kref)) { QEDF_ERR(&qedf->dbg_ctx, "stale rport, sc_cmd=%p\n", sc_cmd); rc = SUCCESS; goto out; } io_req = qedf_priv(sc_cmd)->io_req; if (!io_req) { QEDF_ERR(&qedf->dbg_ctx, "sc_cmd not queued with lld, sc_cmd=%p op=0x%02x, port_id=%06x\n", sc_cmd, sc_cmd->cmnd[0], rdata->ids.port_id); rc = SUCCESS; goto drop_rdata_kref; } rval = kref_get_unless_zero(&io_req->refcount); /* ID: 005 */ if (rval) got_ref = 1; /* If we got a valid io_req, confirm it belongs to this sc_cmd. 
*/ if (!rval || io_req->sc_cmd != sc_cmd) { QEDF_ERR(&qedf->dbg_ctx, "Freed/Incorrect io_req, io_req->sc_cmd=%p, sc_cmd=%p, port_id=%06x, bailing out.\n", io_req->sc_cmd, sc_cmd, rdata->ids.port_id); goto drop_rdata_kref; } if (fc_remote_port_chkready(rport)) { refcount = kref_read(&io_req->refcount); QEDF_ERR(&qedf->dbg_ctx, "rport not ready, io_req=%p, xid=0x%x sc_cmd=%p op=0x%02x, refcount=%d, port_id=%06x\n", io_req, io_req->xid, sc_cmd, sc_cmd->cmnd[0], refcount, rdata->ids.port_id); goto drop_rdata_kref; } rc = fc_block_scsi_eh(sc_cmd); if (rc) goto drop_rdata_kref; if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) { QEDF_ERR(&qedf->dbg_ctx, "Connection uploading, xid=0x%x., port_id=%06x\n", io_req->xid, rdata->ids.port_id); while (io_req->sc_cmd && (wait_count != 0)) { msleep(100); wait_count--; } if (wait_count) { QEDF_ERR(&qedf->dbg_ctx, "ABTS succeeded\n"); rc = SUCCESS; } else { QEDF_ERR(&qedf->dbg_ctx, "ABTS failed\n"); rc = FAILED; } goto drop_rdata_kref; } if (lport->state != LPORT_ST_READY || !(lport->link_up)) { QEDF_ERR(&qedf->dbg_ctx, "link not ready.\n"); goto drop_rdata_kref; } QEDF_ERR(&qedf->dbg_ctx, "Aborting io_req=%p sc_cmd=%p xid=0x%x fp_idx=%d, port_id=%06x.\n", io_req, sc_cmd, io_req->xid, io_req->fp_idx, rdata->ids.port_id); if (qedf->stop_io_on_error) { qedf_stop_all_io(qedf); rc = SUCCESS; goto drop_rdata_kref; } init_completion(&io_req->abts_done); rval = qedf_initiate_abts(io_req, true); if (rval) { QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n"); /* * If we fail to queue the ABTS then return this command to * the SCSI layer as it will own and free the xid */ rc = SUCCESS; qedf_scsi_done(qedf, io_req, DID_ERROR); goto drop_rdata_kref; } wait_for_completion(&io_req->abts_done); if (io_req->event == QEDF_IOREQ_EV_ABORT_SUCCESS || io_req->event == QEDF_IOREQ_EV_ABORT_FAILED || io_req->event == QEDF_IOREQ_EV_CLEANUP_SUCCESS) { /* * If we get a reponse to the abort this is success from * the perspective that all references to the command have * been removed from the driver and firmware */ rc = SUCCESS; } else { /* If the abort and cleanup failed then return a failure */ rc = FAILED; } if (rc == SUCCESS) QEDF_ERR(&(qedf->dbg_ctx), "ABTS succeeded, xid=0x%x.\n", io_req->xid); else QEDF_ERR(&(qedf->dbg_ctx), "ABTS failed, xid=0x%x.\n", io_req->xid); drop_rdata_kref: kref_put(&rdata->kref, fc_rport_destroy); out: if (got_ref) kref_put(&io_req->refcount, qedf_release_cmd); return rc; } static int qedf_eh_target_reset(struct scsi_cmnd *sc_cmd) { QEDF_ERR(NULL, "%d:0:%d:%lld: TARGET RESET Issued...", sc_cmd->device->host->host_no, sc_cmd->device->id, sc_cmd->device->lun); return qedf_initiate_tmf(sc_cmd, FCP_TMF_TGT_RESET); } static int qedf_eh_device_reset(struct scsi_cmnd *sc_cmd) { QEDF_ERR(NULL, "%d:0:%d:%lld: LUN RESET Issued... 
", sc_cmd->device->host->host_no, sc_cmd->device->id, sc_cmd->device->lun); return qedf_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET); } bool qedf_wait_for_upload(struct qedf_ctx *qedf) { struct qedf_rport *fcport; int wait_cnt = 120; while (wait_cnt--) { if (atomic_read(&qedf->num_offloads)) QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Waiting for all uploads to complete num_offloads = 0x%x.\n", atomic_read(&qedf->num_offloads)); else return true; msleep(500); } rcu_read_lock(); list_for_each_entry_rcu(fcport, &qedf->fcports, peers) { if (test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { if (fcport->rdata) QEDF_ERR(&qedf->dbg_ctx, "Waiting for fcport %p portid=%06x.\n", fcport, fcport->rdata->ids.port_id); } else { QEDF_ERR(&qedf->dbg_ctx, "Waiting for fcport %p.\n", fcport); } } rcu_read_unlock(); return false; } /* Performs soft reset of qedf_ctx by simulating a link down/up */ void qedf_ctx_soft_reset(struct fc_lport *lport) { struct qedf_ctx *qedf; struct qed_link_output if_link; if (lport->vport) { printk_ratelimited("Cannot issue host reset on NPIV port.\n"); return; } qedf = lport_priv(lport); qedf->flogi_pending = 0; /* For host reset, essentially do a soft link up/down */ atomic_set(&qedf->link_state, QEDF_LINK_DOWN); QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Queuing link down work.\n"); queue_delayed_work(qedf->link_update_wq, &qedf->link_update, 0); if (qedf_wait_for_upload(qedf) == false) { QEDF_ERR(&qedf->dbg_ctx, "Could not upload all sessions.\n"); WARN_ON(atomic_read(&qedf->num_offloads)); } /* Before setting link up query physical link state */ qed_ops->common->get_link(qedf->cdev, &if_link); /* Bail if the physical link is not up */ if (!if_link.link_up) { QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Physical link is not up.\n"); return; } /* Flush and wait to make sure link down is processed */ flush_delayed_work(&qedf->link_update); msleep(500); atomic_set(&qedf->link_state, QEDF_LINK_UP); qedf->vlan_id = 0; QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Queue link up work.\n"); queue_delayed_work(qedf->link_update_wq, &qedf->link_update, 0); } /* Reset the host by gracefully logging out and then logging back in */ static int qedf_eh_host_reset(struct scsi_cmnd *sc_cmd) { struct fc_lport *lport; struct qedf_ctx *qedf; lport = shost_priv(sc_cmd->device->host); qedf = lport_priv(lport); if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN || test_bit(QEDF_UNLOADING, &qedf->flags)) return FAILED; QEDF_ERR(&(qedf->dbg_ctx), "HOST RESET Issued..."); qedf_ctx_soft_reset(lport); return SUCCESS; } static int qedf_slave_configure(struct scsi_device *sdev) { if (qedf_queue_depth) { scsi_change_queue_depth(sdev, qedf_queue_depth); } return 0; } static const struct scsi_host_template qedf_host_template = { .module = THIS_MODULE, .name = QEDF_MODULE_NAME, .this_id = -1, .cmd_per_lun = 32, .max_sectors = 0xffff, .queuecommand = qedf_queuecommand, .shost_groups = qedf_host_groups, .eh_abort_handler = qedf_eh_abort, .eh_device_reset_handler = qedf_eh_device_reset, /* lun reset */ .eh_target_reset_handler = qedf_eh_target_reset, /* target reset */ .eh_host_reset_handler = qedf_eh_host_reset, .slave_configure = qedf_slave_configure, .dma_boundary = QED_HW_DMA_BOUNDARY, .sg_tablesize = QEDF_MAX_BDS_PER_CMD, .can_queue = FCOE_PARAMS_NUM_TASKS, .change_queue_depth = scsi_change_queue_depth, .cmd_size = sizeof(struct qedf_cmd_priv), }; static int qedf_get_paged_crc_eof(struct sk_buff *skb, int tlen) { int rc; spin_lock(&qedf_global_lock); rc = fcoe_get_paged_crc_eof(skb, tlen, &qedf_global); 
spin_unlock(&qedf_global_lock); return rc; } static struct qedf_rport *qedf_fcport_lookup(struct qedf_ctx *qedf, u32 port_id) { struct qedf_rport *fcport; struct fc_rport_priv *rdata; rcu_read_lock(); list_for_each_entry_rcu(fcport, &qedf->fcports, peers) { rdata = fcport->rdata; if (rdata == NULL) continue; if (rdata->ids.port_id == port_id) { rcu_read_unlock(); return fcport; } } rcu_read_unlock(); /* Return NULL to caller to let them know fcport was not found */ return NULL; } /* Transmits an ELS frame over an offloaded session */ static int qedf_xmit_l2_frame(struct qedf_rport *fcport, struct fc_frame *fp) { struct fc_frame_header *fh; int rc = 0; fh = fc_frame_header_get(fp); if ((fh->fh_type == FC_TYPE_ELS) && (fh->fh_r_ctl == FC_RCTL_ELS_REQ)) { switch (fc_frame_payload_op(fp)) { case ELS_ADISC: qedf_send_adisc(fcport, fp); rc = 1; break; } } return rc; } /* * qedf_xmit - qedf FCoE frame transmit function */ static int qedf_xmit(struct fc_lport *lport, struct fc_frame *fp) { struct fc_lport *base_lport; struct qedf_ctx *qedf; struct ethhdr *eh; struct fcoe_crc_eof *cp; struct sk_buff *skb; struct fc_frame_header *fh; struct fcoe_hdr *hp; u8 sof, eof; u32 crc; unsigned int hlen, tlen, elen; int wlen; struct fc_lport *tmp_lport; struct fc_lport *vn_port = NULL; struct qedf_rport *fcport; int rc; u16 vlan_tci = 0; qedf = (struct qedf_ctx *)lport_priv(lport); fh = fc_frame_header_get(fp); skb = fp_skb(fp); /* Filter out traffic to other NPIV ports on the same host */ if (lport->vport) base_lport = shost_priv(vport_to_shost(lport->vport)); else base_lport = lport; /* Flag if the destination is the base port */ if (base_lport->port_id == ntoh24(fh->fh_d_id)) { vn_port = base_lport; } else { /* Got through the list of vports attached to the base_lport * and see if we have a match with the destination address. 
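 * A match means the frame is addressed to one of our own NPIV ports,
 * so (unless it is an FLOGI) it is dropped below instead of being
 * transmitted.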
*/ list_for_each_entry(tmp_lport, &base_lport->vports, list) { if (tmp_lport->port_id == ntoh24(fh->fh_d_id)) { vn_port = tmp_lport; break; } } } if (vn_port && ntoh24(fh->fh_d_id) != FC_FID_FLOGI) { struct fc_rport_priv *rdata = NULL; QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "Dropping FCoE frame to %06x.\n", ntoh24(fh->fh_d_id)); kfree_skb(skb); rdata = fc_rport_lookup(lport, ntoh24(fh->fh_d_id)); if (rdata) { rdata->retries = lport->max_rport_retry_count; kref_put(&rdata->kref, fc_rport_destroy); } return -EINVAL; } /* End NPIV filtering */ if (!qedf->ctlr.sel_fcf) { kfree_skb(skb); return 0; } if (!test_bit(QEDF_LL2_STARTED, &qedf->flags)) { QEDF_WARN(&(qedf->dbg_ctx), "LL2 not started\n"); kfree_skb(skb); return 0; } if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) { QEDF_WARN(&(qedf->dbg_ctx), "qedf link down\n"); kfree_skb(skb); return 0; } if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) { if (fcoe_ctlr_els_send(&qedf->ctlr, lport, skb)) return 0; } /* Check to see if this needs to be sent on an offloaded session */ fcport = qedf_fcport_lookup(qedf, ntoh24(fh->fh_d_id)); if (fcport && test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { rc = qedf_xmit_l2_frame(fcport, fp); /* * If the frame was successfully sent over the middle path * then do not try to also send it over the LL2 path */ if (rc) return 0; } sof = fr_sof(fp); eof = fr_eof(fp); elen = sizeof(struct ethhdr); hlen = sizeof(struct fcoe_hdr); tlen = sizeof(struct fcoe_crc_eof); wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE; skb->ip_summed = CHECKSUM_NONE; crc = fcoe_fc_crc(fp); /* copy port crc and eof to the skb buff */ if (skb_is_nonlinear(skb)) { skb_frag_t *frag; if (qedf_get_paged_crc_eof(skb, tlen)) { kfree_skb(skb); return -ENOMEM; } frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1]; cp = kmap_atomic(skb_frag_page(frag)) + skb_frag_off(frag); } else { cp = skb_put(skb, tlen); } memset(cp, 0, sizeof(*cp)); cp->fcoe_eof = eof; cp->fcoe_crc32 = cpu_to_le32(~crc); if (skb_is_nonlinear(skb)) { kunmap_atomic(cp); cp = NULL; } /* adjust skb network/transport offsets to match mac/fcoe/port */ skb_push(skb, elen + hlen); skb_reset_mac_header(skb); skb_reset_network_header(skb); skb->mac_len = elen; skb->protocol = htons(ETH_P_FCOE); /* * Add VLAN tag to non-offload FCoE frame based on current stored VLAN * for FIP/FCoE traffic. 
*/ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), qedf->vlan_id); /* fill up mac and fcoe headers */ eh = eth_hdr(skb); eh->h_proto = htons(ETH_P_FCOE); if (qedf->ctlr.map_dest) fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id); else /* insert GW address */ ether_addr_copy(eh->h_dest, qedf->ctlr.dest_addr); /* Set the source MAC address */ ether_addr_copy(eh->h_source, qedf->data_src_addr); hp = (struct fcoe_hdr *)(eh + 1); memset(hp, 0, sizeof(*hp)); if (FC_FCOE_VER) FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER); hp->fcoe_sof = sof; /*update tx stats */ this_cpu_inc(lport->stats->TxFrames); this_cpu_add(lport->stats->TxWords, wlen); /* Get VLAN ID from skb for printing purposes */ __vlan_hwaccel_get_tag(skb, &vlan_tci); /* send down to lld */ fr_dev(fp) = lport; QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FCoE frame send: " "src=%06x dest=%06x r_ctl=%x type=%x vlan=%04x.\n", ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id), fh->fh_r_ctl, fh->fh_type, vlan_tci); if (qedf_dump_frames) print_hex_dump(KERN_WARNING, "fcoe: ", DUMP_PREFIX_OFFSET, 16, 1, skb->data, skb->len, false); rc = qed_ops->ll2->start_xmit(qedf->cdev, skb, 0); if (rc) { QEDF_ERR(&qedf->dbg_ctx, "start_xmit failed rc = %d.\n", rc); kfree_skb(skb); return rc; } return 0; } static int qedf_alloc_sq(struct qedf_ctx *qedf, struct qedf_rport *fcport) { int rval = 0; u32 *pbl; dma_addr_t page; int num_pages; /* Calculate appropriate queue and PBL sizes */ fcport->sq_mem_size = SQ_NUM_ENTRIES * sizeof(struct fcoe_wqe); fcport->sq_mem_size = ALIGN(fcport->sq_mem_size, QEDF_PAGE_SIZE); fcport->sq_pbl_size = (fcport->sq_mem_size / QEDF_PAGE_SIZE) * sizeof(void *); fcport->sq_pbl_size = fcport->sq_pbl_size + QEDF_PAGE_SIZE; fcport->sq = dma_alloc_coherent(&qedf->pdev->dev, fcport->sq_mem_size, &fcport->sq_dma, GFP_KERNEL); if (!fcport->sq) { QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue.\n"); rval = 1; goto out; } fcport->sq_pbl = dma_alloc_coherent(&qedf->pdev->dev, fcport->sq_pbl_size, &fcport->sq_pbl_dma, GFP_KERNEL); if (!fcport->sq_pbl) { QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue PBL.\n"); rval = 1; goto out_free_sq; } /* Create PBL */ num_pages = fcport->sq_mem_size / QEDF_PAGE_SIZE; page = fcport->sq_dma; pbl = (u32 *)fcport->sq_pbl; while (num_pages--) { *pbl = U64_LO(page); pbl++; *pbl = U64_HI(page); pbl++; page += QEDF_PAGE_SIZE; } return rval; out_free_sq: dma_free_coherent(&qedf->pdev->dev, fcport->sq_mem_size, fcport->sq, fcport->sq_dma); out: return rval; } static void qedf_free_sq(struct qedf_ctx *qedf, struct qedf_rport *fcport) { if (fcport->sq_pbl) dma_free_coherent(&qedf->pdev->dev, fcport->sq_pbl_size, fcport->sq_pbl, fcport->sq_pbl_dma); if (fcport->sq) dma_free_coherent(&qedf->pdev->dev, fcport->sq_mem_size, fcport->sq, fcport->sq_dma); } static int qedf_offload_connection(struct qedf_ctx *qedf, struct qedf_rport *fcport) { struct qed_fcoe_params_offload conn_info; u32 port_id; int rval; uint16_t total_sqe = (fcport->sq_mem_size / sizeof(struct fcoe_wqe)); QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Offloading connection " "portid=%06x.\n", fcport->rdata->ids.port_id); rval = qed_ops->acquire_conn(qedf->cdev, &fcport->handle, &fcport->fw_cid, &fcport->p_doorbell); if (rval) { QEDF_WARN(&(qedf->dbg_ctx), "Could not acquire connection " "for portid=%06x.\n", fcport->rdata->ids.port_id); rval = 1; /* For some reason qed returns 0 on failure here */ goto out; } QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "portid=%06x " "fw_cid=%08x handle=%d.\n", fcport->rdata->ids.port_id, fcport->fw_cid, fcport->handle); 
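	/*
	 * conn_info below carries everything the firmware needs for the
	 * session: SQ PBL addresses, our FCoE source MAC and the destination
	 * MAC, payload sizes, timers, VLAN and the 24-bit FC IDs.  Note the
	 * FC ID byte order: addr_hi holds the least significant byte.
	 * Illustration only: port_id 0x0a1b2c is stored as addr_hi=0x2c,
	 * addr_mid=0x1b, addr_lo=0x0a.
	 */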
memset(&conn_info, 0, sizeof(struct qed_fcoe_params_offload)); /* Fill in the offload connection info */ conn_info.sq_pbl_addr = fcport->sq_pbl_dma; conn_info.sq_curr_page_addr = (dma_addr_t)(*(u64 *)fcport->sq_pbl); conn_info.sq_next_page_addr = (dma_addr_t)(*(u64 *)(fcport->sq_pbl + 8)); /* Need to use our FCoE MAC for the offload session */ ether_addr_copy(conn_info.src_mac, qedf->data_src_addr); ether_addr_copy(conn_info.dst_mac, qedf->ctlr.dest_addr); conn_info.tx_max_fc_pay_len = fcport->rdata->maxframe_size; conn_info.e_d_tov_timer_val = qedf->lport->e_d_tov; conn_info.rec_tov_timer_val = 3; /* I think this is what E3 was */ conn_info.rx_max_fc_pay_len = fcport->rdata->maxframe_size; /* Set VLAN data */ conn_info.vlan_tag = qedf->vlan_id << FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_SHIFT; conn_info.vlan_tag |= qedf->prio << FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT; conn_info.flags |= (FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_MASK << FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_SHIFT); /* Set host port source id */ port_id = fc_host_port_id(qedf->lport->host); fcport->sid = port_id; conn_info.s_id.addr_hi = (port_id & 0x000000FF); conn_info.s_id.addr_mid = (port_id & 0x0000FF00) >> 8; conn_info.s_id.addr_lo = (port_id & 0x00FF0000) >> 16; conn_info.max_conc_seqs_c3 = fcport->rdata->max_seq; /* Set remote port destination id */ port_id = fcport->rdata->rport->port_id; conn_info.d_id.addr_hi = (port_id & 0x000000FF); conn_info.d_id.addr_mid = (port_id & 0x0000FF00) >> 8; conn_info.d_id.addr_lo = (port_id & 0x00FF0000) >> 16; conn_info.def_q_idx = 0; /* Default index for send queue? */ /* Set FC-TAPE specific flags if needed */ if (fcport->dev_type == QEDF_RPORT_TYPE_TAPE) { QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Enable CONF, REC for portid=%06x.\n", fcport->rdata->ids.port_id); conn_info.flags |= 1 << FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_SHIFT; conn_info.flags |= ((fcport->rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) << FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_SHIFT; } rval = qed_ops->offload_conn(qedf->cdev, fcport->handle, &conn_info); if (rval) { QEDF_WARN(&(qedf->dbg_ctx), "Could not offload connection " "for portid=%06x.\n", fcport->rdata->ids.port_id); goto out_free_conn; } else QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Offload " "succeeded portid=%06x total_sqe=%d.\n", fcport->rdata->ids.port_id, total_sqe); spin_lock_init(&fcport->rport_lock); atomic_set(&fcport->free_sqes, total_sqe); return 0; out_free_conn: qed_ops->release_conn(qedf->cdev, fcport->handle); out: return rval; } #define QEDF_TERM_BUFF_SIZE 10 static void qedf_upload_connection(struct qedf_ctx *qedf, struct qedf_rport *fcport) { void *term_params; dma_addr_t term_params_dma; /* Term params needs to be a DMA coherent buffer as qed shared the * physical DMA address with the firmware. The buffer may be used in * the receive path so we may eventually have to move this. 
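 * If this small allocation fails we return early without issuing the
 * termination request or releasing the connection handle.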
*/ term_params = dma_alloc_coherent(&qedf->pdev->dev, QEDF_TERM_BUFF_SIZE, &term_params_dma, GFP_KERNEL); if (!term_params) return; QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Uploading connection " "port_id=%06x.\n", fcport->rdata->ids.port_id); qed_ops->destroy_conn(qedf->cdev, fcport->handle, term_params_dma); qed_ops->release_conn(qedf->cdev, fcport->handle); dma_free_coherent(&qedf->pdev->dev, QEDF_TERM_BUFF_SIZE, term_params, term_params_dma); } static void qedf_cleanup_fcport(struct qedf_ctx *qedf, struct qedf_rport *fcport) { struct fc_rport_priv *rdata = fcport->rdata; QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Cleaning up portid=%06x.\n", fcport->rdata->ids.port_id); /* Flush any remaining i/o's before we upload the connection */ qedf_flush_active_ios(fcport, -1); if (test_and_clear_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) qedf_upload_connection(qedf, fcport); qedf_free_sq(qedf, fcport); fcport->rdata = NULL; fcport->qedf = NULL; kref_put(&rdata->kref, fc_rport_destroy); } /* * This event_callback is called after successful completion of libfc * initiated target login. qedf can proceed with initiating the session * establishment. */ static void qedf_rport_event_handler(struct fc_lport *lport, struct fc_rport_priv *rdata, enum fc_rport_event event) { struct qedf_ctx *qedf = lport_priv(lport); struct fc_rport *rport = rdata->rport; struct fc_rport_libfc_priv *rp; struct qedf_rport *fcport; u32 port_id; int rval; unsigned long flags; QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "event = %d, " "port_id = 0x%x\n", event, rdata->ids.port_id); switch (event) { case RPORT_EV_READY: if (!rport) { QEDF_WARN(&(qedf->dbg_ctx), "rport is NULL.\n"); break; } rp = rport->dd_data; fcport = (struct qedf_rport *)&rp[1]; fcport->qedf = qedf; if (atomic_read(&qedf->num_offloads) >= QEDF_MAX_SESSIONS) { QEDF_ERR(&(qedf->dbg_ctx), "Not offloading " "portid=0x%x as max number of offloaded sessions " "reached.\n", rdata->ids.port_id); return; } /* * Don't try to offload the session again. Can happen when we * get an ADISC */ if (test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { QEDF_WARN(&(qedf->dbg_ctx), "Session already " "offloaded, portid=0x%x.\n", rdata->ids.port_id); return; } if (rport->port_id == FC_FID_DIR_SERV) { /* * qedf_rport structure doesn't exist for * directory server. 
* We should not come here, as lport will * take care of fabric login */ QEDF_WARN(&(qedf->dbg_ctx), "rport struct does not " "exist for dir server port_id=%x\n", rdata->ids.port_id); break; } if (rdata->spp_type != FC_TYPE_FCP) { QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Not offloading since spp type isn't FCP\n"); break; } if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) { QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Not FCP target so not offloading\n"); break; } /* Initial reference held on entry, so this can't fail */ kref_get(&rdata->kref); fcport->rdata = rdata; fcport->rport = rport; rval = qedf_alloc_sq(qedf, fcport); if (rval) { qedf_cleanup_fcport(qedf, fcport); break; } /* Set device type */ if (rdata->flags & FC_RP_FLAGS_RETRY && rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET && !(rdata->ids.roles & FC_RPORT_ROLE_FCP_INITIATOR)) { fcport->dev_type = QEDF_RPORT_TYPE_TAPE; QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "portid=%06x is a TAPE device.\n", rdata->ids.port_id); } else { fcport->dev_type = QEDF_RPORT_TYPE_DISK; } rval = qedf_offload_connection(qedf, fcport); if (rval) { qedf_cleanup_fcport(qedf, fcport); break; } /* Add fcport to list of qedf_ctx list of offloaded ports */ spin_lock_irqsave(&qedf->hba_lock, flags); list_add_rcu(&fcport->peers, &qedf->fcports); spin_unlock_irqrestore(&qedf->hba_lock, flags); /* * Set the session ready bit to let everyone know that this * connection is ready for I/O */ set_bit(QEDF_RPORT_SESSION_READY, &fcport->flags); atomic_inc(&qedf->num_offloads); break; case RPORT_EV_LOGO: case RPORT_EV_FAILED: case RPORT_EV_STOP: port_id = rdata->ids.port_id; if (port_id == FC_FID_DIR_SERV) break; if (rdata->spp_type != FC_TYPE_FCP) { QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "No action since spp type isn't FCP\n"); break; } if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) { QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Not FCP target so no action\n"); break; } if (!rport) { QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "port_id=%x - rport notcreated Yet!!\n", port_id); break; } rp = rport->dd_data; /* * Perform session upload. Note that rdata->peers is already * removed from disc->rports list before we get this event. */ fcport = (struct qedf_rport *)&rp[1]; spin_lock_irqsave(&fcport->rport_lock, flags); /* Only free this fcport if it is offloaded already */ if (test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) && !test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) { set_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags); spin_unlock_irqrestore(&fcport->rport_lock, flags); qedf_cleanup_fcport(qedf, fcport); /* * Remove fcport to list of qedf_ctx list of offloaded * ports */ spin_lock_irqsave(&qedf->hba_lock, flags); list_del_rcu(&fcport->peers); spin_unlock_irqrestore(&qedf->hba_lock, flags); clear_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags); atomic_dec(&qedf->num_offloads); } else { spin_unlock_irqrestore(&fcport->rport_lock, flags); } break; case RPORT_EV_NONE: break; } } static void qedf_abort_io(struct fc_lport *lport) { /* NO-OP but need to fill in the template */ } static void qedf_fcp_cleanup(struct fc_lport *lport) { /* * NO-OP but need to fill in template to prevent a NULL * function pointer dereference during link down. I/Os * will be flushed when port is uploaded. 
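 * (The actual flush happens in qedf_flush_active_ios(), called from
 * qedf_cleanup_fcport() when the session is torn down.)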
*/ } static struct libfc_function_template qedf_lport_template = { .frame_send = qedf_xmit, .fcp_abort_io = qedf_abort_io, .fcp_cleanup = qedf_fcp_cleanup, .rport_event_callback = qedf_rport_event_handler, .elsct_send = qedf_elsct_send, }; static void qedf_fcoe_ctlr_setup(struct qedf_ctx *qedf) { fcoe_ctlr_init(&qedf->ctlr, FIP_MODE_AUTO); qedf->ctlr.send = qedf_fip_send; qedf->ctlr.get_src_addr = qedf_get_src_mac; ether_addr_copy(qedf->ctlr.ctl_src_addr, qedf->mac); } static void qedf_setup_fdmi(struct qedf_ctx *qedf) { struct fc_lport *lport = qedf->lport; u8 buf[8]; int pos; uint32_t i; /* * fdmi_enabled needs to be set for libfc * to execute FDMI registration */ lport->fdmi_enabled = 1; /* * Setup the necessary fc_host attributes to that will be used to fill * in the FDMI information. */ /* Get the PCI-e Device Serial Number Capability */ pos = pci_find_ext_capability(qedf->pdev, PCI_EXT_CAP_ID_DSN); if (pos) { pos += 4; for (i = 0; i < 8; i++) pci_read_config_byte(qedf->pdev, pos + i, &buf[i]); snprintf(fc_host_serial_number(lport->host), FC_SERIAL_NUMBER_SIZE, "%02X%02X%02X%02X%02X%02X%02X%02X", buf[7], buf[6], buf[5], buf[4], buf[3], buf[2], buf[1], buf[0]); } else snprintf(fc_host_serial_number(lport->host), FC_SERIAL_NUMBER_SIZE, "Unknown"); snprintf(fc_host_manufacturer(lport->host), FC_SERIAL_NUMBER_SIZE, "%s", "Marvell Semiconductor Inc."); if (qedf->pdev->device == QL45xxx) { snprintf(fc_host_model(lport->host), FC_SYMBOLIC_NAME_SIZE, "%s", "QL45xxx"); snprintf(fc_host_model_description(lport->host), FC_SYMBOLIC_NAME_SIZE, "%s", "Marvell FastLinQ QL45xxx FCoE Adapter"); } if (qedf->pdev->device == QL41xxx) { snprintf(fc_host_model(lport->host), FC_SYMBOLIC_NAME_SIZE, "%s", "QL41xxx"); snprintf(fc_host_model_description(lport->host), FC_SYMBOLIC_NAME_SIZE, "%s", "Marvell FastLinQ QL41xxx FCoE Adapter"); } snprintf(fc_host_hardware_version(lport->host), FC_VERSION_STRING_SIZE, "Rev %d", qedf->pdev->revision); snprintf(fc_host_driver_version(lport->host), FC_VERSION_STRING_SIZE, "%s", QEDF_VERSION); snprintf(fc_host_firmware_version(lport->host), FC_VERSION_STRING_SIZE, "%d.%d.%d.%d", FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION, FW_ENGINEERING_VERSION); snprintf(fc_host_vendor_identifier(lport->host), FC_VENDOR_IDENTIFIER, "%s", "Marvell"); } static int qedf_lport_setup(struct qedf_ctx *qedf) { struct fc_lport *lport = qedf->lport; lport->link_up = 0; lport->max_retry_count = QEDF_FLOGI_RETRY_CNT; lport->max_rport_retry_count = QEDF_RPORT_RETRY_CNT; lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL); lport->boot_time = jiffies; lport->e_d_tov = 2 * 1000; lport->r_a_tov = 10 * 1000; /* Set NPIV support */ lport->does_npiv = 1; fc_host_max_npiv_vports(lport->host) = QEDF_MAX_NPIV; fc_set_wwnn(lport, qedf->wwnn); fc_set_wwpn(lport, qedf->wwpn); if (fcoe_libfc_config(lport, &qedf->ctlr, &qedf_lport_template, 0)) { QEDF_ERR(&qedf->dbg_ctx, "fcoe_libfc_config failed.\n"); return -ENOMEM; } /* Allocate the exchange manager */ fc_exch_mgr_alloc(lport, FC_CLASS_3, FCOE_PARAMS_NUM_TASKS, 0xfffe, NULL); if (fc_lport_init_stats(lport)) return -ENOMEM; /* Finish lport config */ fc_lport_config(lport); /* Set max frame size */ fc_set_mfs(lport, QEDF_MFS); fc_host_maxframe_size(lport->host) = lport->mfs; /* Set default dev_loss_tmo based on module parameter */ fc_host_dev_loss_tmo(lport->host) = qedf_dev_loss_tmo; /* Set symbolic node name */ if (qedf->pdev->device == QL45xxx) snprintf(fc_host_symbolic_name(lport->host), 256, 
"Marvell FastLinQ 45xxx FCoE v%s", QEDF_VERSION); if (qedf->pdev->device == QL41xxx) snprintf(fc_host_symbolic_name(lport->host), 256, "Marvell FastLinQ 41xxx FCoE v%s", QEDF_VERSION); qedf_setup_fdmi(qedf); return 0; } /* * NPIV functions */ static int qedf_vport_libfc_config(struct fc_vport *vport, struct fc_lport *lport) { lport->link_up = 0; lport->qfull = 0; lport->max_retry_count = QEDF_FLOGI_RETRY_CNT; lport->max_rport_retry_count = QEDF_RPORT_RETRY_CNT; lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL); lport->boot_time = jiffies; lport->e_d_tov = 2 * 1000; lport->r_a_tov = 10 * 1000; lport->does_npiv = 1; /* Temporary until we add NPIV support */ /* Allocate stats for vport */ if (fc_lport_init_stats(lport)) return -ENOMEM; /* Finish lport config */ fc_lport_config(lport); /* offload related configuration */ lport->crc_offload = 0; lport->seq_offload = 0; lport->lro_enabled = 0; lport->lro_xid = 0; lport->lso_max = 0; return 0; } static int qedf_vport_create(struct fc_vport *vport, bool disabled) { struct Scsi_Host *shost = vport_to_shost(vport); struct fc_lport *n_port = shost_priv(shost); struct fc_lport *vn_port; struct qedf_ctx *base_qedf = lport_priv(n_port); struct qedf_ctx *vport_qedf; char buf[32]; int rc = 0; rc = fcoe_validate_vport_create(vport); if (rc) { fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf)); QEDF_WARN(&(base_qedf->dbg_ctx), "Failed to create vport, " "WWPN (0x%s) already exists.\n", buf); return rc; } if (atomic_read(&base_qedf->link_state) != QEDF_LINK_UP) { QEDF_WARN(&(base_qedf->dbg_ctx), "Cannot create vport " "because link is not up.\n"); return -EIO; } vn_port = libfc_vport_create(vport, sizeof(struct qedf_ctx)); if (!vn_port) { QEDF_WARN(&(base_qedf->dbg_ctx), "Could not create lport " "for vport.\n"); return -ENOMEM; } fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf)); QEDF_ERR(&(base_qedf->dbg_ctx), "Creating NPIV port, WWPN=%s.\n", buf); /* Copy some fields from base_qedf */ vport_qedf = lport_priv(vn_port); memcpy(vport_qedf, base_qedf, sizeof(struct qedf_ctx)); /* Set qedf data specific to this vport */ vport_qedf->lport = vn_port; /* Use same hba_lock as base_qedf */ vport_qedf->hba_lock = base_qedf->hba_lock; vport_qedf->pdev = base_qedf->pdev; vport_qedf->cmd_mgr = base_qedf->cmd_mgr; init_completion(&vport_qedf->flogi_compl); INIT_LIST_HEAD(&vport_qedf->fcports); INIT_DELAYED_WORK(&vport_qedf->stag_work, qedf_stag_change_work); rc = qedf_vport_libfc_config(vport, vn_port); if (rc) { QEDF_ERR(&(base_qedf->dbg_ctx), "Could not allocate memory " "for lport stats.\n"); goto err; } fc_set_wwnn(vn_port, vport->node_name); fc_set_wwpn(vn_port, vport->port_name); vport_qedf->wwnn = vn_port->wwnn; vport_qedf->wwpn = vn_port->wwpn; vn_port->host->transportt = qedf_fc_vport_transport_template; vn_port->host->can_queue = FCOE_PARAMS_NUM_TASKS; vn_port->host->max_lun = qedf_max_lun; vn_port->host->sg_tablesize = QEDF_MAX_BDS_PER_CMD; vn_port->host->max_cmd_len = QEDF_MAX_CDB_LEN; vn_port->host->max_id = QEDF_MAX_SESSIONS; rc = scsi_add_host(vn_port->host, &vport->dev); if (rc) { QEDF_WARN(&base_qedf->dbg_ctx, "Error adding Scsi_Host rc=0x%x.\n", rc); goto err; } /* Set default dev_loss_tmo based on module parameter */ fc_host_dev_loss_tmo(vn_port->host) = qedf_dev_loss_tmo; /* Init libfc stuffs */ memcpy(&vn_port->tt, &qedf_lport_template, sizeof(qedf_lport_template)); fc_exch_init(vn_port); fc_elsct_init(vn_port); fc_lport_init(vn_port); fc_disc_init(vn_port); fc_disc_config(vn_port, 
vn_port); /* Allocate the exchange manager */ shost = vport_to_shost(vport); n_port = shost_priv(shost); fc_exch_mgr_list_clone(n_port, vn_port); /* Set max frame size */ fc_set_mfs(vn_port, QEDF_MFS); fc_host_port_type(vn_port->host) = FC_PORTTYPE_UNKNOWN; if (disabled) { fc_vport_set_state(vport, FC_VPORT_DISABLED); } else { vn_port->boot_time = jiffies; fc_fabric_login(vn_port); fc_vport_setlink(vn_port); } /* Set symbolic node name */ if (base_qedf->pdev->device == QL45xxx) snprintf(fc_host_symbolic_name(vn_port->host), 256, "Marvell FastLinQ 45xxx FCoE v%s", QEDF_VERSION); if (base_qedf->pdev->device == QL41xxx) snprintf(fc_host_symbolic_name(vn_port->host), 256, "Marvell FastLinQ 41xxx FCoE v%s", QEDF_VERSION); /* Set supported speed */ fc_host_supported_speeds(vn_port->host) = n_port->link_supported_speeds; /* Set speed */ vn_port->link_speed = n_port->link_speed; /* Set port type */ fc_host_port_type(vn_port->host) = FC_PORTTYPE_NPIV; /* Set maxframe size */ fc_host_maxframe_size(vn_port->host) = n_port->mfs; QEDF_INFO(&(base_qedf->dbg_ctx), QEDF_LOG_NPIV, "vn_port=%p.\n", vn_port); /* Set up debug context for vport */ vport_qedf->dbg_ctx.host_no = vn_port->host->host_no; vport_qedf->dbg_ctx.pdev = base_qedf->pdev; return 0; err: scsi_host_put(vn_port->host); return rc; } static int qedf_vport_destroy(struct fc_vport *vport) { struct Scsi_Host *shost = vport_to_shost(vport); struct fc_lport *n_port = shost_priv(shost); struct fc_lport *vn_port = vport->dd_data; struct qedf_ctx *qedf = lport_priv(vn_port); if (!qedf) { QEDF_ERR(NULL, "qedf is NULL.\n"); goto out; } /* Set unloading bit on vport qedf_ctx to prevent more I/O */ set_bit(QEDF_UNLOADING, &qedf->flags); mutex_lock(&n_port->lp_mutex); list_del(&vn_port->list); mutex_unlock(&n_port->lp_mutex); fc_fabric_logoff(vn_port); fc_lport_destroy(vn_port); /* Detach from scsi-ml */ fc_remove_host(vn_port->host); scsi_remove_host(vn_port->host); /* * Only try to release the exchange manager if the vn_port * configuration is complete. */ if (vn_port->state == LPORT_ST_READY) fc_exch_mgr_free(vn_port); /* Free memory used by statistical counters */ fc_lport_free_stats(vn_port); /* Release Scsi_Host */ scsi_host_put(vn_port->host); out: return 0; } static int qedf_vport_disable(struct fc_vport *vport, bool disable) { struct fc_lport *lport = vport->dd_data; if (disable) { fc_vport_set_state(vport, FC_VPORT_DISABLED); fc_fabric_logoff(lport); } else { lport->boot_time = jiffies; fc_fabric_login(lport); fc_vport_setlink(lport); } return 0; } /* * During removal we need to wait for all the vports associated with a port * to be destroyed so we avoid a race condition where libfc is still trying * to reap vports while the driver remove function has already reaped the * driver contexts associated with the physical port. 
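 * qedf_wait_for_vport_destroy() below simply polls
 * fc_host->npiv_vports_inuse until it reaches zero.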
*/ static void qedf_wait_for_vport_destroy(struct qedf_ctx *qedf) { struct fc_host_attrs *fc_host = shost_to_fc_host(qedf->lport->host); QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_NPIV, "Entered.\n"); while (fc_host->npiv_vports_inuse > 0) { QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_NPIV, "Waiting for all vports to be reaped.\n"); msleep(1000); } } /** * qedf_fcoe_reset - Resets the fcoe * * @shost: shost the reset is from * * Returns: always 0 */ static int qedf_fcoe_reset(struct Scsi_Host *shost) { struct fc_lport *lport = shost_priv(shost); qedf_ctx_soft_reset(lport); return 0; } static void qedf_get_host_port_id(struct Scsi_Host *shost) { struct fc_lport *lport = shost_priv(shost); fc_host_port_id(shost) = lport->port_id; } static struct fc_host_statistics *qedf_fc_get_host_stats(struct Scsi_Host *shost) { struct fc_host_statistics *qedf_stats; struct fc_lport *lport = shost_priv(shost); struct qedf_ctx *qedf = lport_priv(lport); struct qed_fcoe_stats *fw_fcoe_stats; qedf_stats = fc_get_host_stats(shost); /* We don't collect offload stats for specific NPIV ports */ if (lport->vport) goto out; fw_fcoe_stats = kmalloc(sizeof(struct qed_fcoe_stats), GFP_KERNEL); if (!fw_fcoe_stats) { QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate memory for " "fw_fcoe_stats.\n"); goto out; } mutex_lock(&qedf->stats_mutex); /* Query firmware for offload stats */ qed_ops->get_stats(qedf->cdev, fw_fcoe_stats); /* * The expectation is that we add our offload stats to the stats * being maintained by libfc each time the fc_get_host_status callback * is invoked. The additions are not carried over for each call to * the fc_get_host_stats callback. */ qedf_stats->tx_frames += fw_fcoe_stats->fcoe_tx_data_pkt_cnt + fw_fcoe_stats->fcoe_tx_xfer_pkt_cnt + fw_fcoe_stats->fcoe_tx_other_pkt_cnt; qedf_stats->rx_frames += fw_fcoe_stats->fcoe_rx_data_pkt_cnt + fw_fcoe_stats->fcoe_rx_xfer_pkt_cnt + fw_fcoe_stats->fcoe_rx_other_pkt_cnt; qedf_stats->fcp_input_megabytes += do_div(fw_fcoe_stats->fcoe_rx_byte_cnt, 1000000); qedf_stats->fcp_output_megabytes += do_div(fw_fcoe_stats->fcoe_tx_byte_cnt, 1000000); qedf_stats->rx_words += fw_fcoe_stats->fcoe_rx_byte_cnt / 4; qedf_stats->tx_words += fw_fcoe_stats->fcoe_tx_byte_cnt / 4; qedf_stats->invalid_crc_count += fw_fcoe_stats->fcoe_silent_drop_pkt_crc_error_cnt; qedf_stats->dumped_frames = fw_fcoe_stats->fcoe_silent_drop_total_pkt_cnt; qedf_stats->error_frames += fw_fcoe_stats->fcoe_silent_drop_total_pkt_cnt; qedf_stats->fcp_input_requests += qedf->input_requests; qedf_stats->fcp_output_requests += qedf->output_requests; qedf_stats->fcp_control_requests += qedf->control_requests; qedf_stats->fcp_packet_aborts += qedf->packet_aborts; qedf_stats->fcp_frame_alloc_failures += qedf->alloc_failures; mutex_unlock(&qedf->stats_mutex); kfree(fw_fcoe_stats); out: return qedf_stats; } static struct fc_function_template qedf_fc_transport_fn = { .show_host_node_name = 1, .show_host_port_name = 1, .show_host_supported_classes = 1, .show_host_supported_fc4s = 1, .show_host_active_fc4s = 1, .show_host_maxframe_size = 1, .get_host_port_id = qedf_get_host_port_id, .show_host_port_id = 1, .show_host_supported_speeds = 1, .get_host_speed = fc_get_host_speed, .show_host_speed = 1, .show_host_port_type = 1, .get_host_port_state = fc_get_host_port_state, .show_host_port_state = 1, .show_host_symbolic_name = 1, /* * Tell FC transport to allocate enough space to store the backpointer * for the associate qedf_rport struct. 
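 * rport->dd_data is laid out as struct fc_rport_libfc_priv immediately
 * followed by struct qedf_rport, which is why the driver recovers its
 * per-rport state with fcport = (struct qedf_rport *)&rp[1].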
*/ .dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) + sizeof(struct qedf_rport)), .show_rport_maxframe_size = 1, .show_rport_supported_classes = 1, .show_host_fabric_name = 1, .show_starget_node_name = 1, .show_starget_port_name = 1, .show_starget_port_id = 1, .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo, .show_rport_dev_loss_tmo = 1, .get_fc_host_stats = qedf_fc_get_host_stats, .issue_fc_host_lip = qedf_fcoe_reset, .vport_create = qedf_vport_create, .vport_delete = qedf_vport_destroy, .vport_disable = qedf_vport_disable, .bsg_request = fc_lport_bsg_request, }; static struct fc_function_template qedf_fc_vport_transport_fn = { .show_host_node_name = 1, .show_host_port_name = 1, .show_host_supported_classes = 1, .show_host_supported_fc4s = 1, .show_host_active_fc4s = 1, .show_host_maxframe_size = 1, .show_host_port_id = 1, .show_host_supported_speeds = 1, .get_host_speed = fc_get_host_speed, .show_host_speed = 1, .show_host_port_type = 1, .get_host_port_state = fc_get_host_port_state, .show_host_port_state = 1, .show_host_symbolic_name = 1, .dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) + sizeof(struct qedf_rport)), .show_rport_maxframe_size = 1, .show_rport_supported_classes = 1, .show_host_fabric_name = 1, .show_starget_node_name = 1, .show_starget_port_name = 1, .show_starget_port_id = 1, .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo, .show_rport_dev_loss_tmo = 1, .get_fc_host_stats = fc_get_host_stats, .issue_fc_host_lip = qedf_fcoe_reset, .bsg_request = fc_lport_bsg_request, }; static bool qedf_fp_has_work(struct qedf_fastpath *fp) { struct qedf_ctx *qedf = fp->qedf; struct global_queue *que; struct qed_sb_info *sb_info = fp->sb_info; struct status_block *sb = sb_info->sb_virt; u16 prod_idx; /* Get the pointer to the global CQ this completion is on */ que = qedf->global_queues[fp->sb_id]; /* Be sure all responses have been written to PI */ rmb(); /* Get the current firmware producer index */ prod_idx = sb->pi_array[QEDF_FCOE_PARAMS_GL_RQ_PI]; return (que->cq_prod_idx != prod_idx); } /* * Interrupt handler code. */ /* Process completion queue and copy CQE contents for deferred processesing * * Return true if we should wake the I/O thread, false if not. */ static bool qedf_process_completions(struct qedf_fastpath *fp) { struct qedf_ctx *qedf = fp->qedf; struct qed_sb_info *sb_info = fp->sb_info; struct status_block *sb = sb_info->sb_virt; struct global_queue *que; u16 prod_idx; struct fcoe_cqe *cqe; struct qedf_io_work *io_work; unsigned int cpu; struct qedf_ioreq *io_req = NULL; u16 xid; u16 new_cqes; u32 comp_type; /* Get the current firmware producer index */ prod_idx = sb->pi_array[QEDF_FCOE_PARAMS_GL_RQ_PI]; /* Get the pointer to the global CQ this completion is on */ que = qedf->global_queues[fp->sb_id]; /* Calculate the amount of new elements since last processing */ new_cqes = (prod_idx >= que->cq_prod_idx) ? (prod_idx - que->cq_prod_idx) : 0x10000 - que->cq_prod_idx + prod_idx; /* Save producer index */ que->cq_prod_idx = prod_idx; while (new_cqes) { fp->completions++; cqe = &que->cq[que->cq_cons_idx]; comp_type = (cqe->cqe_data >> FCOE_CQE_CQE_TYPE_SHIFT) & FCOE_CQE_CQE_TYPE_MASK; /* * Process unsolicited CQEs directly in the interrupt handler * sine we need the fastpath ID */ if (comp_type == FCOE_UNSOLIC_CQE_TYPE) { QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL, "Unsolicated CQE.\n"); qedf_process_unsol_compl(qedf, fp->sb_id, cqe); /* * Don't add a work list item. Increment consumer * consumer index and move on. 
*/ goto inc_idx; } xid = cqe->cqe_data & FCOE_CQE_TASK_ID_MASK; io_req = &qedf->cmd_mgr->cmds[xid]; /* * Figure out which percpu thread we should queue this I/O * on. */ if (!io_req) /* If there is not io_req assocated with this CQE * just queue it on CPU 0 */ cpu = 0; else { cpu = io_req->cpu; io_req->int_cpu = smp_processor_id(); } io_work = mempool_alloc(qedf->io_mempool, GFP_ATOMIC); if (!io_work) { QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate " "work for I/O completion.\n"); continue; } memset(io_work, 0, sizeof(struct qedf_io_work)); INIT_WORK(&io_work->work, qedf_fp_io_handler); /* Copy contents of CQE for deferred processing */ memcpy(&io_work->cqe, cqe, sizeof(struct fcoe_cqe)); io_work->qedf = fp->qedf; io_work->fp = NULL; /* Only used for unsolicited frames */ queue_work_on(cpu, qedf_io_wq, &io_work->work); inc_idx: que->cq_cons_idx++; if (que->cq_cons_idx == fp->cq_num_entries) que->cq_cons_idx = 0; new_cqes--; } return true; } /* MSI-X fastpath handler code */ static irqreturn_t qedf_msix_handler(int irq, void *dev_id) { struct qedf_fastpath *fp = dev_id; if (!fp) { QEDF_ERR(NULL, "fp is null.\n"); return IRQ_HANDLED; } if (!fp->sb_info) { QEDF_ERR(NULL, "fp->sb_info in null."); return IRQ_HANDLED; } /* * Disable interrupts for this status block while we process new * completions */ qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/); while (1) { qedf_process_completions(fp); if (qedf_fp_has_work(fp) == 0) { /* Update the sb information */ qed_sb_update_sb_idx(fp->sb_info); /* Check for more work */ rmb(); if (qedf_fp_has_work(fp) == 0) { /* Re-enable interrupts */ qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1); return IRQ_HANDLED; } } } /* Do we ever want to break out of above loop? */ return IRQ_HANDLED; } /* simd handler for MSI/INTa */ static void qedf_simd_int_handler(void *cookie) { /* Cookie is qedf_ctx struct */ struct qedf_ctx *qedf = (struct qedf_ctx *)cookie; QEDF_WARN(&(qedf->dbg_ctx), "qedf=%p.\n", qedf); } #define QEDF_SIMD_HANDLER_NUM 0 static void qedf_sync_free_irqs(struct qedf_ctx *qedf) { int i; u16 vector_idx = 0; u32 vector; if (qedf->int_info.msix_cnt) { for (i = 0; i < qedf->int_info.used_cnt; i++) { vector_idx = i * qedf->dev_info.common.num_hwfns + qed_ops->common->get_affin_hwfn_idx(qedf->cdev); QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Freeing IRQ #%d vector_idx=%d.\n", i, vector_idx); vector = qedf->int_info.msix[vector_idx].vector; synchronize_irq(vector); irq_set_affinity_hint(vector, NULL); irq_set_affinity_notifier(vector, NULL); free_irq(vector, &qedf->fp_array[i]); } } else qed_ops->common->simd_handler_clean(qedf->cdev, QEDF_SIMD_HANDLER_NUM); qedf->int_info.used_cnt = 0; qed_ops->common->set_fp_int(qedf->cdev, 0); } static int qedf_request_msix_irq(struct qedf_ctx *qedf) { int i, rc, cpu; u16 vector_idx = 0; u32 vector; cpu = cpumask_first(cpu_online_mask); for (i = 0; i < qedf->num_queues; i++) { vector_idx = i * qedf->dev_info.common.num_hwfns + qed_ops->common->get_affin_hwfn_idx(qedf->cdev); QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Requesting IRQ #%d vector_idx=%d.\n", i, vector_idx); vector = qedf->int_info.msix[vector_idx].vector; rc = request_irq(vector, qedf_msix_handler, 0, "qedf", &qedf->fp_array[i]); if (rc) { QEDF_WARN(&(qedf->dbg_ctx), "request_irq failed.\n"); qedf_sync_free_irqs(qedf); return rc; } qedf->int_info.used_cnt++; rc = irq_set_affinity_hint(vector, get_cpu_mask(cpu)); cpu = cpumask_next(cpu, cpu_online_mask); } return 0; } static int qedf_setup_int(struct qedf_ctx *qedf) { int rc = 0; /* * Learn interrupt 
configuration */ rc = qed_ops->common->set_fp_int(qedf->cdev, num_online_cpus()); if (rc <= 0) return 0; rc = qed_ops->common->get_fp_int(qedf->cdev, &qedf->int_info); if (rc) return 0; QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of msix_cnt = " "0x%x num of cpus = 0x%x\n", qedf->int_info.msix_cnt, num_online_cpus()); if (qedf->int_info.msix_cnt) return qedf_request_msix_irq(qedf); qed_ops->common->simd_handler_config(qedf->cdev, &qedf, QEDF_SIMD_HANDLER_NUM, qedf_simd_int_handler); qedf->int_info.used_cnt = 1; QEDF_ERR(&qedf->dbg_ctx, "Cannot load driver due to a lack of MSI-X vectors.\n"); return -EINVAL; } /* Main function for libfc frame reception */ static void qedf_recv_frame(struct qedf_ctx *qedf, struct sk_buff *skb) { u32 fr_len; struct fc_lport *lport; struct fc_frame_header *fh; struct fcoe_crc_eof crc_eof; struct fc_frame *fp; u8 *mac = NULL; u8 *dest_mac = NULL; struct fcoe_hdr *hp; struct qedf_rport *fcport; struct fc_lport *vn_port; u32 f_ctl; lport = qedf->lport; if (lport == NULL || lport->state == LPORT_ST_DISABLED) { QEDF_WARN(NULL, "Invalid lport struct or lport disabled.\n"); kfree_skb(skb); return; } if (skb_is_nonlinear(skb)) skb_linearize(skb); mac = eth_hdr(skb)->h_source; dest_mac = eth_hdr(skb)->h_dest; /* Pull the header */ hp = (struct fcoe_hdr *)skb->data; fh = (struct fc_frame_header *) skb_transport_header(skb); skb_pull(skb, sizeof(struct fcoe_hdr)); fr_len = skb->len - sizeof(struct fcoe_crc_eof); fp = (struct fc_frame *)skb; fc_frame_init(fp); fr_dev(fp) = lport; fr_sof(fp) = hp->fcoe_sof; if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) { QEDF_INFO(NULL, QEDF_LOG_LL2, "skb_copy_bits failed.\n"); kfree_skb(skb); return; } fr_eof(fp) = crc_eof.fcoe_eof; fr_crc(fp) = crc_eof.fcoe_crc32; if (pskb_trim(skb, fr_len)) { QEDF_INFO(NULL, QEDF_LOG_LL2, "pskb_trim failed.\n"); kfree_skb(skb); return; } fh = fc_frame_header_get(fp); /* * Invalid frame filters. */ if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA && fh->fh_type == FC_TYPE_FCP) { /* Drop FCP data. We dont this in L2 path */ kfree_skb(skb); return; } if (fh->fh_r_ctl == FC_RCTL_ELS_REQ && fh->fh_type == FC_TYPE_ELS) { switch (fc_frame_payload_op(fp)) { case ELS_LOGO: if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI) { /* drop non-FIP LOGO */ kfree_skb(skb); return; } break; } } if (fh->fh_r_ctl == FC_RCTL_BA_ABTS) { /* Drop incoming ABTS */ kfree_skb(skb); return; } if (ntoh24(&dest_mac[3]) != ntoh24(fh->fh_d_id)) { QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FC frame d_id mismatch with MAC %pM.\n", dest_mac); kfree_skb(skb); return; } if (qedf->ctlr.state) { if (!ether_addr_equal(mac, qedf->ctlr.dest_addr)) { QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "Wrong source address: mac:%pM dest_addr:%pM.\n", mac, qedf->ctlr.dest_addr); kfree_skb(skb); return; } } vn_port = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id)); /* * If the destination ID from the frame header does not match what we * have on record for lport and the search for a NPIV port came up * empty then this is not addressed to our port so simply drop it. 
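 * (vn_port is only non-NULL when fc_vport_id_lookup() found an NPIV
 * port that owns the destination ID.)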
*/ if (lport->port_id != ntoh24(fh->fh_d_id) && !vn_port) { QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2, "Dropping frame due to destination mismatch: lport->port_id=0x%x fh->d_id=0x%x.\n", lport->port_id, ntoh24(fh->fh_d_id)); kfree_skb(skb); return; } f_ctl = ntoh24(fh->fh_f_ctl); if ((fh->fh_type == FC_TYPE_BLS) && (f_ctl & FC_FC_SEQ_CTX) && (f_ctl & FC_FC_EX_CTX)) { /* Drop incoming ABTS response that has both SEQ/EX CTX set */ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2, "Dropping ABTS response as both SEQ/EX CTX set.\n"); kfree_skb(skb); return; } /* * If a connection is uploading, drop incoming FCoE frames as there * is a small window where we could try to return a frame while libfc * is trying to clean things up. */ /* Get fcport associated with d_id if it exists */ fcport = qedf_fcport_lookup(qedf, ntoh24(fh->fh_d_id)); if (fcport && test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) { QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "Connection uploading, dropping fp=%p.\n", fp); kfree_skb(skb); return; } QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FCoE frame receive: " "skb=%p fp=%p src=%06x dest=%06x r_ctl=%x fh_type=%x.\n", skb, fp, ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id), fh->fh_r_ctl, fh->fh_type); if (qedf_dump_frames) print_hex_dump(KERN_WARNING, "fcoe: ", DUMP_PREFIX_OFFSET, 16, 1, skb->data, skb->len, false); fc_exch_recv(lport, fp); } static void qedf_ll2_process_skb(struct work_struct *work) { struct qedf_skb_work *skb_work = container_of(work, struct qedf_skb_work, work); struct qedf_ctx *qedf = skb_work->qedf; struct sk_buff *skb = skb_work->skb; struct ethhdr *eh; if (!qedf) { QEDF_ERR(NULL, "qedf is NULL\n"); goto err_out; } eh = (struct ethhdr *)skb->data; /* Undo VLAN encapsulation */ if (eh->h_proto == htons(ETH_P_8021Q)) { memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2); eh = skb_pull(skb, VLAN_HLEN); skb_reset_mac_header(skb); } /* * Process either a FIP frame or FCoE frame based on the * protocol value. If it's not either just drop the * frame. */ if (eh->h_proto == htons(ETH_P_FIP)) { qedf_fip_recv(qedf, skb); goto out; } else if (eh->h_proto == htons(ETH_P_FCOE)) { __skb_pull(skb, ETH_HLEN); qedf_recv_frame(qedf, skb); goto out; } else goto err_out; err_out: kfree_skb(skb); out: kfree(skb_work); return; } static int qedf_ll2_rx(void *cookie, struct sk_buff *skb, u32 arg1, u32 arg2) { struct qedf_ctx *qedf = (struct qedf_ctx *)cookie; struct qedf_skb_work *skb_work; if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) { QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2, "Dropping frame as link state is down.\n"); kfree_skb(skb); return 0; } skb_work = kzalloc(sizeof(struct qedf_skb_work), GFP_ATOMIC); if (!skb_work) { QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate skb_work so " "dropping frame.\n"); kfree_skb(skb); return 0; } INIT_WORK(&skb_work->work, qedf_ll2_process_skb); skb_work->skb = skb; skb_work->qedf = qedf; queue_work(qedf->ll2_recv_wq, &skb_work->work); return 0; } static struct qed_ll2_cb_ops qedf_ll2_cb_ops = { .rx_cb = qedf_ll2_rx, .tx_cb = NULL, }; /* Main thread to process I/O completions */ void qedf_fp_io_handler(struct work_struct *work) { struct qedf_io_work *io_work = container_of(work, struct qedf_io_work, work); u32 comp_type; /* * Deferred part of unsolicited CQE sends * frame to libfc. 
*/ comp_type = (io_work->cqe.cqe_data >> FCOE_CQE_CQE_TYPE_SHIFT) & FCOE_CQE_CQE_TYPE_MASK; if (comp_type == FCOE_UNSOLIC_CQE_TYPE && io_work->fp) fc_exch_recv(io_work->qedf->lport, io_work->fp); else qedf_process_cqe(io_work->qedf, &io_work->cqe); kfree(io_work); } static int qedf_alloc_and_init_sb(struct qedf_ctx *qedf, struct qed_sb_info *sb_info, u16 sb_id) { struct status_block *sb_virt; dma_addr_t sb_phys; int ret; sb_virt = dma_alloc_coherent(&qedf->pdev->dev, sizeof(struct status_block), &sb_phys, GFP_KERNEL); if (!sb_virt) { QEDF_ERR(&qedf->dbg_ctx, "Status block allocation failed for id = %d.\n", sb_id); return -ENOMEM; } ret = qed_ops->common->sb_init(qedf->cdev, sb_info, sb_virt, sb_phys, sb_id, QED_SB_TYPE_STORAGE); if (ret) { QEDF_ERR(&qedf->dbg_ctx, "Status block initialization failed (0x%x) for id = %d.\n", ret, sb_id); return ret; } return 0; } static void qedf_free_sb(struct qedf_ctx *qedf, struct qed_sb_info *sb_info) { if (sb_info->sb_virt) dma_free_coherent(&qedf->pdev->dev, sizeof(*sb_info->sb_virt), (void *)sb_info->sb_virt, sb_info->sb_phys); } static void qedf_destroy_sb(struct qedf_ctx *qedf) { int id; struct qedf_fastpath *fp = NULL; for (id = 0; id < qedf->num_queues; id++) { fp = &(qedf->fp_array[id]); if (fp->sb_id == QEDF_SB_ID_NULL) break; qedf_free_sb(qedf, fp->sb_info); kfree(fp->sb_info); } kfree(qedf->fp_array); } static int qedf_prepare_sb(struct qedf_ctx *qedf) { int id; struct qedf_fastpath *fp; int ret; qedf->fp_array = kcalloc(qedf->num_queues, sizeof(struct qedf_fastpath), GFP_KERNEL); if (!qedf->fp_array) { QEDF_ERR(&(qedf->dbg_ctx), "fastpath array allocation " "failed.\n"); return -ENOMEM; } for (id = 0; id < qedf->num_queues; id++) { fp = &(qedf->fp_array[id]); fp->sb_id = QEDF_SB_ID_NULL; fp->sb_info = kcalloc(1, sizeof(*fp->sb_info), GFP_KERNEL); if (!fp->sb_info) { QEDF_ERR(&(qedf->dbg_ctx), "SB info struct " "allocation failed.\n"); goto err; } ret = qedf_alloc_and_init_sb(qedf, fp->sb_info, id); if (ret) { QEDF_ERR(&(qedf->dbg_ctx), "SB allocation and " "initialization failed.\n"); goto err; } fp->sb_id = id; fp->qedf = qedf; fp->cq_num_entries = qedf->global_queues[id]->cq_mem_size / sizeof(struct fcoe_cqe); } err: return 0; } void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe) { u16 xid; struct qedf_ioreq *io_req; struct qedf_rport *fcport; u32 comp_type; u8 io_comp_type; unsigned long flags; comp_type = (cqe->cqe_data >> FCOE_CQE_CQE_TYPE_SHIFT) & FCOE_CQE_CQE_TYPE_MASK; xid = cqe->cqe_data & FCOE_CQE_TASK_ID_MASK; io_req = &qedf->cmd_mgr->cmds[xid]; /* Completion not for a valid I/O anymore so just return */ if (!io_req) { QEDF_ERR(&qedf->dbg_ctx, "io_req is NULL for xid=0x%x.\n", xid); return; } fcport = io_req->fcport; if (fcport == NULL) { QEDF_ERR(&qedf->dbg_ctx, "fcport is NULL for xid=0x%x io_req=%p.\n", xid, io_req); return; } /* * Check that fcport is offloaded. If it isn't then the spinlock * isn't valid and shouldn't be taken. We should just return. 
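 * fcport->rport_lock is only initialized once the session has been
 * offloaded (see qedf_offload_connection()).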
*/ if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { QEDF_ERR(&qedf->dbg_ctx, "Session not offloaded yet, fcport = %p.\n", fcport); return; } spin_lock_irqsave(&fcport->rport_lock, flags); io_comp_type = io_req->cmd_type; spin_unlock_irqrestore(&fcport->rport_lock, flags); switch (comp_type) { case FCOE_GOOD_COMPLETION_CQE_TYPE: atomic_inc(&fcport->free_sqes); switch (io_comp_type) { case QEDF_SCSI_CMD: qedf_scsi_completion(qedf, cqe, io_req); break; case QEDF_ELS: qedf_process_els_compl(qedf, cqe, io_req); break; case QEDF_TASK_MGMT_CMD: qedf_process_tmf_compl(qedf, cqe, io_req); break; case QEDF_SEQ_CLEANUP: qedf_process_seq_cleanup_compl(qedf, cqe, io_req); break; } break; case FCOE_ERROR_DETECTION_CQE_TYPE: atomic_inc(&fcport->free_sqes); QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Error detect CQE.\n"); qedf_process_error_detect(qedf, cqe, io_req); break; case FCOE_EXCH_CLEANUP_CQE_TYPE: atomic_inc(&fcport->free_sqes); QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Cleanup CQE.\n"); qedf_process_cleanup_compl(qedf, cqe, io_req); break; case FCOE_ABTS_CQE_TYPE: atomic_inc(&fcport->free_sqes); QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Abort CQE.\n"); qedf_process_abts_compl(qedf, cqe, io_req); break; case FCOE_DUMMY_CQE_TYPE: atomic_inc(&fcport->free_sqes); QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Dummy CQE.\n"); break; case FCOE_LOCAL_COMP_CQE_TYPE: atomic_inc(&fcport->free_sqes); QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Local completion CQE.\n"); break; case FCOE_WARNING_CQE_TYPE: atomic_inc(&fcport->free_sqes); QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Warning CQE.\n"); qedf_process_warning_compl(qedf, cqe, io_req); break; case MAX_FCOE_CQE_TYPE: atomic_inc(&fcport->free_sqes); QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Max FCoE CQE.\n"); break; default: QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Default CQE.\n"); break; } } static void qedf_free_bdq(struct qedf_ctx *qedf) { int i; if (qedf->bdq_pbl_list) dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE, qedf->bdq_pbl_list, qedf->bdq_pbl_list_dma); if (qedf->bdq_pbl) dma_free_coherent(&qedf->pdev->dev, qedf->bdq_pbl_mem_size, qedf->bdq_pbl, qedf->bdq_pbl_dma); for (i = 0; i < QEDF_BDQ_SIZE; i++) { if (qedf->bdq[i].buf_addr) { dma_free_coherent(&qedf->pdev->dev, QEDF_BDQ_BUF_SIZE, qedf->bdq[i].buf_addr, qedf->bdq[i].buf_dma); } } } static void qedf_free_global_queues(struct qedf_ctx *qedf) { int i; struct global_queue **gl = qedf->global_queues; for (i = 0; i < qedf->num_queues; i++) { if (!gl[i]) continue; if (gl[i]->cq) dma_free_coherent(&qedf->pdev->dev, gl[i]->cq_mem_size, gl[i]->cq, gl[i]->cq_dma); if (gl[i]->cq_pbl) dma_free_coherent(&qedf->pdev->dev, gl[i]->cq_pbl_size, gl[i]->cq_pbl, gl[i]->cq_pbl_dma); kfree(gl[i]); } qedf_free_bdq(qedf); } static int qedf_alloc_bdq(struct qedf_ctx *qedf) { int i; struct scsi_bd *pbl; u64 *list; /* Alloc dma memory for BDQ buffers */ for (i = 0; i < QEDF_BDQ_SIZE; i++) { qedf->bdq[i].buf_addr = dma_alloc_coherent(&qedf->pdev->dev, QEDF_BDQ_BUF_SIZE, &qedf->bdq[i].buf_dma, GFP_KERNEL); if (!qedf->bdq[i].buf_addr) { QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate BDQ " "buffer %d.\n", i); return -ENOMEM; } } /* Alloc dma memory for BDQ page buffer list */ qedf->bdq_pbl_mem_size = QEDF_BDQ_SIZE * sizeof(struct scsi_bd); qedf->bdq_pbl_mem_size = ALIGN(qedf->bdq_pbl_mem_size, QEDF_PAGE_SIZE); qedf->bdq_pbl = dma_alloc_coherent(&qedf->pdev->dev, qedf->bdq_pbl_mem_size, &qedf->bdq_pbl_dma, GFP_KERNEL); if (!qedf->bdq_pbl) { QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate BDQ PBL.\n"); return 
-ENOMEM; } QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "BDQ PBL addr=0x%p dma=%pad\n", qedf->bdq_pbl, &qedf->bdq_pbl_dma); /* * Populate BDQ PBL with physical and virtual address of individual * BDQ buffers */ pbl = (struct scsi_bd *)qedf->bdq_pbl; for (i = 0; i < QEDF_BDQ_SIZE; i++) { pbl->address.hi = cpu_to_le32(U64_HI(qedf->bdq[i].buf_dma)); pbl->address.lo = cpu_to_le32(U64_LO(qedf->bdq[i].buf_dma)); pbl->opaque.fcoe_opaque.hi = 0; /* Opaque lo data is an index into the BDQ array */ pbl->opaque.fcoe_opaque.lo = cpu_to_le32(i); pbl++; } /* Allocate list of PBL pages */ qedf->bdq_pbl_list = dma_alloc_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE, &qedf->bdq_pbl_list_dma, GFP_KERNEL); if (!qedf->bdq_pbl_list) { QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate list of PBL pages.\n"); return -ENOMEM; } /* * Now populate PBL list with pages that contain pointers to the * individual buffers. */ qedf->bdq_pbl_list_num_entries = qedf->bdq_pbl_mem_size / QEDF_PAGE_SIZE; list = (u64 *)qedf->bdq_pbl_list; for (i = 0; i < qedf->bdq_pbl_list_num_entries; i++) { *list = qedf->bdq_pbl_dma; list++; } return 0; } static int qedf_alloc_global_queues(struct qedf_ctx *qedf) { u32 *list; int i; int status; u32 *pbl; dma_addr_t page; int num_pages; /* Allocate and map CQs, RQs */ /* * Number of global queues (CQ / RQ). This should * be <= number of available MSIX vectors for the PF */ if (!qedf->num_queues) { QEDF_ERR(&(qedf->dbg_ctx), "No MSI-X vectors available!\n"); return -ENOMEM; } /* * Make sure we allocated the PBL that will contain the physical * addresses of our queues */ if (!qedf->p_cpuq) { QEDF_ERR(&qedf->dbg_ctx, "p_cpuq is NULL.\n"); return -EINVAL; } qedf->global_queues = kzalloc((sizeof(struct global_queue *) * qedf->num_queues), GFP_KERNEL); if (!qedf->global_queues) { QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate global " "queues array ptr memory\n"); return -ENOMEM; } QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "qedf->global_queues=%p.\n", qedf->global_queues); /* Allocate DMA coherent buffers for BDQ */ status = qedf_alloc_bdq(qedf); if (status) { QEDF_ERR(&qedf->dbg_ctx, "Unable to allocate bdq.\n"); goto mem_alloc_failure; } /* Allocate a CQ and an associated PBL for each MSI-X vector */ for (i = 0; i < qedf->num_queues; i++) { qedf->global_queues[i] = kzalloc(sizeof(struct global_queue), GFP_KERNEL); if (!qedf->global_queues[i]) { QEDF_WARN(&(qedf->dbg_ctx), "Unable to allocate " "global queue %d.\n", i); status = -ENOMEM; goto mem_alloc_failure; } qedf->global_queues[i]->cq_mem_size = FCOE_PARAMS_CQ_NUM_ENTRIES * sizeof(struct fcoe_cqe); qedf->global_queues[i]->cq_mem_size = ALIGN(qedf->global_queues[i]->cq_mem_size, QEDF_PAGE_SIZE); qedf->global_queues[i]->cq_pbl_size = (qedf->global_queues[i]->cq_mem_size / PAGE_SIZE) * sizeof(void *); qedf->global_queues[i]->cq_pbl_size = ALIGN(qedf->global_queues[i]->cq_pbl_size, QEDF_PAGE_SIZE); qedf->global_queues[i]->cq = dma_alloc_coherent(&qedf->pdev->dev, qedf->global_queues[i]->cq_mem_size, &qedf->global_queues[i]->cq_dma, GFP_KERNEL); if (!qedf->global_queues[i]->cq) { QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq.\n"); status = -ENOMEM; goto mem_alloc_failure; } qedf->global_queues[i]->cq_pbl = dma_alloc_coherent(&qedf->pdev->dev, qedf->global_queues[i]->cq_pbl_size, &qedf->global_queues[i]->cq_pbl_dma, GFP_KERNEL); if (!qedf->global_queues[i]->cq_pbl) { QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq PBL.\n"); status = -ENOMEM; goto mem_alloc_failure; } /* Create PBL */ num_pages = qedf->global_queues[i]->cq_mem_size / QEDF_PAGE_SIZE; 
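		/*
		 * Each PBL entry is the 64-bit DMA address of one CQ page
		 * split into two consecutive 32-bit words, low word first.
		 * Illustration only, assuming 4 KiB pages: pages at
		 * 0x12345678000 and 0x12345679000 yield the entries
		 * 0x45678000 0x00000123 0x45679000 0x00000123.
		 */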
page = qedf->global_queues[i]->cq_dma; pbl = (u32 *)qedf->global_queues[i]->cq_pbl; while (num_pages--) { *pbl = U64_LO(page); pbl++; *pbl = U64_HI(page); pbl++; page += QEDF_PAGE_SIZE; } /* Set the initial consumer index for cq */ qedf->global_queues[i]->cq_cons_idx = 0; } list = (u32 *)qedf->p_cpuq; /* * The list is built as follows: CQ#0 PBL pointer, RQ#0 PBL pointer, * CQ#1 PBL pointer, RQ#1 PBL pointer, etc. Each PBL pointer points * to the physical address which contains an array of pointers to * the physical addresses of the specific queue pages. */ for (i = 0; i < qedf->num_queues; i++) { *list = U64_LO(qedf->global_queues[i]->cq_pbl_dma); list++; *list = U64_HI(qedf->global_queues[i]->cq_pbl_dma); list++; *list = U64_LO(0); list++; *list = U64_HI(0); list++; } return 0; mem_alloc_failure: qedf_free_global_queues(qedf); return status; } static int qedf_set_fcoe_pf_param(struct qedf_ctx *qedf) { u8 sq_num_pbl_pages; u32 sq_mem_size; u32 cq_mem_size; u32 cq_num_entries; int rval; /* * The number of completion queues/fastpath interrupts/status blocks * we allocation is the minimum off: * * Number of CPUs * Number allocated by qed for our PCI function */ qedf->num_queues = MIN_NUM_CPUS_MSIX(qedf); QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of CQs is %d.\n", qedf->num_queues); qedf->p_cpuq = dma_alloc_coherent(&qedf->pdev->dev, qedf->num_queues * sizeof(struct qedf_glbl_q_params), &qedf->hw_p_cpuq, GFP_KERNEL); if (!qedf->p_cpuq) { QEDF_ERR(&(qedf->dbg_ctx), "dma_alloc_coherent failed.\n"); return 1; } rval = qedf_alloc_global_queues(qedf); if (rval) { QEDF_ERR(&(qedf->dbg_ctx), "Global queue allocation " "failed.\n"); return 1; } /* Calculate SQ PBL size in the same manner as in qedf_sq_alloc() */ sq_mem_size = SQ_NUM_ENTRIES * sizeof(struct fcoe_wqe); sq_mem_size = ALIGN(sq_mem_size, QEDF_PAGE_SIZE); sq_num_pbl_pages = (sq_mem_size / QEDF_PAGE_SIZE); /* Calculate CQ num entries */ cq_mem_size = FCOE_PARAMS_CQ_NUM_ENTRIES * sizeof(struct fcoe_cqe); cq_mem_size = ALIGN(cq_mem_size, QEDF_PAGE_SIZE); cq_num_entries = cq_mem_size / sizeof(struct fcoe_cqe); memset(&(qedf->pf_params), 0, sizeof(qedf->pf_params)); /* Setup the value for fcoe PF */ qedf->pf_params.fcoe_pf_params.num_cons = QEDF_MAX_SESSIONS; qedf->pf_params.fcoe_pf_params.num_tasks = FCOE_PARAMS_NUM_TASKS; qedf->pf_params.fcoe_pf_params.glbl_q_params_addr = (u64)qedf->hw_p_cpuq; qedf->pf_params.fcoe_pf_params.sq_num_pbl_pages = sq_num_pbl_pages; qedf->pf_params.fcoe_pf_params.rq_buffer_log_size = 0; qedf->pf_params.fcoe_pf_params.cq_num_entries = cq_num_entries; qedf->pf_params.fcoe_pf_params.num_cqs = qedf->num_queues; /* log_page_size: 12 for 4KB pages */ qedf->pf_params.fcoe_pf_params.log_page_size = ilog2(QEDF_PAGE_SIZE); qedf->pf_params.fcoe_pf_params.mtu = 9000; qedf->pf_params.fcoe_pf_params.gl_rq_pi = QEDF_FCOE_PARAMS_GL_RQ_PI; qedf->pf_params.fcoe_pf_params.gl_cmd_pi = QEDF_FCOE_PARAMS_GL_CMD_PI; /* BDQ address and size */ qedf->pf_params.fcoe_pf_params.bdq_pbl_base_addr[0] = qedf->bdq_pbl_list_dma; qedf->pf_params.fcoe_pf_params.bdq_pbl_num_entries[0] = qedf->bdq_pbl_list_num_entries; qedf->pf_params.fcoe_pf_params.rq_buffer_size = QEDF_BDQ_BUF_SIZE; QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "bdq_list=%p bdq_pbl_list_dma=%llx bdq_pbl_list_entries=%d.\n", qedf->bdq_pbl_list, qedf->pf_params.fcoe_pf_params.bdq_pbl_base_addr[0], qedf->pf_params.fcoe_pf_params.bdq_pbl_num_entries[0]); QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "cq_num_entries=%d.\n", qedf->pf_params.fcoe_pf_params.cq_num_entries); return 0; } /* 
Free DMA coherent memory for array of queue pointers we pass to qed */ static void qedf_free_fcoe_pf_param(struct qedf_ctx *qedf) { size_t size = 0; if (qedf->p_cpuq) { size = qedf->num_queues * sizeof(struct qedf_glbl_q_params); dma_free_coherent(&qedf->pdev->dev, size, qedf->p_cpuq, qedf->hw_p_cpuq); } qedf_free_global_queues(qedf); kfree(qedf->global_queues); } /* * PCI driver functions */ static const struct pci_device_id qedf_pci_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x165c) }, { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x8080) }, {0} }; MODULE_DEVICE_TABLE(pci, qedf_pci_tbl); static struct pci_driver qedf_pci_driver = { .name = QEDF_MODULE_NAME, .id_table = qedf_pci_tbl, .probe = qedf_probe, .remove = qedf_remove, .shutdown = qedf_shutdown, .suspend = qedf_suspend, }; static int __qedf_probe(struct pci_dev *pdev, int mode) { int rc = -EINVAL; struct fc_lport *lport; struct qedf_ctx *qedf = NULL; struct Scsi_Host *host; bool is_vf = false; struct qed_ll2_params params; char host_buf[20]; struct qed_link_params link_params; int status; void *task_start, *task_end; struct qed_slowpath_params slowpath_params; struct qed_probe_params qed_params; u16 retry_cnt = 10; /* * When doing error recovery we didn't reap the lport so don't try * to reallocate it. */ retry_probe: if (mode == QEDF_MODE_RECOVERY) msleep(2000); if (mode != QEDF_MODE_RECOVERY) { lport = libfc_host_alloc(&qedf_host_template, sizeof(struct qedf_ctx)); if (!lport) { QEDF_ERR(NULL, "Could not allocate lport.\n"); rc = -ENOMEM; goto err0; } fc_disc_init(lport); /* Initialize qedf_ctx */ qedf = lport_priv(lport); set_bit(QEDF_PROBING, &qedf->flags); qedf->lport = lport; qedf->ctlr.lp = lport; qedf->pdev = pdev; qedf->dbg_ctx.pdev = pdev; qedf->dbg_ctx.host_no = lport->host->host_no; spin_lock_init(&qedf->hba_lock); INIT_LIST_HEAD(&qedf->fcports); qedf->curr_conn_id = QEDF_MAX_SESSIONS - 1; atomic_set(&qedf->num_offloads, 0); qedf->stop_io_on_error = false; pci_set_drvdata(pdev, qedf); init_completion(&qedf->fipvlan_compl); mutex_init(&qedf->stats_mutex); mutex_init(&qedf->flush_mutex); qedf->flogi_pending = 0; QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO, "QLogic FastLinQ FCoE Module qedf %s, " "FW %d.%d.%d.%d\n", QEDF_VERSION, FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION, FW_ENGINEERING_VERSION); } else { /* Init pointers during recovery */ qedf = pci_get_drvdata(pdev); set_bit(QEDF_PROBING, &qedf->flags); lport = qedf->lport; } QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe started.\n"); host = lport->host; /* Allocate mempool for qedf_io_work structs */ qedf->io_mempool = mempool_create_slab_pool(QEDF_IO_WORK_MIN, qedf_io_work_cache); if (qedf->io_mempool == NULL) { QEDF_ERR(&(qedf->dbg_ctx), "qedf->io_mempool is NULL.\n"); goto err1; } QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO, "qedf->io_mempool=%p.\n", qedf->io_mempool); sprintf(host_buf, "qedf_%u_link", qedf->lport->host->host_no); qedf->link_update_wq = create_workqueue(host_buf); INIT_DELAYED_WORK(&qedf->link_update, qedf_handle_link_update); INIT_DELAYED_WORK(&qedf->link_recovery, qedf_link_recovery); INIT_DELAYED_WORK(&qedf->grcdump_work, qedf_wq_grcdump); INIT_DELAYED_WORK(&qedf->stag_work, qedf_stag_change_work); qedf->fipvlan_retries = qedf_fipvlan_retries; /* Set a default prio in case DCBX doesn't converge */ if (qedf_default_prio > -1) { /* * This is the case where we pass a modparam in so we want to * honor it even if dcbx doesn't converge. */ qedf->prio = qedf_default_prio; } else qedf->prio = QEDF_DEFAULT_PRIO; /* * Common probe. 
Takes care of basic hardware init and pci_* * functions. */ memset(&qed_params, 0, sizeof(qed_params)); qed_params.protocol = QED_PROTOCOL_FCOE; qed_params.dp_module = qedf_dp_module; qed_params.dp_level = qedf_dp_level; qed_params.is_vf = is_vf; qedf->cdev = qed_ops->common->probe(pdev, &qed_params); if (!qedf->cdev) { if ((mode == QEDF_MODE_RECOVERY) && retry_cnt) { QEDF_ERR(&qedf->dbg_ctx, "Retry %d initialize hardware\n", retry_cnt); retry_cnt--; goto retry_probe; } QEDF_ERR(&qedf->dbg_ctx, "common probe failed.\n"); rc = -ENODEV; goto err1; } /* Learn information crucial for qedf to progress */ rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info); if (rc) { QEDF_ERR(&(qedf->dbg_ctx), "Failed to dev info.\n"); goto err1; } QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "dev_info: num_hwfns=%d affin_hwfn_idx=%d.\n", qedf->dev_info.common.num_hwfns, qed_ops->common->get_affin_hwfn_idx(qedf->cdev)); /* queue allocation code should come here * order should be * slowpath_start * status block allocation * interrupt registration (to get min number of queues) * set_fcoe_pf_param * qed_sp_fcoe_func_start */ rc = qedf_set_fcoe_pf_param(qedf); if (rc) { QEDF_ERR(&(qedf->dbg_ctx), "Cannot set fcoe pf param.\n"); goto err2; } qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params); /* Learn information crucial for qedf to progress */ rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info); if (rc) { QEDF_ERR(&qedf->dbg_ctx, "Failed to fill dev info.\n"); goto err2; } if (mode != QEDF_MODE_RECOVERY) { qedf->devlink = qed_ops->common->devlink_register(qedf->cdev); if (IS_ERR(qedf->devlink)) { QEDF_ERR(&qedf->dbg_ctx, "Cannot register devlink\n"); rc = PTR_ERR(qedf->devlink); qedf->devlink = NULL; goto err2; } } /* Record BDQ producer doorbell addresses */ qedf->bdq_primary_prod = qedf->dev_info.primary_dbq_rq_addr; qedf->bdq_secondary_prod = qedf->dev_info.secondary_bdq_rq_addr; QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "BDQ primary_prod=%p secondary_prod=%p.\n", qedf->bdq_primary_prod, qedf->bdq_secondary_prod); qed_ops->register_ops(qedf->cdev, &qedf_cb_ops, qedf); rc = qedf_prepare_sb(qedf); if (rc) { QEDF_ERR(&(qedf->dbg_ctx), "Cannot start slowpath.\n"); goto err2; } /* Start the Slowpath-process */ slowpath_params.int_mode = QED_INT_MODE_MSIX; slowpath_params.drv_major = QEDF_DRIVER_MAJOR_VER; slowpath_params.drv_minor = QEDF_DRIVER_MINOR_VER; slowpath_params.drv_rev = QEDF_DRIVER_REV_VER; slowpath_params.drv_eng = QEDF_DRIVER_ENG_VER; strncpy(slowpath_params.name, "qedf", QED_DRV_VER_STR_SIZE); rc = qed_ops->common->slowpath_start(qedf->cdev, &slowpath_params); if (rc) { QEDF_ERR(&(qedf->dbg_ctx), "Cannot start slowpath.\n"); goto err2; } /* * update_pf_params needs to be called before and after slowpath * start */ qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params); /* Setup interrupts */ rc = qedf_setup_int(qedf); if (rc) { QEDF_ERR(&qedf->dbg_ctx, "Setup interrupts failed.\n"); goto err3; } rc = qed_ops->start(qedf->cdev, &qedf->tasks); if (rc) { QEDF_ERR(&(qedf->dbg_ctx), "Cannot start FCoE function.\n"); goto err4; } task_start = qedf_get_task_mem(&qedf->tasks, 0); task_end = qedf_get_task_mem(&qedf->tasks, MAX_TID_BLOCKS_FCOE - 1); QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Task context start=%p, " "end=%p block_size=%u.\n", task_start, task_end, qedf->tasks.size); /* * We need to write the number of BDs in the BDQ we've preallocated so * the f/w will do a prefetch and we'll get an unsolicited CQE when a * packet arrives. 
*/ qedf->bdq_prod_idx = QEDF_BDQ_SIZE; QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Writing %d to primary and secondary BDQ doorbell registers.\n", qedf->bdq_prod_idx); writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod); readw(qedf->bdq_primary_prod); writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod); readw(qedf->bdq_secondary_prod); qed_ops->common->set_power_state(qedf->cdev, PCI_D0); /* Now that the dev_info struct has been filled in set the MAC * address */ ether_addr_copy(qedf->mac, qedf->dev_info.common.hw_mac); QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "MAC address is %pM.\n", qedf->mac); /* * Set the WWNN and WWPN in the following way: * * If the info we get from qed is non-zero then use that to set the * WWPN and WWNN. Otherwise fall back to use fcoe_wwn_from_mac() based * on the MAC address. */ if (qedf->dev_info.wwnn != 0 && qedf->dev_info.wwpn != 0) { QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Setting WWPN and WWNN from qed dev_info.\n"); qedf->wwnn = qedf->dev_info.wwnn; qedf->wwpn = qedf->dev_info.wwpn; } else { QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Setting WWPN and WWNN using fcoe_wwn_from_mac().\n"); qedf->wwnn = fcoe_wwn_from_mac(qedf->mac, 1, 0); qedf->wwpn = fcoe_wwn_from_mac(qedf->mac, 2, 0); } QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "WWNN=%016llx " "WWPN=%016llx.\n", qedf->wwnn, qedf->wwpn); sprintf(host_buf, "host_%d", host->host_no); qed_ops->common->set_name(qedf->cdev, host_buf); /* Allocate cmd mgr */ qedf->cmd_mgr = qedf_cmd_mgr_alloc(qedf); if (!qedf->cmd_mgr) { QEDF_ERR(&(qedf->dbg_ctx), "Failed to allocate cmd mgr.\n"); rc = -ENOMEM; goto err5; } if (mode != QEDF_MODE_RECOVERY) { host->transportt = qedf_fc_transport_template; host->max_lun = qedf_max_lun; host->max_cmd_len = QEDF_MAX_CDB_LEN; host->max_id = QEDF_MAX_SESSIONS; host->can_queue = FCOE_PARAMS_NUM_TASKS; rc = scsi_add_host(host, &pdev->dev); if (rc) { QEDF_WARN(&qedf->dbg_ctx, "Error adding Scsi_Host rc=0x%x.\n", rc); goto err6; } } memset(&params, 0, sizeof(params)); params.mtu = QEDF_LL2_BUF_SIZE; ether_addr_copy(params.ll2_mac_address, qedf->mac); /* Start LL2 processing thread */ snprintf(host_buf, 20, "qedf_%d_ll2", host->host_no); qedf->ll2_recv_wq = create_workqueue(host_buf); if (!qedf->ll2_recv_wq) { QEDF_ERR(&(qedf->dbg_ctx), "Failed to LL2 workqueue.\n"); rc = -ENOMEM; goto err7; } #ifdef CONFIG_DEBUG_FS qedf_dbg_host_init(&(qedf->dbg_ctx), qedf_debugfs_ops, qedf_dbg_fops); #endif /* Start LL2 */ qed_ops->ll2->register_cb_ops(qedf->cdev, &qedf_ll2_cb_ops, qedf); rc = qed_ops->ll2->start(qedf->cdev, &params); if (rc) { QEDF_ERR(&(qedf->dbg_ctx), "Could not start Light L2.\n"); goto err7; } set_bit(QEDF_LL2_STARTED, &qedf->flags); /* Set initial FIP/FCoE VLAN to NULL */ qedf->vlan_id = 0; /* * No need to setup fcoe_ctlr or fc_lport objects during recovery since * they were not reaped during the unload process. 
*/ if (mode != QEDF_MODE_RECOVERY) { /* Setup imbedded fcoe controller */ qedf_fcoe_ctlr_setup(qedf); /* Setup lport */ rc = qedf_lport_setup(qedf); if (rc) { QEDF_ERR(&(qedf->dbg_ctx), "qedf_lport_setup failed.\n"); goto err7; } } sprintf(host_buf, "qedf_%u_timer", qedf->lport->host->host_no); qedf->timer_work_queue = create_workqueue(host_buf); if (!qedf->timer_work_queue) { QEDF_ERR(&(qedf->dbg_ctx), "Failed to start timer " "workqueue.\n"); rc = -ENOMEM; goto err7; } /* DPC workqueue is not reaped during recovery unload */ if (mode != QEDF_MODE_RECOVERY) { sprintf(host_buf, "qedf_%u_dpc", qedf->lport->host->host_no); qedf->dpc_wq = create_workqueue(host_buf); } INIT_DELAYED_WORK(&qedf->recovery_work, qedf_recovery_handler); /* * GRC dump and sysfs parameters are not reaped during the recovery * unload process. */ if (mode != QEDF_MODE_RECOVERY) { qedf->grcdump_size = qed_ops->common->dbg_all_data_size(qedf->cdev); if (qedf->grcdump_size) { rc = qedf_alloc_grc_dump_buf(&qedf->grcdump, qedf->grcdump_size); if (rc) { QEDF_ERR(&(qedf->dbg_ctx), "GRC Dump buffer alloc failed.\n"); qedf->grcdump = NULL; } QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "grcdump: addr=%p, size=%u.\n", qedf->grcdump, qedf->grcdump_size); } qedf_create_sysfs_ctx_attr(qedf); /* Initialize I/O tracing for this adapter */ spin_lock_init(&qedf->io_trace_lock); qedf->io_trace_idx = 0; } init_completion(&qedf->flogi_compl); status = qed_ops->common->update_drv_state(qedf->cdev, true); if (status) QEDF_ERR(&(qedf->dbg_ctx), "Failed to send drv state to MFW.\n"); memset(&link_params, 0, sizeof(struct qed_link_params)); link_params.link_up = true; status = qed_ops->common->set_link(qedf->cdev, &link_params); if (status) QEDF_WARN(&(qedf->dbg_ctx), "set_link failed.\n"); /* Start/restart discovery */ if (mode == QEDF_MODE_RECOVERY) fcoe_ctlr_link_up(&qedf->ctlr); else fc_fabric_login(lport); QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe done.\n"); clear_bit(QEDF_PROBING, &qedf->flags); /* All good */ return 0; err7: if (qedf->ll2_recv_wq) destroy_workqueue(qedf->ll2_recv_wq); fc_remove_host(qedf->lport->host); scsi_remove_host(qedf->lport->host); #ifdef CONFIG_DEBUG_FS qedf_dbg_host_exit(&(qedf->dbg_ctx)); #endif err6: qedf_cmd_mgr_free(qedf->cmd_mgr); err5: qed_ops->stop(qedf->cdev); err4: qedf_free_fcoe_pf_param(qedf); qedf_sync_free_irqs(qedf); err3: qed_ops->common->slowpath_stop(qedf->cdev); err2: qed_ops->common->remove(qedf->cdev); err1: scsi_host_put(lport->host); err0: return rc; } static int qedf_probe(struct pci_dev *pdev, const struct pci_device_id *id) { return __qedf_probe(pdev, QEDF_MODE_NORMAL); } static void __qedf_remove(struct pci_dev *pdev, int mode) { struct qedf_ctx *qedf; int rc; if (!pdev) { QEDF_ERR(NULL, "pdev is NULL.\n"); return; } qedf = pci_get_drvdata(pdev); /* * Prevent race where we're in board disable work and then try to * rmmod the module. 
*/ if (test_bit(QEDF_UNLOADING, &qedf->flags)) { QEDF_ERR(&qedf->dbg_ctx, "Already removing PCI function.\n"); return; } if (mode != QEDF_MODE_RECOVERY) set_bit(QEDF_UNLOADING, &qedf->flags); /* Logoff the fabric to upload all connections */ if (mode == QEDF_MODE_RECOVERY) fcoe_ctlr_link_down(&qedf->ctlr); else fc_fabric_logoff(qedf->lport); if (!qedf_wait_for_upload(qedf)) QEDF_ERR(&qedf->dbg_ctx, "Could not upload all sessions.\n"); #ifdef CONFIG_DEBUG_FS qedf_dbg_host_exit(&(qedf->dbg_ctx)); #endif /* Stop any link update handling */ cancel_delayed_work_sync(&qedf->link_update); destroy_workqueue(qedf->link_update_wq); qedf->link_update_wq = NULL; if (qedf->timer_work_queue) destroy_workqueue(qedf->timer_work_queue); /* Stop Light L2 */ clear_bit(QEDF_LL2_STARTED, &qedf->flags); qed_ops->ll2->stop(qedf->cdev); if (qedf->ll2_recv_wq) destroy_workqueue(qedf->ll2_recv_wq); /* Stop fastpath */ qedf_sync_free_irqs(qedf); qedf_destroy_sb(qedf); /* * During recovery don't destroy OS constructs that represent the * physical port. */ if (mode != QEDF_MODE_RECOVERY) { qedf_free_grc_dump_buf(&qedf->grcdump); qedf_remove_sysfs_ctx_attr(qedf); /* Remove all SCSI/libfc/libfcoe structures */ fcoe_ctlr_destroy(&qedf->ctlr); fc_lport_destroy(qedf->lport); fc_remove_host(qedf->lport->host); scsi_remove_host(qedf->lport->host); } qedf_cmd_mgr_free(qedf->cmd_mgr); if (mode != QEDF_MODE_RECOVERY) { fc_exch_mgr_free(qedf->lport); fc_lport_free_stats(qedf->lport); /* Wait for all vports to be reaped */ qedf_wait_for_vport_destroy(qedf); } /* * Now that all connections have been uploaded we can stop the * rest of the qed operations */ qed_ops->stop(qedf->cdev); if (mode != QEDF_MODE_RECOVERY) { if (qedf->dpc_wq) { /* Stop general DPC handling */ destroy_workqueue(qedf->dpc_wq); qedf->dpc_wq = NULL; } } /* Final shutdown for the board */ qedf_free_fcoe_pf_param(qedf); if (mode != QEDF_MODE_RECOVERY) { qed_ops->common->set_power_state(qedf->cdev, PCI_D0); pci_set_drvdata(pdev, NULL); } rc = qed_ops->common->update_drv_state(qedf->cdev, false); if (rc) QEDF_ERR(&(qedf->dbg_ctx), "Failed to send drv state to MFW.\n"); if (mode != QEDF_MODE_RECOVERY && qedf->devlink) { qed_ops->common->devlink_unregister(qedf->devlink); qedf->devlink = NULL; } qed_ops->common->slowpath_stop(qedf->cdev); qed_ops->common->remove(qedf->cdev); mempool_destroy(qedf->io_mempool); /* Only reap the Scsi_host on a real removal */ if (mode != QEDF_MODE_RECOVERY) scsi_host_put(qedf->lport->host); } static void qedf_remove(struct pci_dev *pdev) { /* Check to make sure this function wasn't already disabled */ if (!atomic_read(&pdev->enable_cnt)) return; __qedf_remove(pdev, QEDF_MODE_NORMAL); } void qedf_wq_grcdump(struct work_struct *work) { struct qedf_ctx *qedf = container_of(work, struct qedf_ctx, grcdump_work.work); QEDF_ERR(&(qedf->dbg_ctx), "Collecting GRC dump.\n"); qedf_capture_grc_dump(qedf); } void qedf_schedule_hw_err_handler(void *dev, enum qed_hw_err_type err_type) { struct qedf_ctx *qedf = dev; QEDF_ERR(&(qedf->dbg_ctx), "Hardware error handler scheduled, event=%d.\n", err_type); if (test_bit(QEDF_IN_RECOVERY, &qedf->flags)) { QEDF_ERR(&(qedf->dbg_ctx), "Already in recovery, not scheduling board disable work.\n"); return; } switch (err_type) { case QED_HW_ERR_FAN_FAIL: schedule_delayed_work(&qedf->board_disable_work, 0); break; case QED_HW_ERR_MFW_RESP_FAIL: case QED_HW_ERR_HW_ATTN: case QED_HW_ERR_DMAE_FAIL: case QED_HW_ERR_FW_ASSERT: /* Prevent HW attentions from being reasserted */ qed_ops->common->attn_clr_enable(qedf->cdev, 
true); break; case QED_HW_ERR_RAMROD_FAIL: /* Prevent HW attentions from being reasserted */ qed_ops->common->attn_clr_enable(qedf->cdev, true); if (qedf_enable_recovery && qedf->devlink) qed_ops->common->report_fatal_error(qedf->devlink, err_type); break; default: break; } } /* * Protocol TLV handler */ void qedf_get_protocol_tlv_data(void *dev, void *data) { struct qedf_ctx *qedf = dev; struct qed_mfw_tlv_fcoe *fcoe = data; struct fc_lport *lport; struct Scsi_Host *host; struct fc_host_attrs *fc_host; struct fc_host_statistics *hst; if (!qedf) { QEDF_ERR(NULL, "qedf is null.\n"); return; } if (test_bit(QEDF_PROBING, &qedf->flags)) { QEDF_ERR(&qedf->dbg_ctx, "Function is still probing.\n"); return; } lport = qedf->lport; host = lport->host; fc_host = shost_to_fc_host(host); /* Force a refresh of the fc_host stats including offload stats */ hst = qedf_fc_get_host_stats(host); fcoe->qos_pri_set = true; fcoe->qos_pri = 3; /* Hard coded to 3 in driver */ fcoe->ra_tov_set = true; fcoe->ra_tov = lport->r_a_tov; fcoe->ed_tov_set = true; fcoe->ed_tov = lport->e_d_tov; fcoe->npiv_state_set = true; fcoe->npiv_state = 1; /* NPIV always enabled */ fcoe->num_npiv_ids_set = true; fcoe->num_npiv_ids = fc_host->npiv_vports_inuse; /* Certain attributes we only want to set if we've selected an FCF */ if (qedf->ctlr.sel_fcf) { fcoe->switch_name_set = true; u64_to_wwn(qedf->ctlr.sel_fcf->switch_name, fcoe->switch_name); } fcoe->port_state_set = true; /* For qedf we're either link down or fabric attach */ if (lport->link_up) fcoe->port_state = QED_MFW_TLV_PORT_STATE_FABRIC; else fcoe->port_state = QED_MFW_TLV_PORT_STATE_OFFLINE; fcoe->link_failures_set = true; fcoe->link_failures = (u16)hst->link_failure_count; fcoe->fcoe_txq_depth_set = true; fcoe->fcoe_rxq_depth_set = true; fcoe->fcoe_rxq_depth = FCOE_PARAMS_NUM_TASKS; fcoe->fcoe_txq_depth = FCOE_PARAMS_NUM_TASKS; fcoe->fcoe_rx_frames_set = true; fcoe->fcoe_rx_frames = hst->rx_frames; fcoe->fcoe_tx_frames_set = true; fcoe->fcoe_tx_frames = hst->tx_frames; fcoe->fcoe_rx_bytes_set = true; fcoe->fcoe_rx_bytes = hst->fcp_input_megabytes * 1000000; fcoe->fcoe_tx_bytes_set = true; fcoe->fcoe_tx_bytes = hst->fcp_output_megabytes * 1000000; fcoe->crc_count_set = true; fcoe->crc_count = hst->invalid_crc_count; fcoe->tx_abts_set = true; fcoe->tx_abts = hst->fcp_packet_aborts; fcoe->tx_lun_rst_set = true; fcoe->tx_lun_rst = qedf->lun_resets; fcoe->abort_task_sets_set = true; fcoe->abort_task_sets = qedf->packet_aborts; fcoe->scsi_busy_set = true; fcoe->scsi_busy = qedf->busy; fcoe->scsi_tsk_full_set = true; fcoe->scsi_tsk_full = qedf->task_set_fulls; } /* Deferred work function to perform soft context reset on STAG change */ void qedf_stag_change_work(struct work_struct *work) { struct qedf_ctx *qedf = container_of(work, struct qedf_ctx, stag_work.work); printk_ratelimited("[%s]:[%s:%d]:%d: Performing software context reset.", dev_name(&qedf->pdev->dev), __func__, __LINE__, qedf->dbg_ctx.host_no); qedf_ctx_soft_reset(qedf->lport); } static void qedf_shutdown(struct pci_dev *pdev) { __qedf_remove(pdev, QEDF_MODE_NORMAL); } static int qedf_suspend(struct pci_dev *pdev, pm_message_t state) { struct qedf_ctx *qedf; if (!pdev) { QEDF_ERR(NULL, "pdev is NULL.\n"); return -ENODEV; } qedf = pci_get_drvdata(pdev); QEDF_ERR(&qedf->dbg_ctx, "%s: Device does not support suspend operation\n", __func__); return -EPERM; } /* * Recovery handler code */ static void qedf_schedule_recovery_handler(void *dev) { struct qedf_ctx *qedf = dev; QEDF_ERR(&qedf->dbg_ctx, "Recovery handler 
scheduled.\n"); schedule_delayed_work(&qedf->recovery_work, 0); } static void qedf_recovery_handler(struct work_struct *work) { struct qedf_ctx *qedf = container_of(work, struct qedf_ctx, recovery_work.work); if (test_and_set_bit(QEDF_IN_RECOVERY, &qedf->flags)) return; /* * Call common_ops->recovery_prolog to allow the MFW to quiesce * any PCI transactions. */ qed_ops->common->recovery_prolog(qedf->cdev); QEDF_ERR(&qedf->dbg_ctx, "Recovery work start.\n"); __qedf_remove(qedf->pdev, QEDF_MODE_RECOVERY); /* * Reset link and dcbx to down state since we will not get a link down * event from the MFW but calling __qedf_remove will essentially be a * link down event. */ atomic_set(&qedf->link_state, QEDF_LINK_DOWN); atomic_set(&qedf->dcbx, QEDF_DCBX_PENDING); __qedf_probe(qedf->pdev, QEDF_MODE_RECOVERY); clear_bit(QEDF_IN_RECOVERY, &qedf->flags); QEDF_ERR(&qedf->dbg_ctx, "Recovery work complete.\n"); } /* Generic TLV data callback */ void qedf_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data) { struct qedf_ctx *qedf; if (!dev) { QEDF_INFO(NULL, QEDF_LOG_EVT, "dev is NULL so ignoring get_generic_tlv_data request.\n"); return; } qedf = (struct qedf_ctx *)dev; memset(data, 0, sizeof(struct qed_generic_tlvs)); ether_addr_copy(data->mac[0], qedf->mac); } /* * Module Init/Remove */ static int __init qedf_init(void) { int ret; /* If debug=1 passed, set the default log mask */ if (qedf_debug == QEDF_LOG_DEFAULT) qedf_debug = QEDF_DEFAULT_LOG_MASK; /* * Check that default prio for FIP/FCoE traffic is between 0..7 if a * value has been set */ if (qedf_default_prio > -1) if (qedf_default_prio > 7) { qedf_default_prio = QEDF_DEFAULT_PRIO; QEDF_ERR(NULL, "FCoE/FIP priority out of range, resetting to %d.\n", QEDF_DEFAULT_PRIO); } /* Print driver banner */ QEDF_INFO(NULL, QEDF_LOG_INFO, "%s v%s.\n", QEDF_DESCR, QEDF_VERSION); /* Create kmem_cache for qedf_io_work structs */ qedf_io_work_cache = kmem_cache_create("qedf_io_work_cache", sizeof(struct qedf_io_work), 0, SLAB_HWCACHE_ALIGN, NULL); if (qedf_io_work_cache == NULL) { QEDF_ERR(NULL, "qedf_io_work_cache is NULL.\n"); goto err1; } QEDF_INFO(NULL, QEDF_LOG_DISC, "qedf_io_work_cache=%p.\n", qedf_io_work_cache); qed_ops = qed_get_fcoe_ops(); if (!qed_ops) { QEDF_ERR(NULL, "Failed to get qed fcoe operations\n"); goto err1; } #ifdef CONFIG_DEBUG_FS qedf_dbg_init("qedf"); #endif qedf_fc_transport_template = fc_attach_transport(&qedf_fc_transport_fn); if (!qedf_fc_transport_template) { QEDF_ERR(NULL, "Could not register with FC transport\n"); goto err2; } qedf_fc_vport_transport_template = fc_attach_transport(&qedf_fc_vport_transport_fn); if (!qedf_fc_vport_transport_template) { QEDF_ERR(NULL, "Could not register vport template with FC " "transport\n"); goto err3; } qedf_io_wq = create_workqueue("qedf_io_wq"); if (!qedf_io_wq) { QEDF_ERR(NULL, "Could not create qedf_io_wq.\n"); goto err4; } qedf_cb_ops.get_login_failures = qedf_get_login_failures; ret = pci_register_driver(&qedf_pci_driver); if (ret) { QEDF_ERR(NULL, "Failed to register driver\n"); goto err5; } return 0; err5: destroy_workqueue(qedf_io_wq); err4: fc_release_transport(qedf_fc_vport_transport_template); err3: fc_release_transport(qedf_fc_transport_template); err2: #ifdef CONFIG_DEBUG_FS qedf_dbg_exit(); #endif qed_put_fcoe_ops(); err1: return -EINVAL; } static void __exit qedf_cleanup(void) { pci_unregister_driver(&qedf_pci_driver); destroy_workqueue(qedf_io_wq); fc_release_transport(qedf_fc_vport_transport_template); fc_release_transport(qedf_fc_transport_template); #ifdef 
CONFIG_DEBUG_FS qedf_dbg_exit(); #endif qed_put_fcoe_ops(); kmem_cache_destroy(qedf_io_work_cache); } MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx FCoE Module"); MODULE_AUTHOR("QLogic Corporation"); MODULE_VERSION(QEDF_VERSION); module_init(qedf_init); module_exit(qedf_cleanup);
linux-master
drivers/scsi/qedf/qedf_main.c
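The global-queue setup in qedf_main.c above splits every 64-bit PBL DMA address into 32-bit low/high words and interleaves CQ and RQ PBL pointers per queue before handing the list to the firmware. Below is a minimal standalone sketch of that layout, assuming U64_LO()/U64_HI() are plain mask/shift helpers (the real macros come from the qed headers); the addresses and array sizes are invented for the example, not taken from the driver.

/* Sketch only: not driver code. */
#include <stdint.h>
#include <stdio.h>

#define U64_LO(v) ((uint32_t)((uint64_t)(v) & 0xffffffffULL))
#define U64_HI(v) ((uint32_t)(((uint64_t)(v)) >> 32))

int main(void)
{
	/* Pretend CQ PBL base addresses for two queues. */
	uint64_t cq_pbl_dma[2] = { 0x00000001f2a40000ULL, 0x00000001f2a50000ULL };
	uint32_t list[8];
	uint32_t *p = list;
	int i;

	/* CQ#i PBL pointer followed by RQ#i PBL pointer (unused here, so 0). */
	for (i = 0; i < 2; i++) {
		*p++ = U64_LO(cq_pbl_dma[i]);
		*p++ = U64_HI(cq_pbl_dma[i]);
		*p++ = U64_LO(0);
		*p++ = U64_HI(0);
	}

	for (i = 0; i < 8; i += 2)
		printf("entry %d: lo=0x%08x hi=0x%08x\n", i / 2,
		       (unsigned)list[i], (unsigned)list[i + 1]);
	return 0;
}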
// SPDX-License-Identifier: GPL-2.0-only /* * QLogic FCoE Offload Driver * Copyright (c) 2016-2018 Cavium Inc. */ #include "qedf.h" inline bool qedf_is_vport(struct qedf_ctx *qedf) { return qedf->lport->vport != NULL; } /* Get base qedf for physical port from vport */ static struct qedf_ctx *qedf_get_base_qedf(struct qedf_ctx *qedf) { struct fc_lport *lport; struct fc_lport *base_lport; if (!(qedf_is_vport(qedf))) return NULL; lport = qedf->lport; base_lport = shost_priv(vport_to_shost(lport->vport)); return lport_priv(base_lport); } static ssize_t fcoe_mac_show(struct device *dev, struct device_attribute *attr, char *buf) { struct fc_lport *lport = shost_priv(class_to_shost(dev)); u32 port_id; u8 lport_src_id[3]; u8 fcoe_mac[6]; port_id = fc_host_port_id(lport->host); lport_src_id[2] = (port_id & 0x000000FF); lport_src_id[1] = (port_id & 0x0000FF00) >> 8; lport_src_id[0] = (port_id & 0x00FF0000) >> 16; fc_fcoe_set_mac(fcoe_mac, lport_src_id); return scnprintf(buf, PAGE_SIZE, "%pM\n", fcoe_mac); } static ssize_t fka_period_show(struct device *dev, struct device_attribute *attr, char *buf) { struct fc_lport *lport = shost_priv(class_to_shost(dev)); struct qedf_ctx *qedf = lport_priv(lport); int fka_period = -1; if (qedf_is_vport(qedf)) qedf = qedf_get_base_qedf(qedf); if (qedf->ctlr.sel_fcf) fka_period = qedf->ctlr.sel_fcf->fka_period; return scnprintf(buf, PAGE_SIZE, "%d\n", fka_period); } static DEVICE_ATTR_RO(fcoe_mac); static DEVICE_ATTR_RO(fka_period); static struct attribute *qedf_host_attrs[] = { &dev_attr_fcoe_mac.attr, &dev_attr_fka_period.attr, NULL, }; static const struct attribute_group qedf_host_attr_group = { .attrs = qedf_host_attrs }; const struct attribute_group *qedf_host_groups[] = { &qedf_host_attr_group, NULL }; extern const struct qed_fcoe_ops *qed_ops; void qedf_capture_grc_dump(struct qedf_ctx *qedf) { struct qedf_ctx *base_qedf; /* Make sure we use the base qedf to take the GRC dump */ if (qedf_is_vport(qedf)) base_qedf = qedf_get_base_qedf(qedf); else base_qedf = qedf; if (test_bit(QEDF_GRCDUMP_CAPTURE, &base_qedf->flags)) { QEDF_INFO(&(base_qedf->dbg_ctx), QEDF_LOG_INFO, "GRC Dump already captured.\n"); return; } qedf_get_grc_dump(base_qedf->cdev, qed_ops->common, &base_qedf->grcdump, &base_qedf->grcdump_size); QEDF_ERR(&(base_qedf->dbg_ctx), "GRC Dump captured.\n"); set_bit(QEDF_GRCDUMP_CAPTURE, &base_qedf->flags); qedf_uevent_emit(base_qedf->lport->host, QEDF_UEVENT_CODE_GRCDUMP, NULL); } static ssize_t qedf_sysfs_read_grcdump(struct file *filep, struct kobject *kobj, struct bin_attribute *ba, char *buf, loff_t off, size_t count) { ssize_t ret = 0; struct fc_lport *lport = shost_priv(dev_to_shost(container_of(kobj, struct device, kobj))); struct qedf_ctx *qedf = lport_priv(lport); if (test_bit(QEDF_GRCDUMP_CAPTURE, &qedf->flags)) { ret = memory_read_from_buffer(buf, count, &off, qedf->grcdump, qedf->grcdump_size); } else { QEDF_ERR(&(qedf->dbg_ctx), "GRC Dump not captured!\n"); } return ret; } static ssize_t qedf_sysfs_write_grcdump(struct file *filep, struct kobject *kobj, struct bin_attribute *ba, char *buf, loff_t off, size_t count) { struct fc_lport *lport = NULL; struct qedf_ctx *qedf = NULL; long reading; int ret = 0; if (off != 0) return ret; lport = shost_priv(dev_to_shost(container_of(kobj, struct device, kobj))); qedf = lport_priv(lport); buf[1] = 0; ret = kstrtol(buf, 10, &reading); if (ret) { QEDF_ERR(&(qedf->dbg_ctx), "Invalid input, err(%d)\n", ret); return ret; } switch (reading) { case 0: memset(qedf->grcdump, 0, qedf->grcdump_size); 
clear_bit(QEDF_GRCDUMP_CAPTURE, &qedf->flags); break; case 1: qedf_capture_grc_dump(qedf); break; } return count; } static struct bin_attribute sysfs_grcdump_attr = { .attr = { .name = "grcdump", .mode = S_IRUSR | S_IWUSR, }, .size = 0, .read = qedf_sysfs_read_grcdump, .write = qedf_sysfs_write_grcdump, }; static struct sysfs_bin_attrs bin_file_entries[] = { {"grcdump", &sysfs_grcdump_attr}, {NULL}, }; void qedf_create_sysfs_ctx_attr(struct qedf_ctx *qedf) { qedf_create_sysfs_attr(qedf->lport->host, bin_file_entries); } void qedf_remove_sysfs_ctx_attr(struct qedf_ctx *qedf) { qedf_remove_sysfs_attr(qedf->lport->host, bin_file_entries); }
linux-master
drivers/scsi/qedf/qedf_attr.c
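qedf_sysfs_read_grcdump() above returns the captured GRC dump to user space through memory_read_from_buffer(), which serves the binary file in offset-sized chunks. The userspace sketch below illustrates that chunked-read semantic; the helper is re-implemented here purely for illustration and is not the kernel function itself.

/* Sketch only: a userspace analogue of memory_read_from_buffer(). */
#include <stdio.h>
#include <string.h>

/* Copy up to 'count' bytes of 'avail' starting at '*pos', advance the
 * position, and return how many bytes were copied (0 means EOF). */
static long read_from_buffer(char *to, long count, long *pos,
			     const char *from, long avail)
{
	long n;

	if (*pos >= avail)
		return 0;
	n = avail - *pos;
	if (n > count)
		n = count;
	memcpy(to, from + *pos, n);
	*pos += n;
	return n;
}

int main(void)
{
	const char dump[] = "pretend-grc-dump-contents";
	char chunk[8];
	long pos = 0, n;

	/* Pull the capture out in 8-byte pieces, as a sysfs reader would. */
	while ((n = read_from_buffer(chunk, sizeof(chunk), &pos, dump,
				     (long)sizeof(dump) - 1)) > 0)
		printf("read %ld bytes: %.*s\n", n, (int)n, chunk);
	return 0;
}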
// SPDX-License-Identifier: GPL-2.0-only /* * QLogic FCoE Offload Driver * Copyright (c) 2016-2018 Cavium Inc. */ #include <linux/if_ether.h> #include <linux/if_vlan.h> #include "qedf.h" extern const struct qed_fcoe_ops *qed_ops; /* * FIP VLAN functions that will eventually move to libfcoe. */ void qedf_fcoe_send_vlan_req(struct qedf_ctx *qedf) { struct sk_buff *skb; char *eth_fr; struct fip_vlan *vlan; #define MY_FIP_ALL_FCF_MACS ((__u8[6]) { 1, 0x10, 0x18, 1, 0, 2 }) static u8 my_fcoe_all_fcfs[ETH_ALEN] = MY_FIP_ALL_FCF_MACS; unsigned long flags = 0; int rc; skb = dev_alloc_skb(sizeof(struct fip_vlan)); if (!skb) { QEDF_ERR(&qedf->dbg_ctx, "Failed to allocate skb.\n"); return; } eth_fr = (char *)skb->data; vlan = (struct fip_vlan *)eth_fr; memset(vlan, 0, sizeof(*vlan)); ether_addr_copy(vlan->eth.h_source, qedf->mac); ether_addr_copy(vlan->eth.h_dest, my_fcoe_all_fcfs); vlan->eth.h_proto = htons(ETH_P_FIP); vlan->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER); vlan->fip.fip_op = htons(FIP_OP_VLAN); vlan->fip.fip_subcode = FIP_SC_VL_REQ; vlan->fip.fip_dl_len = htons(sizeof(vlan->desc) / FIP_BPW); vlan->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC; vlan->desc.mac.fd_desc.fip_dlen = sizeof(vlan->desc.mac) / FIP_BPW; ether_addr_copy(vlan->desc.mac.fd_mac, qedf->mac); vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME; vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW; put_unaligned_be64(qedf->lport->wwnn, &vlan->desc.wwnn.fd_wwn); skb_put(skb, sizeof(*vlan)); skb->protocol = htons(ETH_P_FIP); skb_reset_mac_header(skb); skb_reset_network_header(skb); QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Sending FIP VLAN " "request."); if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) { QEDF_WARN(&(qedf->dbg_ctx), "Cannot send vlan request " "because link is not up.\n"); kfree_skb(skb); return; } set_bit(QED_LL2_XMIT_FLAGS_FIP_DISCOVERY, &flags); rc = qed_ops->ll2->start_xmit(qedf->cdev, skb, flags); if (rc) { QEDF_ERR(&qedf->dbg_ctx, "start_xmit failed rc = %d.\n", rc); kfree_skb(skb); return; } } static void qedf_fcoe_process_vlan_resp(struct qedf_ctx *qedf, struct sk_buff *skb) { struct fip_header *fiph; struct fip_desc *desc; u16 vid = 0; ssize_t rlen; size_t dlen; fiph = (struct fip_header *)(((void *)skb->data) + 2 * ETH_ALEN + 2); rlen = ntohs(fiph->fip_dl_len) * 4; desc = (struct fip_desc *)(fiph + 1); while (rlen > 0) { dlen = desc->fip_dlen * FIP_BPW; switch (desc->fip_dtype) { case FIP_DT_VLAN: vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan); break; } desc = (struct fip_desc *)((char *)desc + dlen); rlen -= dlen; } if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) { QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Dropping VLAN response as link is down.\n"); return; } QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "VLAN response, " "vid=0x%x.\n", vid); if (vid > 0 && qedf->vlan_id != vid) { qedf_set_vlan_id(qedf, vid); /* Inform waiter that it's ok to call fcoe_ctlr_link up() */ if (!completion_done(&qedf->fipvlan_compl)) complete(&qedf->fipvlan_compl); } } void qedf_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb) { struct qedf_ctx *qedf = container_of(fip, struct qedf_ctx, ctlr); struct ethhdr *eth_hdr; struct fip_header *fiph; u16 op, vlan_tci = 0; u8 sub; int rc = -1; if (!test_bit(QEDF_LL2_STARTED, &qedf->flags)) { QEDF_WARN(&(qedf->dbg_ctx), "LL2 not started\n"); kfree_skb(skb); return; } fiph = (struct fip_header *) ((void *)skb->data + 2 * ETH_ALEN + 2); eth_hdr = (struct ethhdr *)skb_mac_header(skb); op = ntohs(fiph->fip_op); sub = fiph->fip_subcode; /* * Add VLAN tag to 
non-offload FIP frame based on current stored VLAN * for FIP/FCoE traffic. */ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), qedf->vlan_id); /* Get VLAN ID from skb for printing purposes */ __vlan_hwaccel_get_tag(skb, &vlan_tci); QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FIP frame send: " "dest=%pM op=%x sub=%x vlan=%04x.", eth_hdr->h_dest, op, sub, vlan_tci); if (qedf_dump_frames) print_hex_dump(KERN_WARNING, "fip ", DUMP_PREFIX_OFFSET, 16, 1, skb->data, skb->len, false); rc = qed_ops->ll2->start_xmit(qedf->cdev, skb, 0); if (rc) { QEDF_ERR(&qedf->dbg_ctx, "start_xmit failed rc = %d.\n", rc); kfree_skb(skb); return; } } static u8 fcoe_all_enode[ETH_ALEN] = FIP_ALL_ENODE_MACS; /* Process incoming FIP frames. */ void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb) { struct ethhdr *eth_hdr; struct fip_header *fiph; struct fip_desc *desc; struct fip_mac_desc *mp; struct fip_wwn_desc *wp; struct fip_vn_desc *vp; size_t rlen, dlen; u16 op; u8 sub; bool fcf_valid = false; /* Default is to handle CVL regardless of fabric id descriptor */ bool fabric_id_valid = true; bool fc_wwpn_valid = false; u64 switch_name; u16 vlan = 0; eth_hdr = (struct ethhdr *)skb_mac_header(skb); fiph = (struct fip_header *) ((void *)skb->data + 2 * ETH_ALEN + 2); op = ntohs(fiph->fip_op); sub = fiph->fip_subcode; QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2, "FIP frame received: skb=%p fiph=%p source=%pM destn=%pM op=%x sub=%x vlan=%04x", skb, fiph, eth_hdr->h_source, eth_hdr->h_dest, op, sub, vlan); if (qedf_dump_frames) print_hex_dump(KERN_WARNING, "fip ", DUMP_PREFIX_OFFSET, 16, 1, skb->data, skb->len, false); if (!ether_addr_equal(eth_hdr->h_dest, qedf->mac) && !ether_addr_equal(eth_hdr->h_dest, fcoe_all_enode) && !ether_addr_equal(eth_hdr->h_dest, qedf->data_src_addr)) { QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2, "Dropping FIP type 0x%x pkt due to destination MAC mismatch dest_mac=%pM ctlr.dest_addr=%pM data_src_addr=%pM.\n", op, eth_hdr->h_dest, qedf->mac, qedf->data_src_addr); kfree_skb(skb); return; } /* Handle FIP VLAN resp in the driver */ if (op == FIP_OP_VLAN && sub == FIP_SC_VL_NOTE) { qedf_fcoe_process_vlan_resp(qedf, skb); kfree_skb(skb); } else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) { QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Clear virtual " "link received.\n"); /* Check that an FCF has been selected by fcoe */ if (qedf->ctlr.sel_fcf == NULL) { QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Dropping CVL since FCF has not been selected " "yet."); kfree_skb(skb); return; } /* * We need to loop through the CVL descriptors to determine * if we want to reset the fcoe link */ rlen = ntohs(fiph->fip_dl_len) * FIP_BPW; desc = (struct fip_desc *)(fiph + 1); while (rlen >= sizeof(*desc)) { dlen = desc->fip_dlen * FIP_BPW; switch (desc->fip_dtype) { case FIP_DT_MAC: mp = (struct fip_mac_desc *)desc; QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Switch fd_mac=%pM.\n", mp->fd_mac); if (ether_addr_equal(mp->fd_mac, qedf->ctlr.sel_fcf->fcf_mac)) fcf_valid = true; break; case FIP_DT_NAME: wp = (struct fip_wwn_desc *)desc; switch_name = get_unaligned_be64(&wp->fd_wwn); QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Switch fd_wwn=%016llx fcf_switch_name=%016llx.\n", switch_name, qedf->ctlr.sel_fcf->switch_name); if (switch_name == qedf->ctlr.sel_fcf->switch_name) fc_wwpn_valid = true; break; case FIP_DT_VN_ID: fabric_id_valid = false; vp = (struct fip_vn_desc *)desc; QEDF_ERR(&qedf->dbg_ctx, "CVL vx_port fd_fc_id=0x%x fd_mac=%pM fd_wwpn=%016llx.\n", ntoh24(vp->fd_fc_id), vp->fd_mac, get_unaligned_be64(&vp->fd_wwpn)); 
/* Check for vx_port wwpn OR Check vx_port * fabric ID OR Check vx_port MAC */ if ((get_unaligned_be64(&vp->fd_wwpn) == qedf->wwpn) || (ntoh24(vp->fd_fc_id) == qedf->lport->port_id) || (ether_addr_equal(vp->fd_mac, qedf->data_src_addr))) { fabric_id_valid = true; } break; default: /* Ignore anything else */ break; } desc = (struct fip_desc *)((char *)desc + dlen); rlen -= dlen; } QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "fcf_valid=%d fabric_id_valid=%d fc_wwpn_valid=%d.\n", fcf_valid, fabric_id_valid, fc_wwpn_valid); if (fcf_valid && fabric_id_valid && fc_wwpn_valid) qedf_ctx_soft_reset(qedf->lport); kfree_skb(skb); } else { /* Everything else is handled by libfcoe */ __skb_pull(skb, ETH_HLEN); fcoe_ctlr_recv(&qedf->ctlr, skb); } } u8 *qedf_get_src_mac(struct fc_lport *lport) { struct qedf_ctx *qedf = lport_priv(lport); return qedf->data_src_addr; }
linux-master
drivers/scsi/qedf/qedf_fip.c
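Both qedf_fcoe_process_vlan_resp() and qedf_fip_recv() above walk a list of FIP descriptors in which each descriptor advertises its own length in 4-byte words (FIP_BPW). The sketch below shows that walk in isolation; the two-byte header struct, the bounds check, and the descriptor contents are invented for the example and are not the driver's own types.

/* Sketch only: generic FIP-style descriptor walk. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define FIP_BPW 4 /* bytes per descriptor-length word */

struct fip_desc_hdr {
	uint8_t dtype;
	uint8_t dlen; /* length of this descriptor in 4-byte words */
};

int main(void)
{
	/* Two fake descriptors: type 1 (2 words) and type 14 (1 word). */
	uint8_t buf[] = {
		0x01, 0x02, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff,
		0x0e, 0x01, 0x00, 0x64,
	};
	long rlen = sizeof(buf);
	uint8_t *p = buf;

	while (rlen >= (long)sizeof(struct fip_desc_hdr)) {
		struct fip_desc_hdr h;
		long dlen;

		memcpy(&h, p, sizeof(h));
		dlen = (long)h.dlen * FIP_BPW;
		if (dlen <= 0 || dlen > rlen)
			break; /* malformed descriptor, stop walking */
		printf("descriptor type=%u len=%ld bytes\n",
		       (unsigned)h.dtype, dlen);
		p += dlen;
		rlen -= dlen;
	}
	return 0;
}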
// SPDX-License-Identifier: GPL-2.0-only /* QLogic FCoE Offload Driver * Copyright (c) 2016-2018 Cavium Inc. */ #include "drv_fcoe_fw_funcs.h" #include "drv_scsi_fw_funcs.h" #define FCOE_RX_ID (0xFFFFu) static inline void init_common_sqe(struct fcoe_task_params *task_params, enum fcoe_sqe_request_type request_type) { memset(task_params->sqe, 0, sizeof(*(task_params->sqe))); SET_FIELD(task_params->sqe->flags, FCOE_WQE_REQ_TYPE, request_type); task_params->sqe->task_id = task_params->itid; } int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params, struct scsi_sgl_task_params *sgl_task_params, struct regpair sense_data_buffer_phys_addr, u32 task_retry_id, u8 fcp_cmd_payload[32]) { struct fcoe_task_context *ctx = task_params->context; const u8 val_byte = ctx->ystorm_ag_context.byte0; struct ustorm_fcoe_task_ag_ctx *u_ag_ctx; struct ystorm_fcoe_task_st_ctx *y_st_ctx; struct tstorm_fcoe_task_st_ctx *t_st_ctx; struct mstorm_fcoe_task_st_ctx *m_st_ctx; u32 io_size, val; bool slow_sgl; memset(ctx, 0, sizeof(*(ctx))); ctx->ystorm_ag_context.byte0 = val_byte; slow_sgl = scsi_is_slow_sgl(sgl_task_params->num_sges, sgl_task_params->small_mid_sge); io_size = (task_params->task_type == FCOE_TASK_TYPE_WRITE_INITIATOR ? task_params->tx_io_size : task_params->rx_io_size); /* Ystorm ctx */ y_st_ctx = &ctx->ystorm_st_context; y_st_ctx->data_2_trns_rem = cpu_to_le32(io_size); y_st_ctx->task_rety_identifier = cpu_to_le32(task_retry_id); y_st_ctx->task_type = (u8)task_params->task_type; memcpy(&y_st_ctx->tx_info_union.fcp_cmd_payload, fcp_cmd_payload, sizeof(struct fcoe_fcp_cmd_payload)); /* Tstorm ctx */ t_st_ctx = &ctx->tstorm_st_context; t_st_ctx->read_only.dev_type = (u8)(task_params->is_tape_device == 1 ? FCOE_TASK_DEV_TYPE_TAPE : FCOE_TASK_DEV_TYPE_DISK); t_st_ctx->read_only.cid = cpu_to_le32(task_params->conn_cid); val = cpu_to_le32(task_params->cq_rss_number); t_st_ctx->read_only.glbl_q_num = val; t_st_ctx->read_only.fcp_cmd_trns_size = cpu_to_le32(io_size); t_st_ctx->read_only.task_type = (u8)task_params->task_type; SET_FIELD(t_st_ctx->read_write.flags, FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME, 1); t_st_ctx->read_write.rx_id = cpu_to_le16(FCOE_RX_ID); /* Ustorm ctx */ u_ag_ctx = &ctx->ustorm_ag_context; u_ag_ctx->global_cq_num = cpu_to_le32(task_params->cq_rss_number); /* Mstorm buffer for sense/rsp data placement */ m_st_ctx = &ctx->mstorm_st_context; val = cpu_to_le32(sense_data_buffer_phys_addr.hi); m_st_ctx->rsp_buf_addr.hi = val; val = cpu_to_le32(sense_data_buffer_phys_addr.lo); m_st_ctx->rsp_buf_addr.lo = val; if (task_params->task_type == FCOE_TASK_TYPE_WRITE_INITIATOR) { /* Ystorm ctx */ y_st_ctx->expect_first_xfer = 1; /* Set the amount of super SGEs. Can be up to 4. */ SET_FIELD(y_st_ctx->sgl_mode, YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE, (slow_sgl ? SCSI_TX_SLOW_SGL : SCSI_FAST_SGL)); init_scsi_sgl_context(&y_st_ctx->sgl_params, &y_st_ctx->data_desc, sgl_task_params); /* Mstorm ctx */ SET_FIELD(m_st_ctx->flags, MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE, (slow_sgl ? SCSI_TX_SLOW_SGL : SCSI_FAST_SGL)); m_st_ctx->sgl_params.sgl_num_sges = cpu_to_le16(sgl_task_params->num_sges); } else { /* Tstorm ctx */ SET_FIELD(t_st_ctx->read_write.flags, FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE, (slow_sgl ? 
SCSI_TX_SLOW_SGL : SCSI_FAST_SGL)); /* Mstorm ctx */ m_st_ctx->data_2_trns_rem = cpu_to_le32(io_size); init_scsi_sgl_context(&m_st_ctx->sgl_params, &m_st_ctx->data_desc, sgl_task_params); } /* Init Sqe */ init_common_sqe(task_params, SEND_FCOE_CMD); return 0; } int init_initiator_midpath_unsolicited_fcoe_task( struct fcoe_task_params *task_params, struct fcoe_tx_mid_path_params *mid_path_fc_header, struct scsi_sgl_task_params *tx_sgl_task_params, struct scsi_sgl_task_params *rx_sgl_task_params, u8 fw_to_place_fc_header) { struct fcoe_task_context *ctx = task_params->context; const u8 val_byte = ctx->ystorm_ag_context.byte0; struct ustorm_fcoe_task_ag_ctx *u_ag_ctx; struct ystorm_fcoe_task_st_ctx *y_st_ctx; struct tstorm_fcoe_task_st_ctx *t_st_ctx; struct mstorm_fcoe_task_st_ctx *m_st_ctx; u32 val; memset(ctx, 0, sizeof(*(ctx))); ctx->ystorm_ag_context.byte0 = val_byte; /* Init Ystorm */ y_st_ctx = &ctx->ystorm_st_context; init_scsi_sgl_context(&y_st_ctx->sgl_params, &y_st_ctx->data_desc, tx_sgl_task_params); SET_FIELD(y_st_ctx->sgl_mode, YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE, SCSI_FAST_SGL); y_st_ctx->data_2_trns_rem = cpu_to_le32(task_params->tx_io_size); y_st_ctx->task_type = (u8)task_params->task_type; memcpy(&y_st_ctx->tx_info_union.tx_params.mid_path, mid_path_fc_header, sizeof(struct fcoe_tx_mid_path_params)); /* Init Mstorm */ m_st_ctx = &ctx->mstorm_st_context; init_scsi_sgl_context(&m_st_ctx->sgl_params, &m_st_ctx->data_desc, rx_sgl_task_params); SET_FIELD(m_st_ctx->flags, MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER, fw_to_place_fc_header); m_st_ctx->data_2_trns_rem = cpu_to_le32(task_params->rx_io_size); /* Init Tstorm */ t_st_ctx = &ctx->tstorm_st_context; t_st_ctx->read_only.cid = cpu_to_le32(task_params->conn_cid); val = cpu_to_le32(task_params->cq_rss_number); t_st_ctx->read_only.glbl_q_num = val; t_st_ctx->read_only.task_type = (u8)task_params->task_type; SET_FIELD(t_st_ctx->read_write.flags, FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME, 1); t_st_ctx->read_write.rx_id = cpu_to_le16(FCOE_RX_ID); /* Init Ustorm */ u_ag_ctx = &ctx->ustorm_ag_context; u_ag_ctx->global_cq_num = cpu_to_le32(task_params->cq_rss_number); /* Init SQE */ init_common_sqe(task_params, SEND_FCOE_MIDPATH); task_params->sqe->additional_info_union.burst_length = tx_sgl_task_params->total_buffer_size; SET_FIELD(task_params->sqe->flags, FCOE_WQE_NUM_SGES, tx_sgl_task_params->num_sges); SET_FIELD(task_params->sqe->flags, FCOE_WQE_SGL_MODE, SCSI_FAST_SGL); return 0; } int init_initiator_abort_fcoe_task(struct fcoe_task_params *task_params) { init_common_sqe(task_params, SEND_FCOE_ABTS_REQUEST); return 0; } int init_initiator_cleanup_fcoe_task(struct fcoe_task_params *task_params) { init_common_sqe(task_params, FCOE_EXCHANGE_CLEANUP); return 0; } int init_initiator_sequence_recovery_fcoe_task( struct fcoe_task_params *task_params, u32 desired_offset) { init_common_sqe(task_params, FCOE_SEQUENCE_RECOVERY); task_params->sqe->additional_info_union.seq_rec_updated_offset = desired_offset; return 0; }
linux-master
drivers/scsi/qedf/drv_fcoe_fw_funcs.c
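The task-context helpers above pack values into firmware words with SET_FIELD(), which pairs each field name with *_MASK/*_SHIFT constants via token pasting. The sketch below assumes that usual mask/shift pattern; the macro body, field names, and values are illustrative stand-ins, not the definitions from the qed HSI headers.

/* Sketch only: a SET_FIELD()-style bitfield packer. */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_REQ_TYPE_MASK  0x7
#define EXAMPLE_REQ_TYPE_SHIFT 0
#define EXAMPLE_NUM_SGES_MASK  0xF
#define EXAMPLE_NUM_SGES_SHIFT 3

#define SET_FIELD(value, name, flag)                                        \
	do {                                                                 \
		(value) &= ~((uint32_t)name##_MASK << name##_SHIFT);         \
		(value) |= (((uint32_t)(flag) & name##_MASK) << name##_SHIFT); \
	} while (0)

int main(void)
{
	uint32_t flags = 0;

	SET_FIELD(flags, EXAMPLE_REQ_TYPE, 5); /* a request-type code */
	SET_FIELD(flags, EXAMPLE_NUM_SGES, 3); /* a scatter-gather count */
	printf("flags = 0x%08x\n", (unsigned)flags); /* prints 0x0000001d */
	return 0;
}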
// SPDX-License-Identifier: GPL-2.0-only /* * QLogic FCoE Offload Driver * Copyright (c) 2016-2018 Cavium Inc. */ #include "qedf.h" /* It's assumed that the lock is held when calling this function. */ static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op, void *data, uint32_t data_len, void (*cb_func)(struct qedf_els_cb_arg *cb_arg), struct qedf_els_cb_arg *cb_arg, uint32_t timer_msec) { struct qedf_ctx *qedf; struct fc_lport *lport; struct qedf_ioreq *els_req; struct qedf_mp_req *mp_req; struct fc_frame_header *fc_hdr; struct fcoe_task_context *task; int rc = 0; uint32_t did, sid; uint16_t xid; struct fcoe_wqe *sqe; unsigned long flags; u16 sqe_idx; if (!fcport) { QEDF_ERR(NULL, "fcport is NULL"); rc = -EINVAL; goto els_err; } qedf = fcport->qedf; lport = qedf->lport; QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending ELS\n"); rc = fc_remote_port_chkready(fcport->rport); if (rc) { QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: rport not ready\n", op); rc = -EAGAIN; goto els_err; } if (lport->state != LPORT_ST_READY || !(lport->link_up)) { QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: link is not ready\n", op); rc = -EAGAIN; goto els_err; } if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: fcport not ready\n", op); rc = -EINVAL; goto els_err; } els_req = qedf_alloc_cmd(fcport, QEDF_ELS); if (!els_req) { QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS, "Failed to alloc ELS request 0x%x\n", op); rc = -ENOMEM; goto els_err; } QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "initiate_els els_req = " "0x%p cb_arg = %p xid = %x\n", els_req, cb_arg, els_req->xid); els_req->sc_cmd = NULL; els_req->cmd_type = QEDF_ELS; els_req->fcport = fcport; els_req->cb_func = cb_func; cb_arg->io_req = els_req; cb_arg->op = op; els_req->cb_arg = cb_arg; els_req->data_xfer_len = data_len; /* Record which cpu this request is associated with */ els_req->cpu = smp_processor_id(); mp_req = (struct qedf_mp_req *)&(els_req->mp_req); rc = qedf_init_mp_req(els_req); if (rc) { QEDF_ERR(&(qedf->dbg_ctx), "ELS MP request init failed\n"); kref_put(&els_req->refcount, qedf_release_cmd); goto els_err; } else { rc = 0; } /* Fill ELS Payload */ if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) { memcpy(mp_req->req_buf, data, data_len); } else { QEDF_ERR(&(qedf->dbg_ctx), "Invalid ELS op 0x%x\n", op); els_req->cb_func = NULL; els_req->cb_arg = NULL; kref_put(&els_req->refcount, qedf_release_cmd); rc = -EINVAL; } if (rc) goto els_err; /* Fill FC header */ fc_hdr = &(mp_req->req_fc_hdr); did = fcport->rdata->ids.port_id; sid = fcport->sid; __fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid, FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); /* Obtain exchange id */ xid = els_req->xid; spin_lock_irqsave(&fcport->rport_lock, flags); sqe_idx = qedf_get_sqe_idx(fcport); sqe = &fcport->sq[sqe_idx]; memset(sqe, 0, sizeof(struct fcoe_wqe)); /* Initialize task context for this IO request */ task = qedf_get_task_mem(&qedf->tasks, xid); qedf_init_mp_task(els_req, task, sqe); /* Put timer on els request */ if (timer_msec) qedf_cmd_timer_set(qedf, els_req, timer_msec); /* Ring doorbell */ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Ringing doorbell for ELS " "req\n"); qedf_ring_doorbell(fcport); set_bit(QEDF_CMD_OUTSTANDING, &els_req->flags); spin_unlock_irqrestore(&fcport->rport_lock, flags); els_err: return rc; } void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, struct qedf_ioreq *els_req) { struct fcoe_cqe_midpath_info *mp_info; struct qedf_rport *fcport; 
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered with xid = 0x%x" " cmd_type = %d.\n", els_req->xid, els_req->cmd_type); if ((els_req->event == QEDF_IOREQ_EV_ELS_FLUSH) || (els_req->event == QEDF_IOREQ_EV_CLEANUP_SUCCESS) || (els_req->event == QEDF_IOREQ_EV_CLEANUP_FAILED)) { QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "ELS completion xid=0x%x after flush event=0x%x", els_req->xid, els_req->event); return; } fcport = els_req->fcport; /* When flush is active, * let the cmds be completed from the cleanup context */ if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) || test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags)) { QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Dropping ELS completion xid=0x%x as fcport is flushing", els_req->xid); return; } clear_bit(QEDF_CMD_OUTSTANDING, &els_req->flags); /* Kill the ELS timer */ cancel_delayed_work(&els_req->timeout_work); /* Get ELS response length from CQE */ mp_info = &cqe->cqe_info.midpath_info; els_req->mp_req.resp_len = mp_info->data_placement_size; /* Parse ELS response */ if ((els_req->cb_func) && (els_req->cb_arg)) { els_req->cb_func(els_req->cb_arg); els_req->cb_arg = NULL; } kref_put(&els_req->refcount, qedf_release_cmd); } static void qedf_rrq_compl(struct qedf_els_cb_arg *cb_arg) { struct qedf_ioreq *orig_io_req; struct qedf_ioreq *rrq_req; struct qedf_ctx *qedf; int refcount; rrq_req = cb_arg->io_req; qedf = rrq_req->fcport->qedf; QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered.\n"); orig_io_req = cb_arg->aborted_io_req; if (!orig_io_req) { QEDF_ERR(&qedf->dbg_ctx, "Original io_req is NULL, rrq_req = %p.\n", rrq_req); goto out_free; } refcount = kref_read(&orig_io_req->refcount); QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "rrq_compl: orig io = %p," " orig xid = 0x%x, rrq_xid = 0x%x, refcount=%d\n", orig_io_req, orig_io_req->xid, rrq_req->xid, refcount); /* * This should return the aborted io_req to the command pool. Note that * we need to check the refcound in case the original request was * flushed but we get a completion on this xid. */ if (orig_io_req && refcount > 0) kref_put(&orig_io_req->refcount, qedf_release_cmd); out_free: /* * Release a reference to the rrq request if we timed out as the * rrq completion handler is called directly from the timeout handler * and not from els_compl where the reference would have normally been * released. 
*/ if (rrq_req->event == QEDF_IOREQ_EV_ELS_TMO) kref_put(&rrq_req->refcount, qedf_release_cmd); kfree(cb_arg); } /* Assumes kref is already held by caller */ int qedf_send_rrq(struct qedf_ioreq *aborted_io_req) { struct fc_els_rrq rrq; struct qedf_rport *fcport; struct fc_lport *lport; struct qedf_els_cb_arg *cb_arg = NULL; struct qedf_ctx *qedf; uint32_t sid; uint32_t r_a_tov; int rc; int refcount; if (!aborted_io_req) { QEDF_ERR(NULL, "abort_io_req is NULL.\n"); return -EINVAL; } fcport = aborted_io_req->fcport; if (!fcport) { refcount = kref_read(&aborted_io_req->refcount); QEDF_ERR(NULL, "RRQ work was queued prior to a flush xid=0x%x, refcount=%d.\n", aborted_io_req->xid, refcount); kref_put(&aborted_io_req->refcount, qedf_release_cmd); return -EINVAL; } /* Check that fcport is still offloaded */ if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { QEDF_ERR(NULL, "fcport is no longer offloaded.\n"); return -EINVAL; } if (!fcport->qedf) { QEDF_ERR(NULL, "fcport->qedf is NULL.\n"); return -EINVAL; } qedf = fcport->qedf; /* * Sanity check that we can send a RRQ to make sure that refcount isn't * 0 */ refcount = kref_read(&aborted_io_req->refcount); if (refcount != 1) { QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS, "refcount for xid=%x io_req=%p refcount=%d is not 1.\n", aborted_io_req->xid, aborted_io_req, refcount); return -EINVAL; } lport = qedf->lport; sid = fcport->sid; r_a_tov = lport->r_a_tov; QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending RRQ orig " "io = %p, orig_xid = 0x%x\n", aborted_io_req, aborted_io_req->xid); memset(&rrq, 0, sizeof(rrq)); cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO); if (!cb_arg) { QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for " "RRQ\n"); rc = -ENOMEM; goto rrq_err; } cb_arg->aborted_io_req = aborted_io_req; rrq.rrq_cmd = ELS_RRQ; hton24(rrq.rrq_s_id, sid); rrq.rrq_ox_id = htons(aborted_io_req->xid); rrq.rrq_rx_id = htons(aborted_io_req->task->tstorm_st_context.read_write.rx_id); rc = qedf_initiate_els(fcport, ELS_RRQ, &rrq, sizeof(rrq), qedf_rrq_compl, cb_arg, r_a_tov); rrq_err: if (rc) { QEDF_ERR(&(qedf->dbg_ctx), "RRQ failed - release orig io " "req 0x%x\n", aborted_io_req->xid); kfree(cb_arg); kref_put(&aborted_io_req->refcount, qedf_release_cmd); } return rc; } static void qedf_process_l2_frame_compl(struct qedf_rport *fcport, struct fc_frame *fp, u16 l2_oxid) { struct fc_lport *lport = fcport->qedf->lport; struct fc_frame_header *fh; u32 crc; fh = (struct fc_frame_header *)fc_frame_header_get(fp); /* Set the OXID we return to what libfc used */ if (l2_oxid != FC_XID_UNKNOWN) fh->fh_ox_id = htons(l2_oxid); /* Setup header fields */ fh->fh_r_ctl = FC_RCTL_ELS_REP; fh->fh_type = FC_TYPE_ELS; /* Last sequence, end sequence */ fh->fh_f_ctl[0] = 0x98; hton24(fh->fh_d_id, lport->port_id); hton24(fh->fh_s_id, fcport->rdata->ids.port_id); fh->fh_rx_id = 0xffff; /* Set frame attributes */ crc = fcoe_fc_crc(fp); fc_frame_init(fp); fr_dev(fp) = lport; fr_sof(fp) = FC_SOF_I3; fr_eof(fp) = FC_EOF_T; fr_crc(fp) = cpu_to_le32(~crc); /* Send completed request to libfc */ fc_exch_recv(lport, fp); } /* * In instances where an ELS command times out we may need to restart the * rport by logging out and then logging back in. 
*/ void qedf_restart_rport(struct qedf_rport *fcport) { struct fc_lport *lport; struct fc_rport_priv *rdata; u32 port_id; unsigned long flags; if (!fcport) { QEDF_ERR(NULL, "fcport is NULL.\n"); return; } spin_lock_irqsave(&fcport->rport_lock, flags); if (test_bit(QEDF_RPORT_IN_RESET, &fcport->flags) || !test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) || test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) { QEDF_ERR(&(fcport->qedf->dbg_ctx), "fcport %p already in reset or not offloaded.\n", fcport); spin_unlock_irqrestore(&fcport->rport_lock, flags); return; } /* Set that we are now in reset */ set_bit(QEDF_RPORT_IN_RESET, &fcport->flags); spin_unlock_irqrestore(&fcport->rport_lock, flags); rdata = fcport->rdata; if (rdata && !kref_get_unless_zero(&rdata->kref)) { fcport->rdata = NULL; rdata = NULL; } if (rdata && rdata->rp_state == RPORT_ST_READY) { lport = fcport->qedf->lport; port_id = rdata->ids.port_id; QEDF_ERR(&(fcport->qedf->dbg_ctx), "LOGO port_id=%x.\n", port_id); fc_rport_logoff(rdata); kref_put(&rdata->kref, fc_rport_destroy); mutex_lock(&lport->disc.disc_mutex); /* Recreate the rport and log back in */ rdata = fc_rport_create(lport, port_id); mutex_unlock(&lport->disc.disc_mutex); if (rdata) fc_rport_login(rdata); fcport->rdata = rdata; } clear_bit(QEDF_RPORT_IN_RESET, &fcport->flags); } static void qedf_l2_els_compl(struct qedf_els_cb_arg *cb_arg) { struct qedf_ioreq *els_req; struct qedf_rport *fcport; struct qedf_mp_req *mp_req; struct fc_frame *fp; struct fc_frame_header *fh, *mp_fc_hdr; void *resp_buf, *fc_payload; u32 resp_len; u16 l2_oxid; l2_oxid = cb_arg->l2_oxid; els_req = cb_arg->io_req; if (!els_req) { QEDF_ERR(NULL, "els_req is NULL.\n"); goto free_arg; } /* * If we are flushing the command just free the cb_arg as none of the * response data will be valid. */ if (els_req->event == QEDF_IOREQ_EV_ELS_FLUSH) { QEDF_ERR(NULL, "els_req xid=0x%x event is flush.\n", els_req->xid); goto free_arg; } fcport = els_req->fcport; mp_req = &(els_req->mp_req); mp_fc_hdr = &(mp_req->resp_fc_hdr); resp_len = mp_req->resp_len; resp_buf = mp_req->resp_buf; /* * If a middle path ELS command times out, don't try to return * the command but rather do any internal cleanup and then libfc * timeout the command and clean up its internal resources. */ if (els_req->event == QEDF_IOREQ_EV_ELS_TMO) { /* * If ADISC times out, libfc will timeout the exchange and then * try to send a PLOGI which will timeout since the session is * still offloaded. Force libfc to logout the session which * will offload the connection and allow the PLOGI response to * flow over the LL2 path. 
*/ if (cb_arg->op == ELS_ADISC) qedf_restart_rport(fcport); return; } if (sizeof(struct fc_frame_header) + resp_len > QEDF_PAGE_SIZE) { QEDF_ERR(&(fcport->qedf->dbg_ctx), "resp_len is " "beyond page size.\n"); goto free_arg; } fp = fc_frame_alloc(fcport->qedf->lport, resp_len); if (!fp) { QEDF_ERR(&(fcport->qedf->dbg_ctx), "fc_frame_alloc failure.\n"); return; } /* Copy frame header from firmware into fp */ fh = (struct fc_frame_header *)fc_frame_header_get(fp); memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header)); /* Copy payload from firmware into fp */ fc_payload = fc_frame_payload_get(fp, resp_len); memcpy(fc_payload, resp_buf, resp_len); QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS, "Completing OX_ID 0x%x back to libfc.\n", l2_oxid); qedf_process_l2_frame_compl(fcport, fp, l2_oxid); free_arg: kfree(cb_arg); } int qedf_send_adisc(struct qedf_rport *fcport, struct fc_frame *fp) { struct fc_els_adisc *adisc; struct fc_frame_header *fh; struct fc_lport *lport = fcport->qedf->lport; struct qedf_els_cb_arg *cb_arg = NULL; struct qedf_ctx *qedf; uint32_t r_a_tov = lport->r_a_tov; int rc; qedf = fcport->qedf; fh = fc_frame_header_get(fp); cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO); if (!cb_arg) { QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for " "ADISC\n"); rc = -ENOMEM; goto adisc_err; } cb_arg->l2_oxid = ntohs(fh->fh_ox_id); QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending ADISC ox_id=0x%x.\n", cb_arg->l2_oxid); adisc = fc_frame_payload_get(fp, sizeof(*adisc)); rc = qedf_initiate_els(fcport, ELS_ADISC, adisc, sizeof(*adisc), qedf_l2_els_compl, cb_arg, r_a_tov); adisc_err: if (rc) { QEDF_ERR(&(qedf->dbg_ctx), "ADISC failed.\n"); kfree(cb_arg); } return rc; } static void qedf_srr_compl(struct qedf_els_cb_arg *cb_arg) { struct qedf_ioreq *orig_io_req; struct qedf_ioreq *srr_req; struct qedf_mp_req *mp_req; struct fc_frame_header *mp_fc_hdr, *fh; struct fc_frame *fp; void *resp_buf, *fc_payload; u32 resp_len; struct fc_lport *lport; struct qedf_ctx *qedf; int refcount; u8 opcode; srr_req = cb_arg->io_req; qedf = srr_req->fcport->qedf; lport = qedf->lport; orig_io_req = cb_arg->aborted_io_req; if (!orig_io_req) { QEDF_ERR(NULL, "orig_io_req is NULL.\n"); goto out_free; } clear_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags); if (srr_req->event != QEDF_IOREQ_EV_ELS_TMO && srr_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT) cancel_delayed_work_sync(&orig_io_req->timeout_work); refcount = kref_read(&orig_io_req->refcount); QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered: orig_io=%p," " orig_io_xid=0x%x, rec_xid=0x%x, refcount=%d\n", orig_io_req, orig_io_req->xid, srr_req->xid, refcount); /* If a SRR times out, simply free resources */ if (srr_req->event == QEDF_IOREQ_EV_ELS_TMO) { QEDF_ERR(&qedf->dbg_ctx, "ELS timeout rec_xid=0x%x.\n", srr_req->xid); goto out_put; } /* Normalize response data into struct fc_frame */ mp_req = &(srr_req->mp_req); mp_fc_hdr = &(mp_req->resp_fc_hdr); resp_len = mp_req->resp_len; resp_buf = mp_req->resp_buf; fp = fc_frame_alloc(lport, resp_len); if (!fp) { QEDF_ERR(&(qedf->dbg_ctx), "fc_frame_alloc failure.\n"); goto out_put; } /* Copy frame header from firmware into fp */ fh = (struct fc_frame_header *)fc_frame_header_get(fp); memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header)); /* Copy payload from firmware into fp */ fc_payload = fc_frame_payload_get(fp, resp_len); memcpy(fc_payload, resp_buf, resp_len); opcode = fc_frame_payload_op(fp); switch (opcode) { case ELS_LS_ACC: QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "SRR success.\n"); 
break; case ELS_LS_RJT: QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS, "SRR rejected.\n"); qedf_initiate_abts(orig_io_req, true); break; } fc_frame_free(fp); out_put: /* Put reference for original command since SRR completed */ kref_put(&orig_io_req->refcount, qedf_release_cmd); out_free: kfree(cb_arg); } static int qedf_send_srr(struct qedf_ioreq *orig_io_req, u32 offset, u8 r_ctl) { struct fcp_srr srr; struct qedf_ctx *qedf; struct qedf_rport *fcport; struct fc_lport *lport; struct qedf_els_cb_arg *cb_arg = NULL; u32 r_a_tov; int rc; if (!orig_io_req) { QEDF_ERR(NULL, "orig_io_req is NULL.\n"); return -EINVAL; } fcport = orig_io_req->fcport; /* Check that fcport is still offloaded */ if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { QEDF_ERR(NULL, "fcport is no longer offloaded.\n"); return -EINVAL; } if (!fcport->qedf) { QEDF_ERR(NULL, "fcport->qedf is NULL.\n"); return -EINVAL; } /* Take reference until SRR command completion */ kref_get(&orig_io_req->refcount); qedf = fcport->qedf; lport = qedf->lport; r_a_tov = lport->r_a_tov; QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending SRR orig_io=%p, " "orig_xid=0x%x\n", orig_io_req, orig_io_req->xid); memset(&srr, 0, sizeof(srr)); cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO); if (!cb_arg) { QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for " "SRR\n"); rc = -ENOMEM; goto srr_err; } cb_arg->aborted_io_req = orig_io_req; srr.srr_op = ELS_SRR; srr.srr_ox_id = htons(orig_io_req->xid); srr.srr_rx_id = htons(orig_io_req->rx_id); srr.srr_rel_off = htonl(offset); srr.srr_r_ctl = r_ctl; rc = qedf_initiate_els(fcport, ELS_SRR, &srr, sizeof(srr), qedf_srr_compl, cb_arg, r_a_tov); srr_err: if (rc) { QEDF_ERR(&(qedf->dbg_ctx), "SRR failed - release orig_io_req" "=0x%x\n", orig_io_req->xid); kfree(cb_arg); /* If we fail to queue SRR, send ABTS to orig_io */ qedf_initiate_abts(orig_io_req, true); kref_put(&orig_io_req->refcount, qedf_release_cmd); } else /* Tell other threads that SRR is in progress */ set_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags); return rc; } static void qedf_initiate_seq_cleanup(struct qedf_ioreq *orig_io_req, u32 offset, u8 r_ctl) { struct qedf_rport *fcport; unsigned long flags; struct qedf_els_cb_arg *cb_arg; struct fcoe_wqe *sqe; u16 sqe_idx; fcport = orig_io_req->fcport; QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS, "Doing sequence cleanup for xid=0x%x offset=%u.\n", orig_io_req->xid, offset); cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO); if (!cb_arg) { QEDF_ERR(&(fcport->qedf->dbg_ctx), "Unable to allocate cb_arg " "for sequence cleanup\n"); return; } /* Get reference for cleanup request */ kref_get(&orig_io_req->refcount); orig_io_req->cmd_type = QEDF_SEQ_CLEANUP; cb_arg->offset = offset; cb_arg->r_ctl = r_ctl; orig_io_req->cb_arg = cb_arg; qedf_cmd_timer_set(fcport->qedf, orig_io_req, QEDF_CLEANUP_TIMEOUT * HZ); spin_lock_irqsave(&fcport->rport_lock, flags); sqe_idx = qedf_get_sqe_idx(fcport); sqe = &fcport->sq[sqe_idx]; memset(sqe, 0, sizeof(struct fcoe_wqe)); orig_io_req->task_params->sqe = sqe; init_initiator_sequence_recovery_fcoe_task(orig_io_req->task_params, offset); qedf_ring_doorbell(fcport); spin_unlock_irqrestore(&fcport->rport_lock, flags); } void qedf_process_seq_cleanup_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, struct qedf_ioreq *io_req) { int rc; struct qedf_els_cb_arg *cb_arg; cb_arg = io_req->cb_arg; /* If we timed out just free resources */ if (io_req->event == QEDF_IOREQ_EV_ELS_TMO || !cqe) { QEDF_ERR(&qedf->dbg_ctx, "cqe is NULL or timeout event (0x%x)", 
io_req->event); goto free; } /* Kill the timer we put on the request */ cancel_delayed_work_sync(&io_req->timeout_work); rc = qedf_send_srr(io_req, cb_arg->offset, cb_arg->r_ctl); if (rc) QEDF_ERR(&(qedf->dbg_ctx), "Unable to send SRR, I/O will " "abort, xid=0x%x.\n", io_req->xid); free: kfree(cb_arg); kref_put(&io_req->refcount, qedf_release_cmd); } static bool qedf_requeue_io_req(struct qedf_ioreq *orig_io_req) { struct qedf_rport *fcport; struct qedf_ioreq *new_io_req; unsigned long flags; bool rc = false; fcport = orig_io_req->fcport; if (!fcport) { QEDF_ERR(NULL, "fcport is NULL.\n"); goto out; } if (!orig_io_req->sc_cmd) { QEDF_ERR(&(fcport->qedf->dbg_ctx), "sc_cmd is NULL for " "xid=0x%x.\n", orig_io_req->xid); goto out; } new_io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD); if (!new_io_req) { QEDF_ERR(&(fcport->qedf->dbg_ctx), "Could not allocate new " "io_req.\n"); goto out; } new_io_req->sc_cmd = orig_io_req->sc_cmd; /* * This keeps the sc_cmd struct from being returned to the tape * driver and being requeued twice. We do need to put a reference * for the original I/O request since we will not do a SCSI completion * for it. */ orig_io_req->sc_cmd = NULL; kref_put(&orig_io_req->refcount, qedf_release_cmd); spin_lock_irqsave(&fcport->rport_lock, flags); /* kref for new command released in qedf_post_io_req on error */ if (qedf_post_io_req(fcport, new_io_req)) { QEDF_ERR(&(fcport->qedf->dbg_ctx), "Unable to post io_req\n"); /* Return SQE to pool */ atomic_inc(&fcport->free_sqes); } else { QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS, "Reissued SCSI command from orig_xid=0x%x on " "new_xid=0x%x.\n", orig_io_req->xid, new_io_req->xid); /* * Abort the original I/O but do not return SCSI command as * it has been reissued on another OX_ID. */ spin_unlock_irqrestore(&fcport->rport_lock, flags); qedf_initiate_abts(orig_io_req, false); goto out; } spin_unlock_irqrestore(&fcport->rport_lock, flags); out: return rc; } static void qedf_rec_compl(struct qedf_els_cb_arg *cb_arg) { struct qedf_ioreq *orig_io_req; struct qedf_ioreq *rec_req; struct qedf_mp_req *mp_req; struct fc_frame_header *mp_fc_hdr, *fh; struct fc_frame *fp; void *resp_buf, *fc_payload; u32 resp_len; struct fc_lport *lport; struct qedf_ctx *qedf; int refcount; enum fc_rctl r_ctl; struct fc_els_ls_rjt *rjt; struct fc_els_rec_acc *acc; u8 opcode; u32 offset, e_stat; struct scsi_cmnd *sc_cmd; bool srr_needed = false; rec_req = cb_arg->io_req; qedf = rec_req->fcport->qedf; lport = qedf->lport; orig_io_req = cb_arg->aborted_io_req; if (!orig_io_req) { QEDF_ERR(NULL, "orig_io_req is NULL.\n"); goto out_free; } if (rec_req->event != QEDF_IOREQ_EV_ELS_TMO && rec_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT) cancel_delayed_work_sync(&orig_io_req->timeout_work); refcount = kref_read(&orig_io_req->refcount); QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered: orig_io=%p," " orig_io_xid=0x%x, rec_xid=0x%x, refcount=%d\n", orig_io_req, orig_io_req->xid, rec_req->xid, refcount); /* If a REC times out, free resources */ if (rec_req->event == QEDF_IOREQ_EV_ELS_TMO) { QEDF_ERR(&qedf->dbg_ctx, "Got TMO event, orig_io_req %p orig_io_xid=0x%x.\n", orig_io_req, orig_io_req->xid); goto out_put; } /* Normalize response data into struct fc_frame */ mp_req = &(rec_req->mp_req); mp_fc_hdr = &(mp_req->resp_fc_hdr); resp_len = mp_req->resp_len; acc = resp_buf = mp_req->resp_buf; fp = fc_frame_alloc(lport, resp_len); if (!fp) { QEDF_ERR(&(qedf->dbg_ctx), "fc_frame_alloc failure.\n"); goto out_put; } /* Copy frame header from firmware into fp */ fh = 
(struct fc_frame_header *)fc_frame_header_get(fp); memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header)); /* Copy payload from firmware into fp */ fc_payload = fc_frame_payload_get(fp, resp_len); memcpy(fc_payload, resp_buf, resp_len); opcode = fc_frame_payload_op(fp); if (opcode == ELS_LS_RJT) { rjt = fc_frame_payload_get(fp, sizeof(*rjt)); if (!rjt) { QEDF_ERR(&qedf->dbg_ctx, "payload get failed"); goto out_free_frame; } QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Received LS_RJT for REC: er_reason=0x%x, " "er_explan=0x%x.\n", rjt->er_reason, rjt->er_explan); /* * The following response(s) mean that we need to reissue the * request on another exchange. We need to do this without * informing the upper layers lest it cause an application * error. */ if ((rjt->er_reason == ELS_RJT_LOGIC || rjt->er_reason == ELS_RJT_UNAB) && rjt->er_explan == ELS_EXPL_OXID_RXID) { QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Handle CMD LOST case.\n"); qedf_requeue_io_req(orig_io_req); } } else if (opcode == ELS_LS_ACC) { offset = ntohl(acc->reca_fc4value); e_stat = ntohl(acc->reca_e_stat); QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Received LS_ACC for REC: offset=0x%x, e_stat=0x%x.\n", offset, e_stat); if (e_stat & ESB_ST_SEQ_INIT) { QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Target has the seq init\n"); goto out_free_frame; } sc_cmd = orig_io_req->sc_cmd; if (!sc_cmd) { QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "sc_cmd is NULL for xid=0x%x.\n", orig_io_req->xid); goto out_free_frame; } /* SCSI write case */ if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) { if (offset == orig_io_req->data_xfer_len) { QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "WRITE - response lost.\n"); r_ctl = FC_RCTL_DD_CMD_STATUS; srr_needed = true; offset = 0; } else { QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "WRITE - XFER_RDY/DATA lost.\n"); r_ctl = FC_RCTL_DD_DATA_DESC; /* Use data from warning CQE instead of REC */ offset = orig_io_req->tx_buf_off; } /* SCSI read case */ } else { if (orig_io_req->rx_buf_off == orig_io_req->data_xfer_len) { QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "READ - response lost.\n"); srr_needed = true; r_ctl = FC_RCTL_DD_CMD_STATUS; offset = 0; } else { QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "READ - DATA lost.\n"); /* * For read case we always set the offset to 0 * for sequence recovery task. 
*/ offset = 0; r_ctl = FC_RCTL_DD_SOL_DATA; } } if (srr_needed) qedf_send_srr(orig_io_req, offset, r_ctl); else qedf_initiate_seq_cleanup(orig_io_req, offset, r_ctl); } out_free_frame: fc_frame_free(fp); out_put: /* Put reference for original command since REC completed */ kref_put(&orig_io_req->refcount, qedf_release_cmd); out_free: kfree(cb_arg); } /* Assumes kref is already held by caller */ int qedf_send_rec(struct qedf_ioreq *orig_io_req) { struct fc_els_rec rec; struct qedf_rport *fcport; struct fc_lport *lport; struct qedf_els_cb_arg *cb_arg = NULL; struct qedf_ctx *qedf; uint32_t sid; uint32_t r_a_tov; int rc; if (!orig_io_req) { QEDF_ERR(NULL, "orig_io_req is NULL.\n"); return -EINVAL; } fcport = orig_io_req->fcport; /* Check that fcport is still offloaded */ if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { QEDF_ERR(NULL, "fcport is no longer offloaded.\n"); return -EINVAL; } if (!fcport->qedf) { QEDF_ERR(NULL, "fcport->qedf is NULL.\n"); return -EINVAL; } /* Take reference until REC command completion */ kref_get(&orig_io_req->refcount); qedf = fcport->qedf; lport = qedf->lport; sid = fcport->sid; r_a_tov = lport->r_a_tov; memset(&rec, 0, sizeof(rec)); cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO); if (!cb_arg) { QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for " "REC\n"); rc = -ENOMEM; goto rec_err; } cb_arg->aborted_io_req = orig_io_req; rec.rec_cmd = ELS_REC; hton24(rec.rec_s_id, sid); rec.rec_ox_id = htons(orig_io_req->xid); rec.rec_rx_id = htons(orig_io_req->task->tstorm_st_context.read_write.rx_id); QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending REC orig_io=%p, " "orig_xid=0x%x rx_id=0x%x\n", orig_io_req, orig_io_req->xid, rec.rec_rx_id); rc = qedf_initiate_els(fcport, ELS_REC, &rec, sizeof(rec), qedf_rec_compl, cb_arg, r_a_tov); rec_err: if (rc) { QEDF_ERR(&(qedf->dbg_ctx), "REC failed - release orig_io_req" "=0x%x\n", orig_io_req->xid); kfree(cb_arg); kref_put(&orig_io_req->refcount, qedf_release_cmd); } return rc; }
linux-master
drivers/scsi/qedf/qedf_els.c
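The ELS helpers in qedf_els.c (qedf_send_srr, qedf_send_rec, qedf_send_adisc) all follow the same ownership pattern: take a kref on the original I/O request before handing it to an asynchronous ELS, then release that reference either in the completion callback or on the immediate-failure path. The snippet below is a minimal, self-contained sketch of that pattern only; the demo_* names are hypothetical and are not part of the qedf driver.

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct demo_req {
	struct kref refcount;
	/* ... request state ... */
};

static void demo_req_release(struct kref *ref)
{
	struct demo_req *req = container_of(ref, struct demo_req, refcount);

	kfree(req);
}

/* Hypothetical async submit; the real driver would queue to hardware here. */
static int demo_issue_async(struct demo_req *req)
{
	return 0;
}

/* Completion path: drop the reference taken at submit time. */
static void demo_complete(struct demo_req *req)
{
	kref_put(&req->refcount, demo_req_release);
}

static int demo_submit(struct demo_req *req)
{
	int rc;

	/* Hold the request until the async operation completes. */
	kref_get(&req->refcount);

	rc = demo_issue_async(req);
	if (rc)
		/* Submit failed, so no completion will run: drop it here. */
		kref_put(&req->refcount, demo_req_release);

	return rc;
}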
// SPDX-License-Identifier: GPL-2.0-only /* QLogic FCoE Offload Driver * Copyright (c) 2016-2018 Cavium Inc. */ #include "drv_scsi_fw_funcs.h" #define SCSI_NUM_SGES_IN_CACHE 0x4 bool scsi_is_slow_sgl(u16 num_sges, bool small_mid_sge) { return (num_sges > SCSI_NUM_SGES_SLOW_SGL_THR && small_mid_sge); } void init_scsi_sgl_context(struct scsi_sgl_params *ctx_sgl_params, struct scsi_cached_sges *ctx_data_desc, struct scsi_sgl_task_params *sgl_task_params) { /* no need to check for sgl_task_params->sgl validity */ u8 num_sges_to_init = sgl_task_params->num_sges > SCSI_NUM_SGES_IN_CACHE ? SCSI_NUM_SGES_IN_CACHE : sgl_task_params->num_sges; u8 sge_index; u32 val; val = cpu_to_le32(sgl_task_params->sgl_phys_addr.lo); ctx_sgl_params->sgl_addr.lo = val; val = cpu_to_le32(sgl_task_params->sgl_phys_addr.hi); ctx_sgl_params->sgl_addr.hi = val; val = cpu_to_le32(sgl_task_params->total_buffer_size); ctx_sgl_params->sgl_total_length = val; ctx_sgl_params->sgl_num_sges = cpu_to_le16(sgl_task_params->num_sges); for (sge_index = 0; sge_index < num_sges_to_init; sge_index++) { val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_addr.lo); ctx_data_desc->sge[sge_index].sge_addr.lo = val; val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_addr.hi); ctx_data_desc->sge[sge_index].sge_addr.hi = val; val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_len); ctx_data_desc->sge[sge_index].sge_len = val; } }
linux-master
drivers/scsi/qedf/drv_scsi_fw_funcs.c
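init_scsi_sgl_context() above copies at most SCSI_NUM_SGES_IN_CACHE SGEs into the cached area of the task context; the firmware fetches any further SGEs from the full SGL in DMA memory. A stripped-down illustration of that "clamp to the cache size" idiom follows; the demo_* names and the cache depth are hypothetical, not the driver's own definitions.

#include <linux/types.h>
#include <linux/minmax.h>

#define DEMO_SGES_IN_CACHE 4

struct demo_sge {
	u64 addr;
	u32 len;
};

/*
 * Copy at most DEMO_SGES_IN_CACHE entries into the context cache;
 * the remainder stays in the DMA-able SGL for the firmware to walk.
 */
static void demo_cache_sges(struct demo_sge *cache,
			    const struct demo_sge *sgl, u16 num_sges)
{
	u16 to_cache = min_t(u16, num_sges, DEMO_SGES_IN_CACHE);
	u16 i;

	for (i = 0; i < to_cache; i++)
		cache[i] = sgl[i];
}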
// SPDX-License-Identifier: GPL-2.0-only /* * QLogic FCoE Offload Driver * Copyright (c) 2016-2018 Cavium Inc. */ #include "qedf_dbg.h" #include <linux/vmalloc.h> void qedf_dbg_err(struct qedf_dbg_ctx *qedf, const char *func, u32 line, const char *fmt, ...) { va_list va; struct va_format vaf; va_start(va, fmt); vaf.fmt = fmt; vaf.va = &va; if (likely(qedf) && likely(qedf->pdev)) pr_err("[%s]:[%s:%d]:%d: %pV", dev_name(&(qedf->pdev->dev)), func, line, qedf->host_no, &vaf); else pr_err("[0000:00:00.0]:[%s:%d]: %pV", func, line, &vaf); va_end(va); } void qedf_dbg_warn(struct qedf_dbg_ctx *qedf, const char *func, u32 line, const char *fmt, ...) { va_list va; struct va_format vaf; va_start(va, fmt); vaf.fmt = fmt; vaf.va = &va; if (!(qedf_debug & QEDF_LOG_WARN)) goto ret; if (likely(qedf) && likely(qedf->pdev)) pr_warn("[%s]:[%s:%d]:%d: %pV", dev_name(&(qedf->pdev->dev)), func, line, qedf->host_no, &vaf); else pr_warn("[0000:00:00.0]:[%s:%d]: %pV", func, line, &vaf); ret: va_end(va); } void qedf_dbg_notice(struct qedf_dbg_ctx *qedf, const char *func, u32 line, const char *fmt, ...) { va_list va; struct va_format vaf; va_start(va, fmt); vaf.fmt = fmt; vaf.va = &va; if (!(qedf_debug & QEDF_LOG_NOTICE)) goto ret; if (likely(qedf) && likely(qedf->pdev)) pr_notice("[%s]:[%s:%d]:%d: %pV", dev_name(&(qedf->pdev->dev)), func, line, qedf->host_no, &vaf); else pr_notice("[0000:00:00.0]:[%s:%d]: %pV", func, line, &vaf); ret: va_end(va); } void qedf_dbg_info(struct qedf_dbg_ctx *qedf, const char *func, u32 line, u32 level, const char *fmt, ...) { va_list va; struct va_format vaf; va_start(va, fmt); vaf.fmt = fmt; vaf.va = &va; if (!(qedf_debug & level)) goto ret; if (likely(qedf) && likely(qedf->pdev)) pr_info("[%s]:[%s:%d]:%d: %pV", dev_name(&(qedf->pdev->dev)), func, line, qedf->host_no, &vaf); else pr_info("[0000:00:00.0]:[%s:%d]: %pV", func, line, &vaf); ret: va_end(va); } int qedf_alloc_grc_dump_buf(u8 **buf, uint32_t len) { *buf = vzalloc(len); if (!(*buf)) return -ENOMEM; return 0; } void qedf_free_grc_dump_buf(uint8_t **buf) { vfree(*buf); *buf = NULL; } int qedf_get_grc_dump(struct qed_dev *cdev, const struct qed_common_ops *common, u8 **buf, uint32_t *grcsize) { if (!*buf) return -EINVAL; return common->dbg_all_data(cdev, *buf); } void qedf_uevent_emit(struct Scsi_Host *shost, u32 code, char *msg) { char event_string[40]; char *envp[] = {event_string, NULL}; memset(event_string, 0, sizeof(event_string)); switch (code) { case QEDF_UEVENT_CODE_GRCDUMP: if (msg) strscpy(event_string, msg, sizeof(event_string)); else sprintf(event_string, "GRCDUMP=%u", shost->host_no); break; default: /* do nothing */ break; } kobject_uevent_env(&shost->shost_gendev.kobj, KOBJ_CHANGE, envp); } int qedf_create_sysfs_attr(struct Scsi_Host *shost, struct sysfs_bin_attrs *iter) { int ret = 0; for (; iter->name; iter++) { ret = sysfs_create_bin_file(&shost->shost_gendev.kobj, iter->attr); if (ret) pr_err("Unable to create sysfs %s attr, err(%d).\n", iter->name, ret); } return ret; } void qedf_remove_sysfs_attr(struct Scsi_Host *shost, struct sysfs_bin_attrs *iter) { for (; iter->name; iter++) sysfs_remove_bin_file(&shost->shost_gendev.kobj, iter->attr); }
linux-master
drivers/scsi/qedf/qedf_dbg.c
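The qedf_dbg_* helpers above gate each message on a global debug mask and pass the caller's format through struct va_format, printed with the %pV specifier, so the variadic arguments are expanded exactly once inside printk. A reduced sketch of that idiom is shown below; the demo_* names and mask bits are illustrative only.

#include <linux/kernel.h>
#include <linux/printk.h>

static unsigned int demo_debug_mask;	/* e.g. exposed as a module parameter */

#define DEMO_LOG_IO	0x1
#define DEMO_LOG_ELS	0x2

static __printf(2, 3)
void demo_dbg(u32 level, const char *fmt, ...)
{
	struct va_format vaf;
	va_list va;

	/* Drop the message early if this log level is not enabled. */
	if (!(demo_debug_mask & level))
		return;

	va_start(va, fmt);
	vaf.fmt = fmt;
	vaf.va = &va;
	pr_info("demo: %pV", &vaf);
	va_end(va);
}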
// SPDX-License-Identifier: GPL-2.0-only /* * QLogic FCoE Offload Driver * Copyright (c) 2016-2018 QLogic Corporation */ #ifdef CONFIG_DEBUG_FS #include <linux/uaccess.h> #include <linux/debugfs.h> #include <linux/module.h> #include <linux/vmalloc.h> #include "qedf.h" #include "qedf_dbg.h" static struct dentry *qedf_dbg_root; /* * qedf_dbg_host_init - setup the debugfs file for the pf */ void qedf_dbg_host_init(struct qedf_dbg_ctx *qedf, const struct qedf_debugfs_ops *dops, const struct file_operations *fops) { char host_dirname[32]; QEDF_INFO(qedf, QEDF_LOG_DEBUGFS, "Creating debugfs host node\n"); /* create pf dir */ sprintf(host_dirname, "host%u", qedf->host_no); qedf->bdf_dentry = debugfs_create_dir(host_dirname, qedf_dbg_root); /* create debugfs files */ while (dops) { if (!(dops->name)) break; debugfs_create_file(dops->name, 0600, qedf->bdf_dentry, qedf, fops); dops++; fops++; } } /* * qedf_dbg_host_exit - clear out the pf's debugfs entries */ void qedf_dbg_host_exit(struct qedf_dbg_ctx *qedf_dbg) { QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "Destroying debugfs host " "entry\n"); /* remove debugfs entries of this PF */ debugfs_remove_recursive(qedf_dbg->bdf_dentry); qedf_dbg->bdf_dentry = NULL; } /* * qedf_dbg_init - start up debugfs for the driver */ void qedf_dbg_init(char *drv_name) { QEDF_INFO(NULL, QEDF_LOG_DEBUGFS, "Creating debugfs root node\n"); /* create qed dir in root of debugfs. NULL means debugfs root */ qedf_dbg_root = debugfs_create_dir(drv_name, NULL); } /* * qedf_dbg_exit - clean out the driver's debugfs entries */ void qedf_dbg_exit(void) { QEDF_INFO(NULL, QEDF_LOG_DEBUGFS, "Destroying debugfs root " "entry\n"); /* remove qed dir in root of debugfs */ debugfs_remove_recursive(qedf_dbg_root); qedf_dbg_root = NULL; } const struct qedf_debugfs_ops qedf_debugfs_ops[] = { { "fp_int", NULL }, { "io_trace", NULL }, { "debug", NULL }, { "stop_io_on_error", NULL}, { "driver_stats", NULL}, { "clear_stats", NULL}, { "offload_stats", NULL}, /* This must be last */ { NULL, NULL } }; DECLARE_PER_CPU(struct qedf_percpu_iothread_s, qedf_percpu_iothreads); static ssize_t qedf_dbg_fp_int_cmd_read(struct file *filp, char __user *buffer, size_t count, loff_t *ppos) { ssize_t ret; size_t cnt = 0; char *cbuf; int id; struct qedf_fastpath *fp = NULL; struct qedf_dbg_ctx *qedf_dbg = (struct qedf_dbg_ctx *)filp->private_data; struct qedf_ctx *qedf = container_of(qedf_dbg, struct qedf_ctx, dbg_ctx); QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "entered\n"); cbuf = vmalloc(QEDF_DEBUGFS_LOG_LEN); if (!cbuf) return 0; cnt += scnprintf(cbuf + cnt, QEDF_DEBUGFS_LOG_LEN - cnt, "\nFastpath I/O completions\n\n"); for (id = 0; id < qedf->num_queues; id++) { fp = &(qedf->fp_array[id]); if (fp->sb_id == QEDF_SB_ID_NULL) continue; cnt += scnprintf(cbuf + cnt, QEDF_DEBUGFS_LOG_LEN - cnt, "#%d: %lu\n", id, fp->completions); } ret = simple_read_from_buffer(buffer, count, ppos, cbuf, cnt); vfree(cbuf); return ret; } static ssize_t qedf_dbg_fp_int_cmd_write(struct file *filp, const char __user *buffer, size_t count, loff_t *ppos) { if (!count || *ppos) return 0; return count; } static ssize_t qedf_dbg_debug_cmd_read(struct file *filp, char __user *buffer, size_t count, loff_t *ppos) { int cnt; char cbuf[32]; struct qedf_dbg_ctx *qedf_dbg = (struct qedf_dbg_ctx *)filp->private_data; QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "debug mask=0x%x\n", qedf_debug); cnt = scnprintf(cbuf, sizeof(cbuf), "debug mask = 0x%x\n", qedf_debug); return simple_read_from_buffer(buffer, count, ppos, cbuf, cnt); } static ssize_t 
qedf_dbg_debug_cmd_write(struct file *filp, const char __user *buffer, size_t count, loff_t *ppos) { uint32_t val; void *kern_buf; int rval; struct qedf_dbg_ctx *qedf_dbg = (struct qedf_dbg_ctx *)filp->private_data; if (!count || *ppos) return 0; kern_buf = memdup_user(buffer, count); if (IS_ERR(kern_buf)) return PTR_ERR(kern_buf); rval = kstrtouint(kern_buf, 10, &val); kfree(kern_buf); if (rval) return rval; if (val == 1) qedf_debug = QEDF_DEFAULT_LOG_MASK; else qedf_debug = val; QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "Setting debug=0x%x.\n", val); return count; } static ssize_t qedf_dbg_stop_io_on_error_cmd_read(struct file *filp, char __user *buffer, size_t count, loff_t *ppos) { int cnt; char cbuf[7]; struct qedf_dbg_ctx *qedf_dbg = (struct qedf_dbg_ctx *)filp->private_data; struct qedf_ctx *qedf = container_of(qedf_dbg, struct qedf_ctx, dbg_ctx); QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "entered\n"); cnt = scnprintf(cbuf, sizeof(cbuf), "%s\n", qedf->stop_io_on_error ? "true" : "false"); return simple_read_from_buffer(buffer, count, ppos, cbuf, cnt); } static ssize_t qedf_dbg_stop_io_on_error_cmd_write(struct file *filp, const char __user *buffer, size_t count, loff_t *ppos) { void *kern_buf; struct qedf_dbg_ctx *qedf_dbg = (struct qedf_dbg_ctx *)filp->private_data; struct qedf_ctx *qedf = container_of(qedf_dbg, struct qedf_ctx, dbg_ctx); QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "entered\n"); if (!count || *ppos) return 0; kern_buf = memdup_user(buffer, 6); if (IS_ERR(kern_buf)) return PTR_ERR(kern_buf); if (strncmp(kern_buf, "false", 5) == 0) qedf->stop_io_on_error = false; else if (strncmp(kern_buf, "true", 4) == 0) qedf->stop_io_on_error = true; else if (strncmp(kern_buf, "now", 3) == 0) /* Trigger from user to stop all I/O on this host */ set_bit(QEDF_DBG_STOP_IO, &qedf->flags); kfree(kern_buf); return count; } static int qedf_io_trace_show(struct seq_file *s, void *unused) { int i, idx = 0; struct qedf_ctx *qedf = s->private; struct qedf_dbg_ctx *qedf_dbg = &qedf->dbg_ctx; struct qedf_io_log *io_log; unsigned long flags; if (!qedf_io_tracing) { seq_puts(s, "I/O tracing not enabled.\n"); goto out; } QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "entered\n"); spin_lock_irqsave(&qedf->io_trace_lock, flags); idx = qedf->io_trace_idx; for (i = 0; i < QEDF_IO_TRACE_SIZE; i++) { io_log = &qedf->io_trace_buf[idx]; seq_printf(s, "%d:", io_log->direction); seq_printf(s, "0x%x:", io_log->task_id); seq_printf(s, "0x%06x:", io_log->port_id); seq_printf(s, "%d:", io_log->lun); seq_printf(s, "0x%02x:", io_log->op); seq_printf(s, "0x%02x%02x%02x%02x:", io_log->lba[0], io_log->lba[1], io_log->lba[2], io_log->lba[3]); seq_printf(s, "%d:", io_log->bufflen); seq_printf(s, "%d:", io_log->sg_count); seq_printf(s, "0x%08x:", io_log->result); seq_printf(s, "%lu:", io_log->jiffies); seq_printf(s, "%d:", io_log->refcount); seq_printf(s, "%d:", io_log->req_cpu); seq_printf(s, "%d:", io_log->int_cpu); seq_printf(s, "%d:", io_log->rsp_cpu); seq_printf(s, "%d\n", io_log->sge_type); idx++; if (idx == QEDF_IO_TRACE_SIZE) idx = 0; } spin_unlock_irqrestore(&qedf->io_trace_lock, flags); out: return 0; } static int qedf_dbg_io_trace_open(struct inode *inode, struct file *file) { struct qedf_dbg_ctx *qedf_dbg = inode->i_private; struct qedf_ctx *qedf = container_of(qedf_dbg, struct qedf_ctx, dbg_ctx); return single_open(file, qedf_io_trace_show, qedf); } /* Based on fip_state enum from libfcoe.h */ static char *fip_state_names[] = { "FIP_ST_DISABLED", "FIP_ST_LINK_WAIT", "FIP_ST_AUTO", "FIP_ST_NON_FIP", "FIP_ST_ENABLED", 
"FIP_ST_VNMP_START", "FIP_ST_VNMP_PROBE1", "FIP_ST_VNMP_PROBE2", "FIP_ST_VNMP_CLAIM", "FIP_ST_VNMP_UP", }; /* Based on fc_rport_state enum from libfc.h */ static char *fc_rport_state_names[] = { "RPORT_ST_INIT", "RPORT_ST_FLOGI", "RPORT_ST_PLOGI_WAIT", "RPORT_ST_PLOGI", "RPORT_ST_PRLI", "RPORT_ST_RTV", "RPORT_ST_READY", "RPORT_ST_ADISC", "RPORT_ST_DELETE", }; static int qedf_driver_stats_show(struct seq_file *s, void *unused) { struct qedf_ctx *qedf = s->private; struct qedf_rport *fcport; struct fc_rport_priv *rdata; seq_printf(s, "Host WWNN/WWPN: %016llx/%016llx\n", qedf->wwnn, qedf->wwpn); seq_printf(s, "Host NPortID: %06x\n", qedf->lport->port_id); seq_printf(s, "Link State: %s\n", atomic_read(&qedf->link_state) ? "Up" : "Down"); seq_printf(s, "Logical Link State: %s\n", qedf->lport->link_up ? "Up" : "Down"); seq_printf(s, "FIP state: %s\n", fip_state_names[qedf->ctlr.state]); seq_printf(s, "FIP VLAN ID: %d\n", qedf->vlan_id & 0xfff); seq_printf(s, "FIP 802.1Q Priority: %d\n", qedf->prio); if (qedf->ctlr.sel_fcf) { seq_printf(s, "FCF WWPN: %016llx\n", qedf->ctlr.sel_fcf->switch_name); seq_printf(s, "FCF MAC: %pM\n", qedf->ctlr.sel_fcf->fcf_mac); } else { seq_puts(s, "FCF not selected\n"); } seq_puts(s, "\nSGE stats:\n\n"); seq_printf(s, "cmg_mgr free io_reqs: %d\n", atomic_read(&qedf->cmd_mgr->free_list_cnt)); seq_printf(s, "slow SGEs: %d\n", qedf->slow_sge_ios); seq_printf(s, "fast SGEs: %d\n\n", qedf->fast_sge_ios); seq_puts(s, "Offloaded ports:\n\n"); rcu_read_lock(); list_for_each_entry_rcu(fcport, &qedf->fcports, peers) { rdata = fcport->rdata; if (rdata == NULL) continue; seq_printf(s, "%016llx/%016llx/%06x: state=%s, free_sqes=%d, num_active_ios=%d\n", rdata->rport->node_name, rdata->rport->port_name, rdata->ids.port_id, fc_rport_state_names[rdata->rp_state], atomic_read(&fcport->free_sqes), atomic_read(&fcport->num_active_ios)); } rcu_read_unlock(); return 0; } static int qedf_dbg_driver_stats_open(struct inode *inode, struct file *file) { struct qedf_dbg_ctx *qedf_dbg = inode->i_private; struct qedf_ctx *qedf = container_of(qedf_dbg, struct qedf_ctx, dbg_ctx); return single_open(file, qedf_driver_stats_show, qedf); } static ssize_t qedf_dbg_clear_stats_cmd_read(struct file *filp, char __user *buffer, size_t count, loff_t *ppos) { int cnt = 0; /* Essentially a read stub */ cnt = min_t(int, count, cnt - *ppos); *ppos += cnt; return cnt; } static ssize_t qedf_dbg_clear_stats_cmd_write(struct file *filp, const char __user *buffer, size_t count, loff_t *ppos) { struct qedf_dbg_ctx *qedf_dbg = (struct qedf_dbg_ctx *)filp->private_data; struct qedf_ctx *qedf = container_of(qedf_dbg, struct qedf_ctx, dbg_ctx); QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "Clearing stat counters.\n"); if (!count || *ppos) return 0; /* Clear stat counters exposed by 'stats' node */ qedf->slow_sge_ios = 0; qedf->fast_sge_ios = 0; return count; } static int qedf_offload_stats_show(struct seq_file *s, void *unused) { struct qedf_ctx *qedf = s->private; struct qed_fcoe_stats *fw_fcoe_stats; fw_fcoe_stats = kmalloc(sizeof(struct qed_fcoe_stats), GFP_KERNEL); if (!fw_fcoe_stats) { QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate memory for " "fw_fcoe_stats.\n"); goto out; } /* Query firmware for offload stats */ qed_ops->get_stats(qedf->cdev, fw_fcoe_stats); seq_printf(s, "fcoe_rx_byte_cnt=%llu\n" "fcoe_rx_data_pkt_cnt=%llu\n" "fcoe_rx_xfer_pkt_cnt=%llu\n" "fcoe_rx_other_pkt_cnt=%llu\n" "fcoe_silent_drop_pkt_cmdq_full_cnt=%u\n" "fcoe_silent_drop_pkt_crc_error_cnt=%u\n" "fcoe_silent_drop_pkt_task_invalid_cnt=%u\n" 
"fcoe_silent_drop_total_pkt_cnt=%u\n" "fcoe_silent_drop_pkt_rq_full_cnt=%u\n" "fcoe_tx_byte_cnt=%llu\n" "fcoe_tx_data_pkt_cnt=%llu\n" "fcoe_tx_xfer_pkt_cnt=%llu\n" "fcoe_tx_other_pkt_cnt=%llu\n", fw_fcoe_stats->fcoe_rx_byte_cnt, fw_fcoe_stats->fcoe_rx_data_pkt_cnt, fw_fcoe_stats->fcoe_rx_xfer_pkt_cnt, fw_fcoe_stats->fcoe_rx_other_pkt_cnt, fw_fcoe_stats->fcoe_silent_drop_pkt_cmdq_full_cnt, fw_fcoe_stats->fcoe_silent_drop_pkt_crc_error_cnt, fw_fcoe_stats->fcoe_silent_drop_pkt_task_invalid_cnt, fw_fcoe_stats->fcoe_silent_drop_total_pkt_cnt, fw_fcoe_stats->fcoe_silent_drop_pkt_rq_full_cnt, fw_fcoe_stats->fcoe_tx_byte_cnt, fw_fcoe_stats->fcoe_tx_data_pkt_cnt, fw_fcoe_stats->fcoe_tx_xfer_pkt_cnt, fw_fcoe_stats->fcoe_tx_other_pkt_cnt); kfree(fw_fcoe_stats); out: return 0; } static int qedf_dbg_offload_stats_open(struct inode *inode, struct file *file) { struct qedf_dbg_ctx *qedf_dbg = inode->i_private; struct qedf_ctx *qedf = container_of(qedf_dbg, struct qedf_ctx, dbg_ctx); return single_open(file, qedf_offload_stats_show, qedf); } const struct file_operations qedf_dbg_fops[] = { qedf_dbg_fileops(qedf, fp_int), qedf_dbg_fileops_seq(qedf, io_trace), qedf_dbg_fileops(qedf, debug), qedf_dbg_fileops(qedf, stop_io_on_error), qedf_dbg_fileops_seq(qedf, driver_stats), qedf_dbg_fileops(qedf, clear_stats), qedf_dbg_fileops_seq(qedf, offload_stats), /* This must be last */ { }, }; #else /* CONFIG_DEBUG_FS */ void qedf_dbg_host_init(struct qedf_dbg_ctx *); void qedf_dbg_host_exit(struct qedf_dbg_ctx *); void qedf_dbg_init(char *); void qedf_dbg_exit(void); #endif /* CONFIG_DEBUG_FS */
linux-master
drivers/scsi/qedf/qedf_debugfs.c
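qedf_debugfs.c pairs each name in qedf_debugfs_ops with file_operations built elsewhere by the qedf_dbg_fileops() macros, then creates one file per entry under a per-host debugfs directory. The general shape of such an attribute (a directory, plus one file whose read reports a value and whose write parses one) is sketched below with hypothetical demo_* names and minimal error handling; it is not the driver's own implementation.

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>

static u32 demo_value;
static struct dentry *demo_dir;

static ssize_t demo_read(struct file *filp, char __user *buffer,
			 size_t count, loff_t *ppos)
{
	char buf[32];
	int len = scnprintf(buf, sizeof(buf), "value = %u\n", demo_value);

	/* Copy as much of buf as the caller asked for, honoring *ppos. */
	return simple_read_from_buffer(buffer, count, ppos, buf, len);
}

static ssize_t demo_write(struct file *filp, const char __user *buffer,
			  size_t count, loff_t *ppos)
{
	int rc = kstrtou32_from_user(buffer, count, 10, &demo_value);

	return rc ? rc : count;
}

static const struct file_operations demo_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = demo_read,
	.write = demo_write,
};

static void demo_debugfs_init(void)
{
	demo_dir = debugfs_create_dir("demo", NULL);
	debugfs_create_file("value", 0600, demo_dir, NULL, &demo_fops);
}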
// SPDX-License-Identifier: GPL-2.0-only /* * QLogic FCoE Offload Driver * Copyright (c) 2016-2018 Cavium Inc. */ #include <linux/spinlock.h> #include <linux/vmalloc.h> #include "qedf.h" #include <scsi/scsi_tcq.h> void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req, unsigned int timer_msec) { queue_delayed_work(qedf->timer_work_queue, &io_req->timeout_work, msecs_to_jiffies(timer_msec)); } static void qedf_cmd_timeout(struct work_struct *work) { struct qedf_ioreq *io_req = container_of(work, struct qedf_ioreq, timeout_work.work); struct qedf_ctx *qedf; struct qedf_rport *fcport; fcport = io_req->fcport; if (io_req->fcport == NULL) { QEDF_INFO(NULL, QEDF_LOG_IO, "fcport is NULL.\n"); return; } qedf = fcport->qedf; switch (io_req->cmd_type) { case QEDF_ABTS: if (qedf == NULL) { QEDF_INFO(NULL, QEDF_LOG_IO, "qedf is NULL for ABTS xid=0x%x.\n", io_req->xid); return; } QEDF_ERR((&qedf->dbg_ctx), "ABTS timeout, xid=0x%x.\n", io_req->xid); /* Cleanup timed out ABTS */ qedf_initiate_cleanup(io_req, true); complete(&io_req->abts_done); /* * Need to call kref_put for reference taken when initiate_abts * was called since abts_compl won't be called now that we've * cleaned up the task. */ kref_put(&io_req->refcount, qedf_release_cmd); /* Clear in abort bit now that we're done with the command */ clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags); /* * Now that the original I/O and the ABTS are complete see * if we need to reconnect to the target. */ qedf_restart_rport(fcport); break; case QEDF_ELS: if (!qedf) { QEDF_INFO(NULL, QEDF_LOG_IO, "qedf is NULL for ELS xid=0x%x.\n", io_req->xid); return; } /* ELS request no longer outstanding since it timed out */ clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags); kref_get(&io_req->refcount); /* * Don't attempt to clean an ELS timeout as any subseqeunt * ABTS or cleanup requests just hang. 
For now just free * the resources of the original I/O and the RRQ */ QEDF_ERR(&(qedf->dbg_ctx), "ELS timeout, xid=0x%x.\n", io_req->xid); qedf_initiate_cleanup(io_req, true); io_req->event = QEDF_IOREQ_EV_ELS_TMO; /* Call callback function to complete command */ if (io_req->cb_func && io_req->cb_arg) { io_req->cb_func(io_req->cb_arg); io_req->cb_arg = NULL; } kref_put(&io_req->refcount, qedf_release_cmd); break; case QEDF_SEQ_CLEANUP: QEDF_ERR(&(qedf->dbg_ctx), "Sequence cleanup timeout, " "xid=0x%x.\n", io_req->xid); qedf_initiate_cleanup(io_req, true); io_req->event = QEDF_IOREQ_EV_ELS_TMO; qedf_process_seq_cleanup_compl(qedf, NULL, io_req); break; default: QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Hit default case, xid=0x%x.\n", io_req->xid); break; } } void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr) { struct io_bdt *bdt_info; struct qedf_ctx *qedf = cmgr->qedf; size_t bd_tbl_sz; u16 min_xid = 0; u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1); int num_ios; int i; struct qedf_ioreq *io_req; num_ios = max_xid - min_xid + 1; /* Free fcoe_bdt_ctx structures */ if (!cmgr->io_bdt_pool) { QEDF_ERR(&qedf->dbg_ctx, "io_bdt_pool is NULL.\n"); goto free_cmd_pool; } bd_tbl_sz = QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge); for (i = 0; i < num_ios; i++) { bdt_info = cmgr->io_bdt_pool[i]; if (bdt_info->bd_tbl) { dma_free_coherent(&qedf->pdev->dev, bd_tbl_sz, bdt_info->bd_tbl, bdt_info->bd_tbl_dma); bdt_info->bd_tbl = NULL; } } /* Destroy io_bdt pool */ for (i = 0; i < num_ios; i++) { kfree(cmgr->io_bdt_pool[i]); cmgr->io_bdt_pool[i] = NULL; } kfree(cmgr->io_bdt_pool); cmgr->io_bdt_pool = NULL; free_cmd_pool: for (i = 0; i < num_ios; i++) { io_req = &cmgr->cmds[i]; kfree(io_req->sgl_task_params); kfree(io_req->task_params); /* Make sure we free per command sense buffer */ if (io_req->sense_buffer) dma_free_coherent(&qedf->pdev->dev, QEDF_SCSI_SENSE_BUFFERSIZE, io_req->sense_buffer, io_req->sense_buffer_dma); cancel_delayed_work_sync(&io_req->rrq_work); } /* Free command manager itself */ vfree(cmgr); } static void qedf_handle_rrq(struct work_struct *work) { struct qedf_ioreq *io_req = container_of(work, struct qedf_ioreq, rrq_work.work); atomic_set(&io_req->state, QEDFC_CMD_ST_RRQ_ACTIVE); qedf_send_rrq(io_req); } struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf) { struct qedf_cmd_mgr *cmgr; struct io_bdt *bdt_info; struct qedf_ioreq *io_req; u16 xid; int i; int num_ios; u16 min_xid = 0; u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1); /* Make sure num_queues is already set before calling this function */ if (!qedf->num_queues) { QEDF_ERR(&(qedf->dbg_ctx), "num_queues is not set.\n"); return NULL; } if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) { QEDF_WARN(&(qedf->dbg_ctx), "Invalid min_xid 0x%x and " "max_xid 0x%x.\n", min_xid, max_xid); return NULL; } QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "min xid 0x%x, max xid " "0x%x.\n", min_xid, max_xid); num_ios = max_xid - min_xid + 1; cmgr = vzalloc(sizeof(struct qedf_cmd_mgr)); if (!cmgr) { QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc cmd mgr.\n"); return NULL; } cmgr->qedf = qedf; spin_lock_init(&cmgr->lock); /* * Initialize I/O request fields. 
*/ xid = 0; for (i = 0; i < num_ios; i++) { io_req = &cmgr->cmds[i]; INIT_DELAYED_WORK(&io_req->timeout_work, qedf_cmd_timeout); io_req->xid = xid++; INIT_DELAYED_WORK(&io_req->rrq_work, qedf_handle_rrq); /* Allocate DMA memory to hold sense buffer */ io_req->sense_buffer = dma_alloc_coherent(&qedf->pdev->dev, QEDF_SCSI_SENSE_BUFFERSIZE, &io_req->sense_buffer_dma, GFP_KERNEL); if (!io_req->sense_buffer) { QEDF_ERR(&qedf->dbg_ctx, "Failed to alloc sense buffer.\n"); goto mem_err; } /* Allocate task parameters to pass to f/w init funcions */ io_req->task_params = kzalloc(sizeof(*io_req->task_params), GFP_KERNEL); if (!io_req->task_params) { QEDF_ERR(&(qedf->dbg_ctx), "Failed to allocate task_params for xid=0x%x\n", i); goto mem_err; } /* * Allocate scatter/gather list info to pass to f/w init * functions. */ io_req->sgl_task_params = kzalloc( sizeof(struct scsi_sgl_task_params), GFP_KERNEL); if (!io_req->sgl_task_params) { QEDF_ERR(&(qedf->dbg_ctx), "Failed to allocate sgl_task_params for xid=0x%x\n", i); goto mem_err; } } /* Allocate pool of io_bdts - one for each qedf_ioreq */ cmgr->io_bdt_pool = kmalloc_array(num_ios, sizeof(struct io_bdt *), GFP_KERNEL); if (!cmgr->io_bdt_pool) { QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc io_bdt_pool.\n"); goto mem_err; } for (i = 0; i < num_ios; i++) { cmgr->io_bdt_pool[i] = kmalloc(sizeof(struct io_bdt), GFP_KERNEL); if (!cmgr->io_bdt_pool[i]) { QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc io_bdt_pool[%d].\n", i); goto mem_err; } } for (i = 0; i < num_ios; i++) { bdt_info = cmgr->io_bdt_pool[i]; bdt_info->bd_tbl = dma_alloc_coherent(&qedf->pdev->dev, QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge), &bdt_info->bd_tbl_dma, GFP_KERNEL); if (!bdt_info->bd_tbl) { QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc bdt_tbl[%d].\n", i); goto mem_err; } } atomic_set(&cmgr->free_list_cnt, num_ios); QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "cmgr->free_list_cnt=%d.\n", atomic_read(&cmgr->free_list_cnt)); return cmgr; mem_err: qedf_cmd_mgr_free(cmgr); return NULL; } struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport, u8 cmd_type) { struct qedf_ctx *qedf = fcport->qedf; struct qedf_cmd_mgr *cmd_mgr = qedf->cmd_mgr; struct qedf_ioreq *io_req = NULL; struct io_bdt *bd_tbl; u16 xid; uint32_t free_sqes; int i; unsigned long flags; free_sqes = atomic_read(&fcport->free_sqes); if (!free_sqes) { QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Returning NULL, free_sqes=%d.\n ", free_sqes); goto out_failed; } /* Limit the number of outstanding R/W tasks */ if ((atomic_read(&fcport->num_active_ios) >= NUM_RW_TASKS_PER_CONNECTION)) { QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Returning NULL, num_active_ios=%d.\n", atomic_read(&fcport->num_active_ios)); goto out_failed; } /* Limit global TIDs certain tasks */ if (atomic_read(&cmd_mgr->free_list_cnt) <= GBL_RSVD_TASKS) { QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Returning NULL, free_list_cnt=%d.\n", atomic_read(&cmd_mgr->free_list_cnt)); goto out_failed; } spin_lock_irqsave(&cmd_mgr->lock, flags); for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) { io_req = &cmd_mgr->cmds[cmd_mgr->idx]; cmd_mgr->idx++; if (cmd_mgr->idx == FCOE_PARAMS_NUM_TASKS) cmd_mgr->idx = 0; /* Check to make sure command was previously freed */ if (!io_req->alloc) break; } if (i == FCOE_PARAMS_NUM_TASKS) { spin_unlock_irqrestore(&cmd_mgr->lock, flags); goto out_failed; } if (test_bit(QEDF_CMD_DIRTY, &io_req->flags)) QEDF_ERR(&qedf->dbg_ctx, "io_req found to be dirty ox_id = 0x%x.\n", io_req->xid); /* Clear any flags now that we've reallocated the xid */ io_req->flags 
= 0; io_req->alloc = 1; spin_unlock_irqrestore(&cmd_mgr->lock, flags); atomic_inc(&fcport->num_active_ios); atomic_dec(&fcport->free_sqes); xid = io_req->xid; atomic_dec(&cmd_mgr->free_list_cnt); io_req->cmd_mgr = cmd_mgr; io_req->fcport = fcport; /* Clear any stale sc_cmd back pointer */ io_req->sc_cmd = NULL; io_req->lun = -1; /* Hold the io_req against deletion */ kref_init(&io_req->refcount); /* ID: 001 */ atomic_set(&io_req->state, QEDFC_CMD_ST_IO_ACTIVE); /* Bind io_bdt for this io_req */ /* Have a static link between io_req and io_bdt_pool */ bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid]; if (bd_tbl == NULL) { QEDF_ERR(&(qedf->dbg_ctx), "bd_tbl is NULL, xid=%x.\n", xid); kref_put(&io_req->refcount, qedf_release_cmd); goto out_failed; } bd_tbl->io_req = io_req; io_req->cmd_type = cmd_type; io_req->tm_flags = 0; /* Reset sequence offset data */ io_req->rx_buf_off = 0; io_req->tx_buf_off = 0; io_req->rx_id = 0xffff; /* No OX_ID */ return io_req; out_failed: /* Record failure for stats and return NULL to caller */ qedf->alloc_failures++; return NULL; } static void qedf_free_mp_resc(struct qedf_ioreq *io_req) { struct qedf_mp_req *mp_req = &(io_req->mp_req); struct qedf_ctx *qedf = io_req->fcport->qedf; uint64_t sz = sizeof(struct scsi_sge); /* clear tm flags */ if (mp_req->mp_req_bd) { dma_free_coherent(&qedf->pdev->dev, sz, mp_req->mp_req_bd, mp_req->mp_req_bd_dma); mp_req->mp_req_bd = NULL; } if (mp_req->mp_resp_bd) { dma_free_coherent(&qedf->pdev->dev, sz, mp_req->mp_resp_bd, mp_req->mp_resp_bd_dma); mp_req->mp_resp_bd = NULL; } if (mp_req->req_buf) { dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE, mp_req->req_buf, mp_req->req_buf_dma); mp_req->req_buf = NULL; } if (mp_req->resp_buf) { dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE, mp_req->resp_buf, mp_req->resp_buf_dma); mp_req->resp_buf = NULL; } } void qedf_release_cmd(struct kref *ref) { struct qedf_ioreq *io_req = container_of(ref, struct qedf_ioreq, refcount); struct qedf_cmd_mgr *cmd_mgr = io_req->cmd_mgr; struct qedf_rport *fcport = io_req->fcport; unsigned long flags; if (io_req->cmd_type == QEDF_SCSI_CMD) { QEDF_WARN(&fcport->qedf->dbg_ctx, "Cmd released called without scsi_done called, io_req %p xid=0x%x.\n", io_req, io_req->xid); WARN_ON(io_req->sc_cmd); } if (io_req->cmd_type == QEDF_ELS || io_req->cmd_type == QEDF_TASK_MGMT_CMD) qedf_free_mp_resc(io_req); atomic_inc(&cmd_mgr->free_list_cnt); atomic_dec(&fcport->num_active_ios); atomic_set(&io_req->state, QEDF_CMD_ST_INACTIVE); if (atomic_read(&fcport->num_active_ios) < 0) { QEDF_WARN(&(fcport->qedf->dbg_ctx), "active_ios < 0.\n"); WARN_ON(1); } /* Increment task retry identifier now that the request is released */ io_req->task_retry_identifier++; io_req->fcport = NULL; clear_bit(QEDF_CMD_DIRTY, &io_req->flags); io_req->cpu = 0; spin_lock_irqsave(&cmd_mgr->lock, flags); io_req->fcport = NULL; io_req->alloc = 0; spin_unlock_irqrestore(&cmd_mgr->lock, flags); } static int qedf_map_sg(struct qedf_ioreq *io_req) { struct scsi_cmnd *sc = io_req->sc_cmd; struct Scsi_Host *host = sc->device->host; struct fc_lport *lport = shost_priv(host); struct qedf_ctx *qedf = lport_priv(lport); struct scsi_sge *bd = io_req->bd_tbl->bd_tbl; struct scatterlist *sg; int byte_count = 0; int sg_count = 0; int bd_count = 0; u32 sg_len; u64 addr; int i = 0; sg_count = dma_map_sg(&qedf->pdev->dev, scsi_sglist(sc), scsi_sg_count(sc), sc->sc_data_direction); sg = scsi_sglist(sc); io_req->sge_type = QEDF_IOREQ_UNKNOWN_SGE; if (sg_count <= 8 || io_req->io_req_flags == QEDF_READ) 
io_req->sge_type = QEDF_IOREQ_FAST_SGE; scsi_for_each_sg(sc, sg, sg_count, i) { sg_len = (u32)sg_dma_len(sg); addr = (u64)sg_dma_address(sg); /* * Intermediate s/g element so check if start address * is page aligned. Only required for writes and only if the * number of scatter/gather elements is 8 or more. */ if (io_req->sge_type == QEDF_IOREQ_UNKNOWN_SGE && (i) && (i != (sg_count - 1)) && sg_len < QEDF_PAGE_SIZE) io_req->sge_type = QEDF_IOREQ_SLOW_SGE; bd[bd_count].sge_addr.lo = cpu_to_le32(U64_LO(addr)); bd[bd_count].sge_addr.hi = cpu_to_le32(U64_HI(addr)); bd[bd_count].sge_len = cpu_to_le32(sg_len); bd_count++; byte_count += sg_len; } /* To catch a case where FAST and SLOW nothing is set, set FAST */ if (io_req->sge_type == QEDF_IOREQ_UNKNOWN_SGE) io_req->sge_type = QEDF_IOREQ_FAST_SGE; if (byte_count != scsi_bufflen(sc)) QEDF_ERR(&(qedf->dbg_ctx), "byte_count = %d != " "scsi_bufflen = %d, task_id = 0x%x.\n", byte_count, scsi_bufflen(sc), io_req->xid); return bd_count; } static int qedf_build_bd_list_from_sg(struct qedf_ioreq *io_req) { struct scsi_cmnd *sc = io_req->sc_cmd; struct scsi_sge *bd = io_req->bd_tbl->bd_tbl; int bd_count; if (scsi_sg_count(sc)) { bd_count = qedf_map_sg(io_req); if (bd_count == 0) return -ENOMEM; } else { bd_count = 0; bd[0].sge_addr.lo = bd[0].sge_addr.hi = 0; bd[0].sge_len = 0; } io_req->bd_tbl->bd_valid = bd_count; return 0; } static void qedf_build_fcp_cmnd(struct qedf_ioreq *io_req, struct fcp_cmnd *fcp_cmnd) { struct scsi_cmnd *sc_cmd = io_req->sc_cmd; /* fcp_cmnd is 32 bytes */ memset(fcp_cmnd, 0, FCP_CMND_LEN); /* 8 bytes: SCSI LUN info */ int_to_scsilun(sc_cmd->device->lun, (struct scsi_lun *)&fcp_cmnd->fc_lun); /* 4 bytes: flag info */ fcp_cmnd->fc_pri_ta = 0; fcp_cmnd->fc_tm_flags = io_req->tm_flags; fcp_cmnd->fc_flags = io_req->io_req_flags; fcp_cmnd->fc_cmdref = 0; /* Populate data direction */ if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) { fcp_cmnd->fc_flags |= FCP_CFL_RDDATA; } else { if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) fcp_cmnd->fc_flags |= FCP_CFL_WRDATA; else if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) fcp_cmnd->fc_flags |= FCP_CFL_RDDATA; } fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE; /* 16 bytes: CDB information */ if (io_req->cmd_type != QEDF_TASK_MGMT_CMD) memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len); /* 4 bytes: FCP data length */ fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len); } static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport, struct qedf_ioreq *io_req, struct fcoe_task_context *task_ctx, struct fcoe_wqe *sqe) { enum fcoe_task_type task_type; struct scsi_cmnd *sc_cmd = io_req->sc_cmd; struct io_bdt *bd_tbl = io_req->bd_tbl; u8 fcp_cmnd[32]; u32 tmp_fcp_cmnd[8]; int bd_count = 0; struct qedf_ctx *qedf = fcport->qedf; uint16_t cq_idx = smp_processor_id() % qedf->num_queues; struct regpair sense_data_buffer_phys_addr; u32 tx_io_size = 0; u32 rx_io_size = 0; int i, cnt; /* Note init_initiator_rw_fcoe_task memsets the task context */ io_req->task = task_ctx; memset(task_ctx, 0, sizeof(struct fcoe_task_context)); memset(io_req->task_params, 0, sizeof(struct fcoe_task_params)); memset(io_req->sgl_task_params, 0, sizeof(struct scsi_sgl_task_params)); /* Set task type bassed on DMA directio of command */ if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) { task_type = FCOE_TASK_TYPE_READ_INITIATOR; } else { if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) { task_type = FCOE_TASK_TYPE_WRITE_INITIATOR; tx_io_size = io_req->data_xfer_len; } else { task_type = FCOE_TASK_TYPE_READ_INITIATOR; rx_io_size = 
io_req->data_xfer_len; } } /* Setup the fields for fcoe_task_params */ io_req->task_params->context = task_ctx; io_req->task_params->sqe = sqe; io_req->task_params->task_type = task_type; io_req->task_params->tx_io_size = tx_io_size; io_req->task_params->rx_io_size = rx_io_size; io_req->task_params->conn_cid = fcport->fw_cid; io_req->task_params->itid = io_req->xid; io_req->task_params->cq_rss_number = cq_idx; io_req->task_params->is_tape_device = fcport->dev_type; /* Fill in information for scatter/gather list */ if (io_req->cmd_type != QEDF_TASK_MGMT_CMD) { bd_count = bd_tbl->bd_valid; io_req->sgl_task_params->sgl = bd_tbl->bd_tbl; io_req->sgl_task_params->sgl_phys_addr.lo = U64_LO(bd_tbl->bd_tbl_dma); io_req->sgl_task_params->sgl_phys_addr.hi = U64_HI(bd_tbl->bd_tbl_dma); io_req->sgl_task_params->num_sges = bd_count; io_req->sgl_task_params->total_buffer_size = scsi_bufflen(io_req->sc_cmd); if (io_req->sge_type == QEDF_IOREQ_SLOW_SGE) io_req->sgl_task_params->small_mid_sge = 1; else io_req->sgl_task_params->small_mid_sge = 0; } /* Fill in physical address of sense buffer */ sense_data_buffer_phys_addr.lo = U64_LO(io_req->sense_buffer_dma); sense_data_buffer_phys_addr.hi = U64_HI(io_req->sense_buffer_dma); /* fill FCP_CMND IU */ qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tmp_fcp_cmnd); /* Swap fcp_cmnd since FC is big endian */ cnt = sizeof(struct fcp_cmnd) / sizeof(u32); for (i = 0; i < cnt; i++) { tmp_fcp_cmnd[i] = cpu_to_be32(tmp_fcp_cmnd[i]); } memcpy(fcp_cmnd, tmp_fcp_cmnd, sizeof(struct fcp_cmnd)); init_initiator_rw_fcoe_task(io_req->task_params, io_req->sgl_task_params, sense_data_buffer_phys_addr, io_req->task_retry_identifier, fcp_cmnd); /* Increment SGL type counters */ if (io_req->sge_type == QEDF_IOREQ_SLOW_SGE) qedf->slow_sge_ios++; else qedf->fast_sge_ios++; } void qedf_init_mp_task(struct qedf_ioreq *io_req, struct fcoe_task_context *task_ctx, struct fcoe_wqe *sqe) { struct qedf_mp_req *mp_req = &(io_req->mp_req); struct qedf_rport *fcport = io_req->fcport; struct qedf_ctx *qedf = io_req->fcport->qedf; struct fc_frame_header *fc_hdr; struct fcoe_tx_mid_path_params task_fc_hdr; struct scsi_sgl_task_params tx_sgl_task_params; struct scsi_sgl_task_params rx_sgl_task_params; QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Initializing MP task for cmd_type=%d\n", io_req->cmd_type); qedf->control_requests++; memset(&tx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params)); memset(&rx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params)); memset(task_ctx, 0, sizeof(struct fcoe_task_context)); memset(&task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params)); /* Setup the task from io_req for easy reference */ io_req->task = task_ctx; /* Setup the fields for fcoe_task_params */ io_req->task_params->context = task_ctx; io_req->task_params->sqe = sqe; io_req->task_params->task_type = FCOE_TASK_TYPE_MIDPATH; io_req->task_params->tx_io_size = io_req->data_xfer_len; /* rx_io_size tells the f/w how large a response buffer we have */ io_req->task_params->rx_io_size = PAGE_SIZE; io_req->task_params->conn_cid = fcport->fw_cid; io_req->task_params->itid = io_req->xid; /* Return middle path commands on CQ 0 */ io_req->task_params->cq_rss_number = 0; io_req->task_params->is_tape_device = fcport->dev_type; fc_hdr = &(mp_req->req_fc_hdr); /* Set OX_ID and RX_ID based on driver task id */ fc_hdr->fh_ox_id = io_req->xid; fc_hdr->fh_rx_id = htons(0xffff); /* Set up FC header information */ task_fc_hdr.parameter = fc_hdr->fh_parm_offset; task_fc_hdr.r_ctl = fc_hdr->fh_r_ctl; task_fc_hdr.type = 
fc_hdr->fh_type; task_fc_hdr.cs_ctl = fc_hdr->fh_cs_ctl; task_fc_hdr.df_ctl = fc_hdr->fh_df_ctl; task_fc_hdr.rx_id = fc_hdr->fh_rx_id; task_fc_hdr.ox_id = fc_hdr->fh_ox_id; /* Set up s/g list parameters for request buffer */ tx_sgl_task_params.sgl = mp_req->mp_req_bd; tx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_req_bd_dma); tx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_req_bd_dma); tx_sgl_task_params.num_sges = 1; /* Set PAGE_SIZE for now since sg element is that size ??? */ tx_sgl_task_params.total_buffer_size = io_req->data_xfer_len; tx_sgl_task_params.small_mid_sge = 0; /* Set up s/g list parameters for request buffer */ rx_sgl_task_params.sgl = mp_req->mp_resp_bd; rx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_resp_bd_dma); rx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_resp_bd_dma); rx_sgl_task_params.num_sges = 1; /* Set PAGE_SIZE for now since sg element is that size ??? */ rx_sgl_task_params.total_buffer_size = PAGE_SIZE; rx_sgl_task_params.small_mid_sge = 0; /* * Last arg is 0 as previous code did not set that we wanted the * fc header information. */ init_initiator_midpath_unsolicited_fcoe_task(io_req->task_params, &task_fc_hdr, &tx_sgl_task_params, &rx_sgl_task_params, 0); } /* Presumed that fcport->rport_lock is held */ u16 qedf_get_sqe_idx(struct qedf_rport *fcport) { uint16_t total_sqe = (fcport->sq_mem_size)/(sizeof(struct fcoe_wqe)); u16 rval; rval = fcport->sq_prod_idx; /* Adjust ring index */ fcport->sq_prod_idx++; fcport->fw_sq_prod_idx++; if (fcport->sq_prod_idx == total_sqe) fcport->sq_prod_idx = 0; return rval; } void qedf_ring_doorbell(struct qedf_rport *fcport) { struct fcoe_db_data dbell = { 0 }; dbell.agg_flags = 0; dbell.params |= DB_DEST_XCM << FCOE_DB_DATA_DEST_SHIFT; dbell.params |= DB_AGG_CMD_SET << FCOE_DB_DATA_AGG_CMD_SHIFT; dbell.params |= DQ_XCM_FCOE_SQ_PROD_CMD << FCOE_DB_DATA_AGG_VAL_SEL_SHIFT; dbell.sq_prod = fcport->fw_sq_prod_idx; /* wmb makes sure that the BDs data is updated before updating the * producer, otherwise FW may read old data from the BDs. */ wmb(); barrier(); writel(*(u32 *)&dbell, fcport->p_doorbell); /* * Fence required to flush the write combined buffer, since another * CPU may write to the same doorbell address and data may be lost * due to relaxed order nature of write combined bar. 
*/ wmb(); } static void qedf_trace_io(struct qedf_rport *fcport, struct qedf_ioreq *io_req, int8_t direction) { struct qedf_ctx *qedf = fcport->qedf; struct qedf_io_log *io_log; struct scsi_cmnd *sc_cmd = io_req->sc_cmd; unsigned long flags; spin_lock_irqsave(&qedf->io_trace_lock, flags); io_log = &qedf->io_trace_buf[qedf->io_trace_idx]; io_log->direction = direction; io_log->task_id = io_req->xid; io_log->port_id = fcport->rdata->ids.port_id; io_log->lun = sc_cmd->device->lun; io_log->op = sc_cmd->cmnd[0]; io_log->lba[0] = sc_cmd->cmnd[2]; io_log->lba[1] = sc_cmd->cmnd[3]; io_log->lba[2] = sc_cmd->cmnd[4]; io_log->lba[3] = sc_cmd->cmnd[5]; io_log->bufflen = scsi_bufflen(sc_cmd); io_log->sg_count = scsi_sg_count(sc_cmd); io_log->result = sc_cmd->result; io_log->jiffies = jiffies; io_log->refcount = kref_read(&io_req->refcount); if (direction == QEDF_IO_TRACE_REQ) { /* For requests we only care abot the submission CPU */ io_log->req_cpu = io_req->cpu; io_log->int_cpu = 0; io_log->rsp_cpu = 0; } else if (direction == QEDF_IO_TRACE_RSP) { io_log->req_cpu = io_req->cpu; io_log->int_cpu = io_req->int_cpu; io_log->rsp_cpu = smp_processor_id(); } io_log->sge_type = io_req->sge_type; qedf->io_trace_idx++; if (qedf->io_trace_idx == QEDF_IO_TRACE_SIZE) qedf->io_trace_idx = 0; spin_unlock_irqrestore(&qedf->io_trace_lock, flags); } int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req) { struct scsi_cmnd *sc_cmd = io_req->sc_cmd; struct Scsi_Host *host = sc_cmd->device->host; struct fc_lport *lport = shost_priv(host); struct qedf_ctx *qedf = lport_priv(lport); struct fcoe_task_context *task_ctx; u16 xid; struct fcoe_wqe *sqe; u16 sqe_idx; /* Initialize rest of io_req fileds */ io_req->data_xfer_len = scsi_bufflen(sc_cmd); qedf_priv(sc_cmd)->io_req = io_req; io_req->sge_type = QEDF_IOREQ_FAST_SGE; /* Assume fast SGL by default */ /* Record which cpu this request is associated with */ io_req->cpu = smp_processor_id(); if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) { io_req->io_req_flags = QEDF_READ; qedf->input_requests++; } else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) { io_req->io_req_flags = QEDF_WRITE; qedf->output_requests++; } else { io_req->io_req_flags = 0; qedf->control_requests++; } xid = io_req->xid; /* Build buffer descriptor list for firmware from sg list */ if (qedf_build_bd_list_from_sg(io_req)) { QEDF_ERR(&(qedf->dbg_ctx), "BD list creation failed.\n"); /* Release cmd will release io_req, but sc_cmd is assigned */ io_req->sc_cmd = NULL; kref_put(&io_req->refcount, qedf_release_cmd); return -EAGAIN; } if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) || test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) { QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n"); /* Release cmd will release io_req, but sc_cmd is assigned */ io_req->sc_cmd = NULL; kref_put(&io_req->refcount, qedf_release_cmd); return -EINVAL; } /* Record LUN number for later use if we need them */ io_req->lun = (int)sc_cmd->device->lun; /* Obtain free SQE */ sqe_idx = qedf_get_sqe_idx(fcport); sqe = &fcport->sq[sqe_idx]; memset(sqe, 0, sizeof(struct fcoe_wqe)); /* Get the task context */ task_ctx = qedf_get_task_mem(&qedf->tasks, xid); if (!task_ctx) { QEDF_WARN(&(qedf->dbg_ctx), "task_ctx is NULL, xid=%d.\n", xid); /* Release cmd will release io_req, but sc_cmd is assigned */ io_req->sc_cmd = NULL; kref_put(&io_req->refcount, qedf_release_cmd); return -EINVAL; } qedf_init_task(fcport, lport, io_req, task_ctx, sqe); /* Ring doorbell */ qedf_ring_doorbell(fcport); /* Set that 
command is with the firmware now */ set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags); if (qedf_io_tracing && io_req->sc_cmd) qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_REQ); return false; } int qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd) { struct fc_lport *lport = shost_priv(host); struct qedf_ctx *qedf = lport_priv(lport); struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); struct fc_rport_libfc_priv *rp = rport->dd_data; struct qedf_rport *fcport; struct qedf_ioreq *io_req; int rc = 0; int rval; unsigned long flags = 0; int num_sgs = 0; num_sgs = scsi_sg_count(sc_cmd); if (scsi_sg_count(sc_cmd) > QEDF_MAX_BDS_PER_CMD) { QEDF_ERR(&qedf->dbg_ctx, "Number of SG elements %d exceeds what hardware limitation of %d.\n", num_sgs, QEDF_MAX_BDS_PER_CMD); sc_cmd->result = DID_ERROR; scsi_done(sc_cmd); return 0; } if (test_bit(QEDF_UNLOADING, &qedf->flags) || test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) { QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Returning DNC as unloading or stop io, flags 0x%lx.\n", qedf->flags); sc_cmd->result = DID_NO_CONNECT << 16; scsi_done(sc_cmd); return 0; } if (!qedf->pdev->msix_enabled) { QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Completing sc_cmd=%p DID_NO_CONNECT as MSI-X is not enabled.\n", sc_cmd); sc_cmd->result = DID_NO_CONNECT << 16; scsi_done(sc_cmd); return 0; } rval = fc_remote_port_chkready(rport); if (rval) { QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "fc_remote_port_chkready failed=0x%x for port_id=0x%06x.\n", rval, rport->port_id); sc_cmd->result = rval; scsi_done(sc_cmd); return 0; } /* Retry command if we are doing a qed drain operation */ if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) { QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Drain active.\n"); rc = SCSI_MLQUEUE_HOST_BUSY; goto exit_qcmd; } if (lport->state != LPORT_ST_READY || atomic_read(&qedf->link_state) != QEDF_LINK_UP) { QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Link down.\n"); rc = SCSI_MLQUEUE_HOST_BUSY; goto exit_qcmd; } /* rport and tgt are allocated together, so tgt should be non-NULL */ fcport = (struct qedf_rport *)&rp[1]; if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) || test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) { /* * Session is not offloaded yet. Let SCSI-ml retry * the command. 
*/ rc = SCSI_MLQUEUE_TARGET_BUSY; goto exit_qcmd; } atomic_inc(&fcport->ios_to_queue); if (fcport->retry_delay_timestamp) { /* Take fcport->rport_lock for resetting the delay_timestamp */ spin_lock_irqsave(&fcport->rport_lock, flags); if (time_after(jiffies, fcport->retry_delay_timestamp)) { fcport->retry_delay_timestamp = 0; } else { spin_unlock_irqrestore(&fcport->rport_lock, flags); /* If retry_delay timer is active, flow off the ML */ rc = SCSI_MLQUEUE_TARGET_BUSY; atomic_dec(&fcport->ios_to_queue); goto exit_qcmd; } spin_unlock_irqrestore(&fcport->rport_lock, flags); } io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD); if (!io_req) { rc = SCSI_MLQUEUE_HOST_BUSY; atomic_dec(&fcport->ios_to_queue); goto exit_qcmd; } io_req->sc_cmd = sc_cmd; /* Take fcport->rport_lock for posting to fcport send queue */ spin_lock_irqsave(&fcport->rport_lock, flags); if (qedf_post_io_req(fcport, io_req)) { QEDF_WARN(&(qedf->dbg_ctx), "Unable to post io_req\n"); /* Return SQE to pool */ atomic_inc(&fcport->free_sqes); rc = SCSI_MLQUEUE_HOST_BUSY; } spin_unlock_irqrestore(&fcport->rport_lock, flags); atomic_dec(&fcport->ios_to_queue); exit_qcmd: return rc; } static void qedf_parse_fcp_rsp(struct qedf_ioreq *io_req, struct fcoe_cqe_rsp_info *fcp_rsp) { struct scsi_cmnd *sc_cmd = io_req->sc_cmd; struct qedf_ctx *qedf = io_req->fcport->qedf; u8 rsp_flags = fcp_rsp->rsp_flags.flags; int fcp_sns_len = 0; int fcp_rsp_len = 0; uint8_t *rsp_info, *sense_data; io_req->fcp_status = FC_GOOD; io_req->fcp_resid = 0; if (rsp_flags & (FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER | FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER)) io_req->fcp_resid = fcp_rsp->fcp_resid; io_req->scsi_comp_flags = rsp_flags; io_req->cdb_status = fcp_rsp->scsi_status_code; if (rsp_flags & FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID) fcp_rsp_len = fcp_rsp->fcp_rsp_len; if (rsp_flags & FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID) fcp_sns_len = fcp_rsp->fcp_sns_len; io_req->fcp_rsp_len = fcp_rsp_len; io_req->fcp_sns_len = fcp_sns_len; rsp_info = sense_data = io_req->sense_buffer; /* fetch fcp_rsp_code */ if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) { /* Only for task management function */ io_req->fcp_rsp_code = rsp_info[3]; QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "fcp_rsp_code = %d\n", io_req->fcp_rsp_code); /* Adjust sense-data location. 
*/ sense_data += fcp_rsp_len; } if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) { QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Truncating sense buffer\n"); fcp_sns_len = SCSI_SENSE_BUFFERSIZE; } /* The sense buffer can be NULL for TMF commands */ if (sc_cmd->sense_buffer) { memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); if (fcp_sns_len) memcpy(sc_cmd->sense_buffer, sense_data, fcp_sns_len); } } static void qedf_unmap_sg_list(struct qedf_ctx *qedf, struct qedf_ioreq *io_req) { struct scsi_cmnd *sc = io_req->sc_cmd; if (io_req->bd_tbl->bd_valid && sc && scsi_sg_count(sc)) { dma_unmap_sg(&qedf->pdev->dev, scsi_sglist(sc), scsi_sg_count(sc), sc->sc_data_direction); io_req->bd_tbl->bd_valid = 0; } } void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, struct qedf_ioreq *io_req) { struct scsi_cmnd *sc_cmd; struct fcoe_cqe_rsp_info *fcp_rsp; struct qedf_rport *fcport; int refcount; u16 scope, qualifier = 0; u8 fw_residual_flag = 0; unsigned long flags = 0; u16 chk_scope = 0; if (!io_req) return; if (!cqe) return; if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) || test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) || test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) { QEDF_ERR(&qedf->dbg_ctx, "io_req xid=0x%x already in cleanup or abort processing or already completed.\n", io_req->xid); return; } sc_cmd = io_req->sc_cmd; fcp_rsp = &cqe->cqe_info.rsp_info; if (!sc_cmd) { QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n"); return; } if (!qedf_priv(sc_cmd)->io_req) { QEDF_WARN(&(qedf->dbg_ctx), "io_req is NULL, returned in another context.\n"); return; } if (!sc_cmd->device) { QEDF_ERR(&qedf->dbg_ctx, "Device for sc_cmd %p is NULL.\n", sc_cmd); return; } if (!scsi_cmd_to_rq(sc_cmd)->q) { QEDF_WARN(&(qedf->dbg_ctx), "request->q is NULL so request " "is not valid, sc_cmd=%p.\n", sc_cmd); return; } fcport = io_req->fcport; /* * When flush is active, let the cmds be completed from the cleanup * context */ if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) || (test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags) && sc_cmd->device->lun == (u64)fcport->lun_reset_lun)) { QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Dropping good completion xid=0x%x as fcport is flushing", io_req->xid); return; } qedf_parse_fcp_rsp(io_req, fcp_rsp); qedf_unmap_sg_list(qedf, io_req); /* Check for FCP transport error */ if (io_req->fcp_rsp_len > 3 && io_req->fcp_rsp_code) { QEDF_ERR(&(qedf->dbg_ctx), "FCP I/O protocol failure xid=0x%x fcp_rsp_len=%d " "fcp_rsp_code=%d.\n", io_req->xid, io_req->fcp_rsp_len, io_req->fcp_rsp_code); sc_cmd->result = DID_BUS_BUSY << 16; goto out; } fw_residual_flag = GET_FIELD(cqe->cqe_info.rsp_info.fw_error_flags, FCOE_CQE_RSP_INFO_FW_UNDERRUN); if (fw_residual_flag) { QEDF_ERR(&qedf->dbg_ctx, "Firmware detected underrun: xid=0x%x fcp_rsp.flags=0x%02x fcp_resid=%d fw_residual=0x%x lba=%02x%02x%02x%02x.\n", io_req->xid, fcp_rsp->rsp_flags.flags, io_req->fcp_resid, cqe->cqe_info.rsp_info.fw_residual, sc_cmd->cmnd[2], sc_cmd->cmnd[3], sc_cmd->cmnd[4], sc_cmd->cmnd[5]); if (io_req->cdb_status == 0) sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status; else sc_cmd->result = (DID_OK << 16) | io_req->cdb_status; /* * Set resid to the whole buffer length so we won't try to resue * any previously data. 
*/ scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd)); goto out; } switch (io_req->fcp_status) { case FC_GOOD: if (io_req->cdb_status == 0) { /* Good I/O completion */ sc_cmd->result = DID_OK << 16; } else { refcount = kref_read(&io_req->refcount); QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "%d:0:%d:%lld xid=0x%0x op=0x%02x " "lba=%02x%02x%02x%02x cdb_status=%d " "fcp_resid=0x%x refcount=%d.\n", qedf->lport->host->host_no, sc_cmd->device->id, sc_cmd->device->lun, io_req->xid, sc_cmd->cmnd[0], sc_cmd->cmnd[2], sc_cmd->cmnd[3], sc_cmd->cmnd[4], sc_cmd->cmnd[5], io_req->cdb_status, io_req->fcp_resid, refcount); sc_cmd->result = (DID_OK << 16) | io_req->cdb_status; if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL || io_req->cdb_status == SAM_STAT_BUSY) { /* * Check whether we need to set retry_delay at * all based on retry_delay module parameter * and the status qualifier. */ /* Upper 2 bits */ scope = fcp_rsp->retry_delay_timer & 0xC000; /* Lower 14 bits */ qualifier = fcp_rsp->retry_delay_timer & 0x3FFF; if (qedf_retry_delay) chk_scope = 1; /* Record stats */ if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL) qedf->task_set_fulls++; else qedf->busy++; } } if (io_req->fcp_resid) scsi_set_resid(sc_cmd, io_req->fcp_resid); if (chk_scope == 1) { if ((scope == 1 || scope == 2) && (qualifier > 0 && qualifier <= 0x3FEF)) { /* Check we don't go over the max */ if (qualifier > QEDF_RETRY_DELAY_MAX) { qualifier = QEDF_RETRY_DELAY_MAX; QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "qualifier = %d\n", (fcp_rsp->retry_delay_timer & 0x3FFF)); } QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Scope = %d and qualifier = %d", scope, qualifier); /* Take fcport->rport_lock to * update the retry_delay_timestamp */ spin_lock_irqsave(&fcport->rport_lock, flags); fcport->retry_delay_timestamp = jiffies + (qualifier * HZ / 10); spin_unlock_irqrestore(&fcport->rport_lock, flags); } else { QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "combination of scope = %d and qualifier = %d is not handled in qedf.\n", scope, qualifier); } } break; default: QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "fcp_status=%d.\n", io_req->fcp_status); break; } out: if (qedf_io_tracing) qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_RSP); /* * We wait till the end of the function to clear the * outstanding bit in case we need to send an abort */ clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags); io_req->sc_cmd = NULL; qedf_priv(sc_cmd)->io_req = NULL; scsi_done(sc_cmd); kref_put(&io_req->refcount, qedf_release_cmd); } /* Return a SCSI command in some other context besides a normal completion */ void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req, int result) { struct scsi_cmnd *sc_cmd; int refcount; if (!io_req) { QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "io_req is NULL\n"); return; } if (test_and_set_bit(QEDF_CMD_ERR_SCSI_DONE, &io_req->flags)) { QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "io_req:%p scsi_done handling already done\n", io_req); return; } /* * We will be done with this command after this call so clear the * outstanding bit. 
*/ clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags); sc_cmd = io_req->sc_cmd; if (!sc_cmd) { QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n"); return; } if (!virt_addr_valid(sc_cmd)) { QEDF_ERR(&qedf->dbg_ctx, "sc_cmd=%p is not valid.", sc_cmd); goto bad_scsi_ptr; } if (!qedf_priv(sc_cmd)->io_req) { QEDF_WARN(&(qedf->dbg_ctx), "io_req is NULL, returned in another context.\n"); return; } if (!sc_cmd->device) { QEDF_ERR(&qedf->dbg_ctx, "Device for sc_cmd %p is NULL.\n", sc_cmd); goto bad_scsi_ptr; } if (!virt_addr_valid(sc_cmd->device)) { QEDF_ERR(&qedf->dbg_ctx, "Device pointer for sc_cmd %p is bad.\n", sc_cmd); goto bad_scsi_ptr; } if (!sc_cmd->sense_buffer) { QEDF_ERR(&qedf->dbg_ctx, "sc_cmd->sense_buffer for sc_cmd %p is NULL.\n", sc_cmd); goto bad_scsi_ptr; } if (!virt_addr_valid(sc_cmd->sense_buffer)) { QEDF_ERR(&qedf->dbg_ctx, "sc_cmd->sense_buffer for sc_cmd %p is bad.\n", sc_cmd); goto bad_scsi_ptr; } qedf_unmap_sg_list(qedf, io_req); sc_cmd->result = result << 16; refcount = kref_read(&io_req->refcount); QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "%d:0:%d:%lld: Completing " "sc_cmd=%p result=0x%08x op=0x%02x lba=0x%02x%02x%02x%02x, " "allowed=%d retries=%d refcount=%d.\n", qedf->lport->host->host_no, sc_cmd->device->id, sc_cmd->device->lun, sc_cmd, sc_cmd->result, sc_cmd->cmnd[0], sc_cmd->cmnd[2], sc_cmd->cmnd[3], sc_cmd->cmnd[4], sc_cmd->cmnd[5], sc_cmd->allowed, sc_cmd->retries, refcount); /* * Set resid to the whole buffer length so we won't try to resue any * previously read data */ scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd)); if (qedf_io_tracing) qedf_trace_io(io_req->fcport, io_req, QEDF_IO_TRACE_RSP); io_req->sc_cmd = NULL; qedf_priv(sc_cmd)->io_req = NULL; scsi_done(sc_cmd); kref_put(&io_req->refcount, qedf_release_cmd); return; bad_scsi_ptr: /* * Clear the io_req->sc_cmd backpointer so we don't try to process * this again */ io_req->sc_cmd = NULL; kref_put(&io_req->refcount, qedf_release_cmd); /* ID: 001 */ } /* * Handle warning type CQE completions. This is mainly used for REC timer * popping. 
*/ void qedf_process_warning_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, struct qedf_ioreq *io_req) { int rval, i; struct qedf_rport *fcport = io_req->fcport; u64 err_warn_bit_map; u8 err_warn = 0xff; if (!cqe) { QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "cqe is NULL for io_req %p xid=0x%x\n", io_req, io_req->xid); return; } QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Warning CQE, " "xid=0x%x\n", io_req->xid); QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "err_warn_bitmap=%08x:%08x\n", le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi), le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo)); QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, " "rx_buff_off=%08x, rx_id=%04x\n", le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off), le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off), le32_to_cpu(cqe->cqe_info.err_info.rx_id)); /* Normalize the error bitmap value to an just an unsigned int */ err_warn_bit_map = (u64) ((u64)cqe->cqe_info.err_info.err_warn_bitmap_hi << 32) | (u64)cqe->cqe_info.err_info.err_warn_bitmap_lo; for (i = 0; i < 64; i++) { if (err_warn_bit_map & (u64)((u64)1 << i)) { err_warn = i; break; } } /* Check if REC TOV expired if this is a tape device */ if (fcport->dev_type == QEDF_RPORT_TYPE_TAPE) { if (err_warn == FCOE_WARNING_CODE_REC_TOV_TIMER_EXPIRATION) { QEDF_ERR(&(qedf->dbg_ctx), "REC timer expired.\n"); if (!test_bit(QEDF_CMD_SRR_SENT, &io_req->flags)) { io_req->rx_buf_off = cqe->cqe_info.err_info.rx_buf_off; io_req->tx_buf_off = cqe->cqe_info.err_info.tx_buf_off; io_req->rx_id = cqe->cqe_info.err_info.rx_id; rval = qedf_send_rec(io_req); /* * We only want to abort the io_req if we * can't queue the REC command as we want to * keep the exchange open for recovery. */ if (rval) goto send_abort; } return; } } send_abort: init_completion(&io_req->abts_done); rval = qedf_initiate_abts(io_req, true); if (rval) QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n"); } /* Cleanup a command when we receive an error detection completion */ void qedf_process_error_detect(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, struct qedf_ioreq *io_req) { int rval; if (io_req == NULL) { QEDF_INFO(NULL, QEDF_LOG_IO, "io_req is NULL.\n"); return; } if (io_req->fcport == NULL) { QEDF_INFO(NULL, QEDF_LOG_IO, "fcport is NULL.\n"); return; } if (!cqe) { QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "cqe is NULL for io_req %p\n", io_req); return; } QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Error detection CQE, " "xid=0x%x\n", io_req->xid); QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "err_warn_bitmap=%08x:%08x\n", le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi), le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo)); QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, " "rx_buff_off=%08x, rx_id=%04x\n", le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off), le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off), le32_to_cpu(cqe->cqe_info.err_info.rx_id)); /* When flush is active, let the cmds be flushed out from the cleanup context */ if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &io_req->fcport->flags) || (test_bit(QEDF_RPORT_IN_LUN_RESET, &io_req->fcport->flags) && io_req->sc_cmd->device->lun == (u64)io_req->fcport->lun_reset_lun)) { QEDF_ERR(&qedf->dbg_ctx, "Dropping EQE for xid=0x%x as fcport is flushing", io_req->xid); return; } if (qedf->stop_io_on_error) { qedf_stop_all_io(qedf); return; } init_completion(&io_req->abts_done); rval = qedf_initiate_abts(io_req, true); if (rval) QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n"); } static void qedf_flush_els_req(struct qedf_ctx *qedf, struct 
qedf_ioreq *els_req) { QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Flushing ELS request xid=0x%x refcount=%d.\n", els_req->xid, kref_read(&els_req->refcount)); /* * Need to distinguish this from a timeout when calling the * els_req->cb_func. */ els_req->event = QEDF_IOREQ_EV_ELS_FLUSH; clear_bit(QEDF_CMD_OUTSTANDING, &els_req->flags); /* Cancel the timer */ cancel_delayed_work_sync(&els_req->timeout_work); /* Call callback function to complete command */ if (els_req->cb_func && els_req->cb_arg) { els_req->cb_func(els_req->cb_arg); els_req->cb_arg = NULL; } /* Release kref for original initiate_els */ kref_put(&els_req->refcount, qedf_release_cmd); } /* A value of -1 for lun is a wild card that means flush all * active SCSI I/Os for the target. */ void qedf_flush_active_ios(struct qedf_rport *fcport, int lun) { struct qedf_ioreq *io_req; struct qedf_ctx *qedf; struct qedf_cmd_mgr *cmd_mgr; int i, rc; unsigned long flags; int flush_cnt = 0; int wait_cnt = 100; int refcount = 0; if (!fcport) { QEDF_ERR(NULL, "fcport is NULL\n"); return; } /* Check that fcport is still offloaded */ if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { QEDF_ERR(NULL, "fcport is no longer offloaded.\n"); return; } qedf = fcport->qedf; if (!qedf) { QEDF_ERR(NULL, "qedf is NULL.\n"); return; } /* Only wait for all commands to be queued in the Upload context */ if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags) && (lun == -1)) { while (atomic_read(&fcport->ios_to_queue)) { QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Waiting for %d I/Os to be queued\n", atomic_read(&fcport->ios_to_queue)); if (wait_cnt == 0) { QEDF_ERR(NULL, "%d IOs request could not be queued\n", atomic_read(&fcport->ios_to_queue)); } msleep(20); wait_cnt--; } } cmd_mgr = qedf->cmd_mgr; QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Flush active i/o's num=0x%x fcport=0x%p port_id=0x%06x scsi_id=%d.\n", atomic_read(&fcport->num_active_ios), fcport, fcport->rdata->ids.port_id, fcport->rport->scsi_target_id); QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Locking flush mutex.\n"); mutex_lock(&qedf->flush_mutex); if (lun == -1) { set_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags); } else { set_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags); fcport->lun_reset_lun = lun; } for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) { io_req = &cmd_mgr->cmds[i]; if (!io_req) continue; if (!io_req->fcport) continue; spin_lock_irqsave(&cmd_mgr->lock, flags); if (io_req->alloc) { if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags)) { if (io_req->cmd_type == QEDF_SCSI_CMD) QEDF_ERR(&qedf->dbg_ctx, "Allocated but not queued, xid=0x%x\n", io_req->xid); } spin_unlock_irqrestore(&cmd_mgr->lock, flags); } else { spin_unlock_irqrestore(&cmd_mgr->lock, flags); continue; } if (io_req->fcport != fcport) continue; /* In case of ABTS, CMD_OUTSTANDING is cleared on ABTS response, * but RRQ is still pending. * Workaround: Within qedf_send_rrq, we check if the fcport is * NULL, and we drop the ref on the io_req to clean it up. 
*/ if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags)) { refcount = kref_read(&io_req->refcount); QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Not outstanding, xid=0x%x, cmd_type=%d refcount=%d.\n", io_req->xid, io_req->cmd_type, refcount); /* If RRQ work has been queue, try to cancel it and * free the io_req */ if (atomic_read(&io_req->state) == QEDFC_CMD_ST_RRQ_WAIT) { if (cancel_delayed_work_sync (&io_req->rrq_work)) { QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Putting reference for pending RRQ work xid=0x%x.\n", io_req->xid); /* ID: 003 */ kref_put(&io_req->refcount, qedf_release_cmd); } } continue; } /* Only consider flushing ELS during target reset */ if (io_req->cmd_type == QEDF_ELS && lun == -1) { rc = kref_get_unless_zero(&io_req->refcount); if (!rc) { QEDF_ERR(&(qedf->dbg_ctx), "Could not get kref for ELS io_req=0x%p xid=0x%x.\n", io_req, io_req->xid); continue; } qedf_initiate_cleanup(io_req, false); flush_cnt++; qedf_flush_els_req(qedf, io_req); /* * Release the kref and go back to the top of the * loop. */ goto free_cmd; } if (io_req->cmd_type == QEDF_ABTS) { /* ID: 004 */ rc = kref_get_unless_zero(&io_req->refcount); if (!rc) { QEDF_ERR(&(qedf->dbg_ctx), "Could not get kref for abort io_req=0x%p xid=0x%x.\n", io_req, io_req->xid); continue; } if (lun != -1 && io_req->lun != lun) goto free_cmd; QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Flushing abort xid=0x%x.\n", io_req->xid); if (cancel_delayed_work_sync(&io_req->rrq_work)) { QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Putting ref for cancelled RRQ work xid=0x%x.\n", io_req->xid); kref_put(&io_req->refcount, qedf_release_cmd); } if (cancel_delayed_work_sync(&io_req->timeout_work)) { QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Putting ref for cancelled tmo work xid=0x%x.\n", io_req->xid); qedf_initiate_cleanup(io_req, true); /* Notify eh_abort handler that ABTS is * complete */ complete(&io_req->abts_done); clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags); /* ID: 002 */ kref_put(&io_req->refcount, qedf_release_cmd); } flush_cnt++; goto free_cmd; } if (!io_req->sc_cmd) continue; if (!io_req->sc_cmd->device) { QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Device backpointer NULL for sc_cmd=%p.\n", io_req->sc_cmd); /* Put reference for non-existent scsi_cmnd */ io_req->sc_cmd = NULL; qedf_initiate_cleanup(io_req, false); kref_put(&io_req->refcount, qedf_release_cmd); continue; } if (lun > -1) { if (io_req->lun != lun) continue; } /* * Use kref_get_unless_zero in the unlikely case the command * we're about to flush was completed in the normal SCSI path */ rc = kref_get_unless_zero(&io_req->refcount); if (!rc) { QEDF_ERR(&(qedf->dbg_ctx), "Could not get kref for " "io_req=0x%p xid=0x%x\n", io_req, io_req->xid); continue; } QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Cleanup xid=0x%x.\n", io_req->xid); flush_cnt++; /* Cleanup task and return I/O mid-layer */ qedf_initiate_cleanup(io_req, true); free_cmd: kref_put(&io_req->refcount, qedf_release_cmd); /* ID: 004 */ } wait_cnt = 60; QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Flushed 0x%x I/Os, active=0x%x.\n", flush_cnt, atomic_read(&fcport->num_active_ios)); /* Only wait for all commands to complete in the Upload context */ if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags) && (lun == -1)) { while (atomic_read(&fcport->num_active_ios)) { QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Flushed 0x%x I/Os, active=0x%x cnt=%d.\n", flush_cnt, atomic_read(&fcport->num_active_ios), wait_cnt); if (wait_cnt == 0) { QEDF_ERR(&qedf->dbg_ctx, "Flushed %d I/Os, active=%d.\n", flush_cnt, 
atomic_read(&fcport->num_active_ios)); for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) { io_req = &cmd_mgr->cmds[i]; if (io_req->fcport && io_req->fcport == fcport) { refcount = kref_read(&io_req->refcount); set_bit(QEDF_CMD_DIRTY, &io_req->flags); QEDF_ERR(&qedf->dbg_ctx, "Outstanding io_req =%p xid=0x%x flags=0x%lx, sc_cmd=%p refcount=%d cmd_type=%d.\n", io_req, io_req->xid, io_req->flags, io_req->sc_cmd, refcount, io_req->cmd_type); } } WARN_ON(1); break; } msleep(500); wait_cnt--; } } clear_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags); clear_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags); QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Unlocking flush mutex.\n"); mutex_unlock(&qedf->flush_mutex); } /* * Initiate a ABTS middle path command. Note that we don't have to initialize * the task context for an ABTS task. */ int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts) { struct fc_lport *lport; struct qedf_rport *fcport = io_req->fcport; struct fc_rport_priv *rdata; struct qedf_ctx *qedf; u16 xid; int rc = 0; unsigned long flags; struct fcoe_wqe *sqe; u16 sqe_idx; int refcount = 0; /* Sanity check qedf_rport before dereferencing any pointers */ if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { QEDF_ERR(NULL, "tgt not offloaded\n"); rc = 1; goto out; } qedf = fcport->qedf; rdata = fcport->rdata; if (!rdata || !kref_get_unless_zero(&rdata->kref)) { QEDF_ERR(&qedf->dbg_ctx, "stale rport\n"); rc = 1; goto out; } lport = qedf->lport; if (lport->state != LPORT_ST_READY || !(lport->link_up)) { QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n"); rc = 1; goto drop_rdata_kref; } if (atomic_read(&qedf->link_down_tmo_valid) > 0) { QEDF_ERR(&(qedf->dbg_ctx), "link_down_tmo active.\n"); rc = 1; goto drop_rdata_kref; } /* Ensure room on SQ */ if (!atomic_read(&fcport->free_sqes)) { QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n"); rc = 1; goto drop_rdata_kref; } if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) { QEDF_ERR(&qedf->dbg_ctx, "fcport is uploading.\n"); rc = 1; goto drop_rdata_kref; } spin_lock_irqsave(&fcport->rport_lock, flags); if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) || test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) || test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) { QEDF_ERR(&qedf->dbg_ctx, "io_req xid=0x%x sc_cmd=%p already in cleanup or abort processing or already completed.\n", io_req->xid, io_req->sc_cmd); rc = 1; spin_unlock_irqrestore(&fcport->rport_lock, flags); goto drop_rdata_kref; } /* Set the command type to abort */ io_req->cmd_type = QEDF_ABTS; spin_unlock_irqrestore(&fcport->rport_lock, flags); kref_get(&io_req->refcount); xid = io_req->xid; qedf->control_requests++; qedf->packet_aborts++; io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts; set_bit(QEDF_CMD_IN_ABORT, &io_req->flags); refcount = kref_read(&io_req->refcount); QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM, "ABTS io_req xid = 0x%x refcount=%d\n", xid, refcount); qedf_cmd_timer_set(qedf, io_req, QEDF_ABORT_TIMEOUT); spin_lock_irqsave(&fcport->rport_lock, flags); sqe_idx = qedf_get_sqe_idx(fcport); sqe = &fcport->sq[sqe_idx]; memset(sqe, 0, sizeof(struct fcoe_wqe)); io_req->task_params->sqe = sqe; init_initiator_abort_fcoe_task(io_req->task_params); qedf_ring_doorbell(fcport); spin_unlock_irqrestore(&fcport->rport_lock, flags); drop_rdata_kref: kref_put(&rdata->kref, fc_rport_destroy); out: return rc; } void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, struct qedf_ioreq *io_req) { uint32_t r_ctl; int rc; struct qedf_rport *fcport = 
io_req->fcport; QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "Entered with xid = " "0x%x cmd_type = %d\n", io_req->xid, io_req->cmd_type); r_ctl = cqe->cqe_info.abts_info.r_ctl; /* This was added at a point when we were scheduling abts_compl & * cleanup_compl on different CPUs and there was a possibility of * the io_req to be freed from the other context before we got here. */ if (!fcport) { QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Dropping ABTS completion xid=0x%x as fcport is NULL", io_req->xid); return; } /* * When flush is active, let the cmds be completed from the cleanup * context */ if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) || test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags)) { QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Dropping ABTS completion xid=0x%x as fcport is flushing", io_req->xid); return; } if (!cancel_delayed_work(&io_req->timeout_work)) { QEDF_ERR(&qedf->dbg_ctx, "Wasn't able to cancel abts timeout work.\n"); } switch (r_ctl) { case FC_RCTL_BA_ACC: QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "ABTS response - ACC Send RRQ after R_A_TOV\n"); io_req->event = QEDF_IOREQ_EV_ABORT_SUCCESS; rc = kref_get_unless_zero(&io_req->refcount); /* ID: 003 */ if (!rc) { QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM, "kref is already zero so ABTS was already completed or flushed xid=0x%x.\n", io_req->xid); return; } /* * Dont release this cmd yet. It will be relesed * after we get RRQ response */ queue_delayed_work(qedf->dpc_wq, &io_req->rrq_work, msecs_to_jiffies(qedf->lport->r_a_tov)); atomic_set(&io_req->state, QEDFC_CMD_ST_RRQ_WAIT); break; /* For error cases let the cleanup return the command */ case FC_RCTL_BA_RJT: QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "ABTS response - RJT\n"); io_req->event = QEDF_IOREQ_EV_ABORT_FAILED; break; default: QEDF_ERR(&(qedf->dbg_ctx), "Unknown ABTS response\n"); break; } clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags); if (io_req->sc_cmd) { if (!io_req->return_scsi_cmd_on_abts) QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM, "Not call scsi_done for xid=0x%x.\n", io_req->xid); if (io_req->return_scsi_cmd_on_abts) qedf_scsi_done(qedf, io_req, DID_ERROR); } /* Notify eh_abort handler that ABTS is complete */ complete(&io_req->abts_done); kref_put(&io_req->refcount, qedf_release_cmd); } int qedf_init_mp_req(struct qedf_ioreq *io_req) { struct qedf_mp_req *mp_req; struct scsi_sge *mp_req_bd; struct scsi_sge *mp_resp_bd; struct qedf_ctx *qedf = io_req->fcport->qedf; dma_addr_t addr; uint64_t sz; QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_MP_REQ, "Entered.\n"); mp_req = (struct qedf_mp_req *)&(io_req->mp_req); memset(mp_req, 0, sizeof(struct qedf_mp_req)); if (io_req->cmd_type != QEDF_ELS) { mp_req->req_len = sizeof(struct fcp_cmnd); io_req->data_xfer_len = mp_req->req_len; } else mp_req->req_len = io_req->data_xfer_len; mp_req->req_buf = dma_alloc_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE, &mp_req->req_buf_dma, GFP_KERNEL); if (!mp_req->req_buf) { QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req buffer\n"); qedf_free_mp_resc(io_req); return -ENOMEM; } mp_req->resp_buf = dma_alloc_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE, &mp_req->resp_buf_dma, GFP_KERNEL); if (!mp_req->resp_buf) { QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc TM resp " "buffer\n"); qedf_free_mp_resc(io_req); return -ENOMEM; } /* Allocate and map mp_req_bd and mp_resp_bd */ sz = sizeof(struct scsi_sge); mp_req->mp_req_bd = dma_alloc_coherent(&qedf->pdev->dev, sz, &mp_req->mp_req_bd_dma, GFP_KERNEL); if (!mp_req->mp_req_bd) { QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req bd\n"); 
qedf_free_mp_resc(io_req); return -ENOMEM; } mp_req->mp_resp_bd = dma_alloc_coherent(&qedf->pdev->dev, sz, &mp_req->mp_resp_bd_dma, GFP_KERNEL); if (!mp_req->mp_resp_bd) { QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP resp bd\n"); qedf_free_mp_resc(io_req); return -ENOMEM; } /* Fill bd table */ addr = mp_req->req_buf_dma; mp_req_bd = mp_req->mp_req_bd; mp_req_bd->sge_addr.lo = U64_LO(addr); mp_req_bd->sge_addr.hi = U64_HI(addr); mp_req_bd->sge_len = QEDF_PAGE_SIZE; /* * MP buffer is either a task mgmt command or an ELS. * So the assumption is that it consumes a single bd * entry in the bd table */ mp_resp_bd = mp_req->mp_resp_bd; addr = mp_req->resp_buf_dma; mp_resp_bd->sge_addr.lo = U64_LO(addr); mp_resp_bd->sge_addr.hi = U64_HI(addr); mp_resp_bd->sge_len = QEDF_PAGE_SIZE; return 0; } /* * Last ditch effort to clear the port if it's stuck. Used only after a * cleanup task times out. */ static void qedf_drain_request(struct qedf_ctx *qedf) { if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) { QEDF_ERR(&(qedf->dbg_ctx), "MCP drain already active.\n"); return; } /* Set bit to return all queuecommand requests as busy */ set_bit(QEDF_DRAIN_ACTIVE, &qedf->flags); /* Call qed drain request for function. Should be synchronous */ qed_ops->common->drain(qedf->cdev); /* Settle time for CQEs to be returned */ msleep(100); /* Unplug and continue */ clear_bit(QEDF_DRAIN_ACTIVE, &qedf->flags); } /* * Returns SUCCESS if the cleanup task does not timeout, otherwise return * FAILURE. */ int qedf_initiate_cleanup(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts) { struct qedf_rport *fcport; struct qedf_ctx *qedf; int tmo = 0; int rc = SUCCESS; unsigned long flags; struct fcoe_wqe *sqe; u16 sqe_idx; int refcount = 0; fcport = io_req->fcport; if (!fcport) { QEDF_ERR(NULL, "fcport is NULL.\n"); return SUCCESS; } /* Sanity check qedf_rport before dereferencing any pointers */ if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { QEDF_ERR(NULL, "tgt not offloaded\n"); return SUCCESS; } qedf = fcport->qedf; if (!qedf) { QEDF_ERR(NULL, "qedf is NULL.\n"); return SUCCESS; } if (io_req->cmd_type == QEDF_ELS) { goto process_els; } if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) || test_and_set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags)) { QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in " "cleanup processing or already completed.\n", io_req->xid); return SUCCESS; } set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags); process_els: /* Ensure room on SQ */ if (!atomic_read(&fcport->free_sqes)) { QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n"); /* Need to make sure we clear the flag since it was set */ clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags); return FAILED; } if (io_req->cmd_type == QEDF_CLEANUP) { QEDF_ERR(&qedf->dbg_ctx, "io_req=0x%x is already a cleanup command cmd_type=%d.\n", io_req->xid, io_req->cmd_type); clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags); return SUCCESS; } refcount = kref_read(&io_req->refcount); QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Entered xid=0x%x sc_cmd=%p cmd_type=%d flags=0x%lx refcount=%d fcport=%p port_id=0x%06x\n", io_req->xid, io_req->sc_cmd, io_req->cmd_type, io_req->flags, refcount, fcport, fcport->rdata->ids.port_id); /* Cleanup cmds re-use the same TID as the original I/O */ spin_lock_irqsave(&fcport->rport_lock, flags); io_req->cmd_type = QEDF_CLEANUP; spin_unlock_irqrestore(&fcport->rport_lock, flags); io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts; init_completion(&io_req->cleanup_done); spin_lock_irqsave(&fcport->rport_lock, flags); sqe_idx = 
qedf_get_sqe_idx(fcport); sqe = &fcport->sq[sqe_idx]; memset(sqe, 0, sizeof(struct fcoe_wqe)); io_req->task_params->sqe = sqe; init_initiator_cleanup_fcoe_task(io_req->task_params); qedf_ring_doorbell(fcport); spin_unlock_irqrestore(&fcport->rport_lock, flags); tmo = wait_for_completion_timeout(&io_req->cleanup_done, QEDF_CLEANUP_TIMEOUT * HZ); if (!tmo) { rc = FAILED; /* Timeout case */ QEDF_ERR(&(qedf->dbg_ctx), "Cleanup command timeout, " "xid=%x.\n", io_req->xid); clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags); /* Issue a drain request if cleanup task times out */ QEDF_ERR(&(qedf->dbg_ctx), "Issuing MCP drain request.\n"); qedf_drain_request(qedf); } /* If it TASK MGMT handle it, reference will be decreased * in qedf_execute_tmf */ if (io_req->tm_flags == FCP_TMF_LUN_RESET || io_req->tm_flags == FCP_TMF_TGT_RESET) { clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags); io_req->sc_cmd = NULL; kref_put(&io_req->refcount, qedf_release_cmd); complete(&io_req->tm_done); } if (io_req->sc_cmd) { if (!io_req->return_scsi_cmd_on_abts) QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM, "Not call scsi_done for xid=0x%x.\n", io_req->xid); if (io_req->return_scsi_cmd_on_abts) qedf_scsi_done(qedf, io_req, DID_ERROR); } if (rc == SUCCESS) io_req->event = QEDF_IOREQ_EV_CLEANUP_SUCCESS; else io_req->event = QEDF_IOREQ_EV_CLEANUP_FAILED; return rc; } void qedf_process_cleanup_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, struct qedf_ioreq *io_req) { QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Entered xid = 0x%x\n", io_req->xid); clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags); /* Complete so we can finish cleaning up the I/O */ complete(&io_req->cleanup_done); } static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd, uint8_t tm_flags) { struct qedf_ioreq *io_req; struct fcoe_task_context *task; struct qedf_ctx *qedf = fcport->qedf; struct fc_lport *lport = qedf->lport; int rc = 0; uint16_t xid; int tmo = 0; int lun = 0; unsigned long flags; struct fcoe_wqe *sqe; u16 sqe_idx; if (!sc_cmd) { QEDF_ERR(&qedf->dbg_ctx, "sc_cmd is NULL\n"); return FAILED; } lun = (int)sc_cmd->device->lun; if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { QEDF_ERR(&(qedf->dbg_ctx), "fcport not offloaded\n"); rc = FAILED; goto no_flush; } io_req = qedf_alloc_cmd(fcport, QEDF_TASK_MGMT_CMD); if (!io_req) { QEDF_ERR(&(qedf->dbg_ctx), "Failed TMF"); rc = -EAGAIN; goto no_flush; } if (tm_flags == FCP_TMF_LUN_RESET) qedf->lun_resets++; else if (tm_flags == FCP_TMF_TGT_RESET) qedf->target_resets++; /* Initialize rest of io_req fields */ io_req->sc_cmd = sc_cmd; io_req->fcport = fcport; io_req->cmd_type = QEDF_TASK_MGMT_CMD; /* Record which cpu this request is associated with */ io_req->cpu = smp_processor_id(); /* Set TM flags */ io_req->io_req_flags = QEDF_READ; io_req->data_xfer_len = 0; io_req->tm_flags = tm_flags; /* Default is to return a SCSI command when an error occurs */ io_req->return_scsi_cmd_on_abts = false; /* Obtain exchange id */ xid = io_req->xid; QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "TMF io_req xid = " "0x%x\n", xid); /* Initialize task context for this IO request */ task = qedf_get_task_mem(&qedf->tasks, xid); init_completion(&io_req->tm_done); spin_lock_irqsave(&fcport->rport_lock, flags); sqe_idx = qedf_get_sqe_idx(fcport); sqe = &fcport->sq[sqe_idx]; memset(sqe, 0, sizeof(struct fcoe_wqe)); qedf_init_task(fcport, lport, io_req, task, sqe); qedf_ring_doorbell(fcport); spin_unlock_irqrestore(&fcport->rport_lock, flags); set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags); tmo = 
wait_for_completion_timeout(&io_req->tm_done, QEDF_TM_TIMEOUT * HZ); if (!tmo) { rc = FAILED; QEDF_ERR(&(qedf->dbg_ctx), "wait for tm_cmpl timeout!\n"); /* Clear outstanding bit since command timed out */ clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags); io_req->sc_cmd = NULL; } else { /* Check TMF response code */ if (io_req->fcp_rsp_code == 0) rc = SUCCESS; else rc = FAILED; } /* * Double check that fcport has not gone into an uploading state before * executing the command flush for the LUN/target. */ if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) { QEDF_ERR(&qedf->dbg_ctx, "fcport is uploading, not executing flush.\n"); goto no_flush; } /* We do not need this io_req any more */ kref_put(&io_req->refcount, qedf_release_cmd); if (tm_flags == FCP_TMF_LUN_RESET) qedf_flush_active_ios(fcport, lun); else qedf_flush_active_ios(fcport, -1); no_flush: if (rc != SUCCESS) { QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command failed...\n"); rc = FAILED; } else { QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command success...\n"); rc = SUCCESS; } return rc; } int qedf_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags) { struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); struct fc_rport_libfc_priv *rp = rport->dd_data; struct qedf_rport *fcport = (struct qedf_rport *)&rp[1]; struct qedf_ctx *qedf; struct fc_lport *lport = shost_priv(sc_cmd->device->host); int rc = SUCCESS; int rval; struct qedf_ioreq *io_req = NULL; int ref_cnt = 0; struct fc_rport_priv *rdata = fcport->rdata; QEDF_ERR(NULL, "tm_flags 0x%x sc_cmd %p op = 0x%02x target_id = 0x%x lun=%d\n", tm_flags, sc_cmd, sc_cmd->cmd_len ? sc_cmd->cmnd[0] : 0xff, rport->scsi_target_id, (int)sc_cmd->device->lun); if (!rdata || !kref_get_unless_zero(&rdata->kref)) { QEDF_ERR(NULL, "stale rport\n"); return FAILED; } QEDF_ERR(NULL, "portid=%06x tm_flags =%s\n", rdata->ids.port_id, (tm_flags == FCP_TMF_TGT_RESET) ? 
"TARGET RESET" : "LUN RESET"); if (qedf_priv(sc_cmd)->io_req) { io_req = qedf_priv(sc_cmd)->io_req; ref_cnt = kref_read(&io_req->refcount); QEDF_ERR(NULL, "orig io_req = %p xid = 0x%x ref_cnt = %d.\n", io_req, io_req->xid, ref_cnt); } rval = fc_remote_port_chkready(rport); if (rval) { QEDF_ERR(NULL, "device_reset rport not ready\n"); rc = FAILED; goto tmf_err; } rc = fc_block_scsi_eh(sc_cmd); if (rc) goto tmf_err; if (!fcport) { QEDF_ERR(NULL, "device_reset: rport is NULL\n"); rc = FAILED; goto tmf_err; } qedf = fcport->qedf; if (!qedf) { QEDF_ERR(NULL, "qedf is NULL.\n"); rc = FAILED; goto tmf_err; } if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) { QEDF_ERR(&qedf->dbg_ctx, "Connection is getting uploaded.\n"); rc = SUCCESS; goto tmf_err; } if (test_bit(QEDF_UNLOADING, &qedf->flags) || test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) { rc = SUCCESS; goto tmf_err; } if (lport->state != LPORT_ST_READY || !(lport->link_up)) { QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n"); rc = FAILED; goto tmf_err; } if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) { if (!fcport->rdata) QEDF_ERR(&qedf->dbg_ctx, "fcport %p is uploading.\n", fcport); else QEDF_ERR(&qedf->dbg_ctx, "fcport %p port_id=%06x is uploading.\n", fcport, fcport->rdata->ids.port_id); rc = FAILED; goto tmf_err; } rc = qedf_execute_tmf(fcport, sc_cmd, tm_flags); tmf_err: kref_put(&rdata->kref, fc_rport_destroy); return rc; } void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, struct qedf_ioreq *io_req) { struct fcoe_cqe_rsp_info *fcp_rsp; clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags); fcp_rsp = &cqe->cqe_info.rsp_info; qedf_parse_fcp_rsp(io_req, fcp_rsp); io_req->sc_cmd = NULL; complete(&io_req->tm_done); } void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx, struct fcoe_cqe *cqe) { unsigned long flags; uint16_t pktlen = cqe->cqe_info.unsolic_info.pkt_len; u32 payload_len, crc; struct fc_frame_header *fh; struct fc_frame *fp; struct qedf_io_work *io_work; u32 bdq_idx; void *bdq_addr; struct scsi_bd *p_bd_info; p_bd_info = &cqe->cqe_info.unsolic_info.bd_info; QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL, "address.hi=%x, address.lo=%x, opaque_data.hi=%x, opaque_data.lo=%x, bdq_prod_idx=%u, len=%u\n", le32_to_cpu(p_bd_info->address.hi), le32_to_cpu(p_bd_info->address.lo), le32_to_cpu(p_bd_info->opaque.fcoe_opaque.hi), le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo), qedf->bdq_prod_idx, pktlen); bdq_idx = le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo); if (bdq_idx >= QEDF_BDQ_SIZE) { QEDF_ERR(&(qedf->dbg_ctx), "bdq_idx is out of range %d.\n", bdq_idx); goto increment_prod; } bdq_addr = qedf->bdq[bdq_idx].buf_addr; if (!bdq_addr) { QEDF_ERR(&(qedf->dbg_ctx), "bdq_addr is NULL, dropping " "unsolicited packet.\n"); goto increment_prod; } if (qedf_dump_frames) { QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL, "BDQ frame is at addr=%p.\n", bdq_addr); print_hex_dump(KERN_WARNING, "bdq ", DUMP_PREFIX_OFFSET, 16, 1, (void *)bdq_addr, pktlen, false); } /* Allocate frame */ payload_len = pktlen - sizeof(struct fc_frame_header); fp = fc_frame_alloc(qedf->lport, payload_len); if (!fp) { QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate fp.\n"); goto increment_prod; } /* Copy data from BDQ buffer into fc_frame struct */ fh = (struct fc_frame_header *)fc_frame_header_get(fp); memcpy(fh, (void *)bdq_addr, pktlen); QEDF_WARN(&qedf->dbg_ctx, "Processing Unsolicated frame, src=%06x dest=%06x r_ctl=0x%x type=0x%x cmd=%02x\n", ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id), fh->fh_r_ctl, fh->fh_type, 
fc_frame_payload_op(fp)); /* Initialize the frame so libfc sees it as a valid frame */ crc = fcoe_fc_crc(fp); fc_frame_init(fp); fr_dev(fp) = qedf->lport; fr_sof(fp) = FC_SOF_I3; fr_eof(fp) = FC_EOF_T; fr_crc(fp) = cpu_to_le32(~crc); /* * We need to return the frame back up to libfc in a non-atomic * context */ io_work = mempool_alloc(qedf->io_mempool, GFP_ATOMIC); if (!io_work) { QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate " "work for I/O completion.\n"); fc_frame_free(fp); goto increment_prod; } memset(io_work, 0, sizeof(struct qedf_io_work)); INIT_WORK(&io_work->work, qedf_fp_io_handler); /* Copy contents of CQE for deferred processing */ memcpy(&io_work->cqe, cqe, sizeof(struct fcoe_cqe)); io_work->qedf = qedf; io_work->fp = fp; queue_work_on(smp_processor_id(), qedf_io_wq, &io_work->work); increment_prod: spin_lock_irqsave(&qedf->hba_lock, flags); /* Increment producer to let f/w know we've handled the frame */ qedf->bdq_prod_idx++; /* Producer index wraps at uint16_t boundary */ if (qedf->bdq_prod_idx == 0xffff) qedf->bdq_prod_idx = 0; writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod); readw(qedf->bdq_primary_prod); writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod); readw(qedf->bdq_secondary_prod); spin_unlock_irqrestore(&qedf->hba_lock, flags); }
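/*
 * A minimal standalone sketch of the err_warn decode performed in
 * qedf_process_warning_compl() above: the two 32-bit CQE words are combined
 * into a 64-bit bitmap and the index of the lowest set bit is taken as the
 * error/warning code (0xff when no bit is set), which the driver then
 * compares against codes such as FCOE_WARNING_CODE_REC_TOV_TIMER_EXPIRATION.
 * Plain C99 types are used here instead of the kernel's, and
 * example_decode_err_warn() is an illustrative name, not a driver symbol.
 */
#include <stdint.h>

static uint8_t example_decode_err_warn(uint32_t err_warn_bitmap_hi,
				       uint32_t err_warn_bitmap_lo)
{
	uint64_t err_warn_bit_map = ((uint64_t)err_warn_bitmap_hi << 32) |
				    (uint64_t)err_warn_bitmap_lo;
	uint8_t err_warn = 0xff;	/* no bit set */
	int i;

	/* Scan from bit 0 upward; the first set bit is the reported code */
	for (i = 0; i < 64; i++) {
		if (err_warn_bit_map & ((uint64_t)1 << i)) {
			err_warn = (uint8_t)i;
			break;
		}
	}

	return err_warn;
}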
linux-master
drivers/scsi/qedf/qedf_io.c
// SPDX-License-Identifier: GPL-2.0-only /* * QLogic iSCSI HBA Driver * Copyright (c) 2003-2013 QLogic Corporation */ #include "ql4_def.h" #include "ql4_glbl.h" #include "ql4_dbg.h" #include "ql4_inline.h" /** * qla4xxx_copy_sense - copy sense data into cmd sense buffer * @ha: Pointer to host adapter structure. * @sts_entry: Pointer to status entry structure. * @srb: Pointer to srb structure. **/ static void qla4xxx_copy_sense(struct scsi_qla_host *ha, struct status_entry *sts_entry, struct srb *srb) { struct scsi_cmnd *cmd = srb->cmd; uint16_t sense_len; memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); sense_len = le16_to_cpu(sts_entry->senseDataByteCnt); if (sense_len == 0) { DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%d:%llu: %s:" " sense len 0\n", ha->host_no, cmd->device->channel, cmd->device->id, cmd->device->lun, __func__)); ha->status_srb = NULL; return; } /* Save total available sense length, * not to exceed cmd's sense buffer size */ sense_len = min_t(uint16_t, sense_len, SCSI_SENSE_BUFFERSIZE); srb->req_sense_ptr = cmd->sense_buffer; srb->req_sense_len = sense_len; /* Copy sense from sts_entry pkt */ sense_len = min_t(uint16_t, sense_len, IOCB_MAX_SENSEDATA_LEN); memcpy(cmd->sense_buffer, sts_entry->senseData, sense_len); DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%llu: %s: sense key = %x, " "ASL= %02x, ASC/ASCQ = %02x/%02x\n", ha->host_no, cmd->device->channel, cmd->device->id, cmd->device->lun, __func__, sts_entry->senseData[2] & 0x0f, sts_entry->senseData[7], sts_entry->senseData[12], sts_entry->senseData[13])); DEBUG5(qla4xxx_dump_buffer(cmd->sense_buffer, sense_len)); srb->flags |= SRB_GOT_SENSE; /* Update srb, in case a sts_cont pkt follows */ srb->req_sense_ptr += sense_len; srb->req_sense_len -= sense_len; if (srb->req_sense_len != 0) ha->status_srb = srb; else ha->status_srb = NULL; } /** * qla4xxx_status_cont_entry - Process a Status Continuations entry. * @ha: SCSI driver HA context * @sts_cont: Entry pointer * * Extended sense data. */ static void qla4xxx_status_cont_entry(struct scsi_qla_host *ha, struct status_cont_entry *sts_cont) { struct srb *srb = ha->status_srb; struct scsi_cmnd *cmd; uint16_t sense_len; if (srb == NULL) return; cmd = srb->cmd; if (cmd == NULL) { DEBUG2(printk(KERN_INFO "scsi%ld: %s: Cmd already returned " "back to OS srb=%p srb->state:%d\n", ha->host_no, __func__, srb, srb->state)); ha->status_srb = NULL; return; } /* Copy sense data. */ sense_len = min_t(uint16_t, srb->req_sense_len, IOCB_MAX_EXT_SENSEDATA_LEN); memcpy(srb->req_sense_ptr, sts_cont->ext_sense_data, sense_len); DEBUG5(qla4xxx_dump_buffer(srb->req_sense_ptr, sense_len)); srb->req_sense_ptr += sense_len; srb->req_sense_len -= sense_len; /* Place command on done queue. */ if (srb->req_sense_len == 0) { kref_put(&srb->srb_ref, qla4xxx_srb_compl); ha->status_srb = NULL; } } /** * qla4xxx_status_entry - processes status IOCBs * @ha: Pointer to host adapter structure. * @sts_entry: Pointer to status entry structure. 
**/ static void qla4xxx_status_entry(struct scsi_qla_host *ha, struct status_entry *sts_entry) { uint8_t scsi_status; struct scsi_cmnd *cmd; struct srb *srb; struct ddb_entry *ddb_entry; uint32_t residual; srb = qla4xxx_del_from_active_array(ha, le32_to_cpu(sts_entry->handle)); if (!srb) { ql4_printk(KERN_WARNING, ha, "%s invalid status entry: " "handle=0x%0x, srb=%p\n", __func__, sts_entry->handle, srb); if (is_qla80XX(ha)) set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags); else set_bit(DPC_RESET_HA, &ha->dpc_flags); return; } cmd = srb->cmd; if (cmd == NULL) { DEBUG2(printk("scsi%ld: %s: Command already returned back to " "OS pkt->handle=%d srb=%p srb->state:%d\n", ha->host_no, __func__, sts_entry->handle, srb, srb->state)); ql4_printk(KERN_WARNING, ha, "Command is NULL:" " already returned to OS (srb=%p)\n", srb); return; } ddb_entry = srb->ddb; if (ddb_entry == NULL) { cmd->result = DID_NO_CONNECT << 16; goto status_entry_exit; } residual = le32_to_cpu(sts_entry->residualByteCnt); /* Translate ISP error to a Linux SCSI error. */ scsi_status = sts_entry->scsiStatus; switch (sts_entry->completionStatus) { case SCS_COMPLETE: if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) { cmd->result = DID_ERROR << 16; break; } if (sts_entry->iscsiFlags &ISCSI_FLAG_RESIDUAL_UNDER) { scsi_set_resid(cmd, residual); if (!scsi_status && ((scsi_bufflen(cmd) - residual) < cmd->underflow)) { cmd->result = DID_ERROR << 16; DEBUG2(printk("scsi%ld:%d:%d:%llu: %s: " "Mid-layer Data underrun0, " "xferlen = 0x%x, " "residual = 0x%x\n", ha->host_no, cmd->device->channel, cmd->device->id, cmd->device->lun, __func__, scsi_bufflen(cmd), residual)); break; } } cmd->result = DID_OK << 16 | scsi_status; if (scsi_status != SAM_STAT_CHECK_CONDITION) break; /* Copy Sense Data into sense buffer. */ qla4xxx_copy_sense(ha, sts_entry, srb); break; case SCS_INCOMPLETE: /* Always set the status to DID_ERROR, since * all conditions result in that status anyway */ cmd->result = DID_ERROR << 16; break; case SCS_RESET_OCCURRED: DEBUG2(printk("scsi%ld:%d:%d:%llu: %s: Device RESET occurred\n", ha->host_no, cmd->device->channel, cmd->device->id, cmd->device->lun, __func__)); cmd->result = DID_RESET << 16; break; case SCS_ABORTED: DEBUG2(printk("scsi%ld:%d:%d:%llu: %s: Abort occurred\n", ha->host_no, cmd->device->channel, cmd->device->id, cmd->device->lun, __func__)); cmd->result = DID_RESET << 16; break; case SCS_TIMEOUT: DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%llu: Timeout\n", ha->host_no, cmd->device->channel, cmd->device->id, cmd->device->lun)); cmd->result = DID_TRANSPORT_DISRUPTED << 16; /* * Mark device missing so that we won't continue to send * I/O to this device. We should get a ddb state change * AEN soon. */ if (iscsi_is_session_online(ddb_entry->sess)) qla4xxx_mark_device_missing(ddb_entry->sess); break; case SCS_DATA_UNDERRUN: case SCS_DATA_OVERRUN: if ((sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) || (sts_entry->completionStatus == SCS_DATA_OVERRUN)) { DEBUG2(printk("scsi%ld:%d:%d:%llu: %s: " "Data overrun\n", ha->host_no, cmd->device->channel, cmd->device->id, cmd->device->lun, __func__)); cmd->result = DID_ERROR << 16; break; } scsi_set_resid(cmd, residual); if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_UNDER) { /* Both the firmware and target reported UNDERRUN: * * MID-LAYER UNDERFLOW case: * Some kernels do not properly detect midlayer * underflow, so we manually check it and return * ERROR if the minimum required data was not * received. 
* * ALL OTHER cases: * Fall thru to check scsi_status */ if (!scsi_status && (scsi_bufflen(cmd) - residual) < cmd->underflow) { DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%d:%llu: %s: Mid-layer Data underrun, xferlen = 0x%x,residual = 0x%x\n", ha->host_no, cmd->device->channel, cmd->device->id, cmd->device->lun, __func__, scsi_bufflen(cmd), residual)); cmd->result = DID_ERROR << 16; break; } } else if (scsi_status != SAM_STAT_TASK_SET_FULL && scsi_status != SAM_STAT_BUSY) { /* * The firmware reports UNDERRUN, but the target does * not report it: * * scsi_status | host_byte device_byte * | (19:16) (7:0) * ============= | ========= =========== * TASK_SET_FULL | DID_OK scsi_status * BUSY | DID_OK scsi_status * ALL OTHERS | DID_ERROR scsi_status * * Note: If scsi_status is task set full or busy, * then this else if would fall thru to check the * scsi_status and return DID_OK. */ DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%d:%llu: %s: Dropped frame(s) detected (0x%x of 0x%x bytes).\n", ha->host_no, cmd->device->channel, cmd->device->id, cmd->device->lun, __func__, residual, scsi_bufflen(cmd))); cmd->result = DID_ERROR << 16 | scsi_status; goto check_scsi_status; } cmd->result = DID_OK << 16 | scsi_status; check_scsi_status: if (scsi_status == SAM_STAT_CHECK_CONDITION) qla4xxx_copy_sense(ha, sts_entry, srb); break; case SCS_DEVICE_LOGGED_OUT: case SCS_DEVICE_UNAVAILABLE: DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%llu: SCS_DEVICE " "state: 0x%x\n", ha->host_no, cmd->device->channel, cmd->device->id, cmd->device->lun, sts_entry->completionStatus)); /* * Mark device missing so that we won't continue to * send I/O to this device. We should get a ddb * state change AEN soon. */ if (iscsi_is_session_online(ddb_entry->sess)) qla4xxx_mark_device_missing(ddb_entry->sess); cmd->result = DID_TRANSPORT_DISRUPTED << 16; break; case SCS_QUEUE_FULL: /* * SCSI Mid-Layer handles device queue full */ cmd->result = DID_OK << 16 | sts_entry->scsiStatus; DEBUG2(printk("scsi%ld:%d:%llu: %s: QUEUE FULL detected " "compl=%02x, scsi=%02x, state=%02x, iFlags=%02x," " iResp=%02x\n", ha->host_no, cmd->device->id, cmd->device->lun, __func__, sts_entry->completionStatus, sts_entry->scsiStatus, sts_entry->state_flags, sts_entry->iscsiFlags, sts_entry->iscsiResponse)); break; default: cmd->result = DID_ERROR << 16; break; } status_entry_exit: /* complete the request, if not waiting for status_continuation pkt */ srb->cc_stat = sts_entry->completionStatus; if (ha->status_srb == NULL) kref_put(&srb->srb_ref, qla4xxx_srb_compl); } /** * qla4xxx_passthru_status_entry - processes passthru status IOCBs (0x3C) * @ha: Pointer to host adapter structure. * @sts_entry: Pointer to status entry structure. 
**/ static void qla4xxx_passthru_status_entry(struct scsi_qla_host *ha, struct passthru_status *sts_entry) { struct iscsi_task *task; struct ddb_entry *ddb_entry; struct ql4_task_data *task_data; struct iscsi_cls_conn *cls_conn; struct iscsi_conn *conn; itt_t itt; uint32_t fw_ddb_index; itt = sts_entry->handle; fw_ddb_index = le32_to_cpu(sts_entry->target); ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, fw_ddb_index); if (ddb_entry == NULL) { ql4_printk(KERN_ERR, ha, "%s: Invalid target index = 0x%x\n", __func__, sts_entry->target); return; } cls_conn = ddb_entry->conn; conn = cls_conn->dd_data; spin_lock(&conn->session->back_lock); task = iscsi_itt_to_task(conn, itt); spin_unlock(&conn->session->back_lock); if (task == NULL) { ql4_printk(KERN_ERR, ha, "%s: Task is NULL\n", __func__); return; } task_data = task->dd_data; memcpy(&task_data->sts, sts_entry, sizeof(struct passthru_status)); ha->iocb_cnt -= task_data->iocb_req_cnt; queue_work(ha->task_wq, &task_data->task_work); } static struct mrb *qla4xxx_del_mrb_from_active_array(struct scsi_qla_host *ha, uint32_t index) { struct mrb *mrb = NULL; /* validate handle and remove from active array */ if (index >= MAX_MRB) return mrb; mrb = ha->active_mrb_array[index]; ha->active_mrb_array[index] = NULL; if (!mrb) return mrb; /* update counters */ ha->iocb_cnt -= mrb->iocb_cnt; return mrb; } static void qla4xxx_mbox_status_entry(struct scsi_qla_host *ha, struct mbox_status_iocb *mbox_sts_entry) { struct mrb *mrb; uint32_t status; uint32_t data_size; mrb = qla4xxx_del_mrb_from_active_array(ha, le32_to_cpu(mbox_sts_entry->handle)); if (mrb == NULL) { ql4_printk(KERN_WARNING, ha, "%s: mrb[%d] is null\n", __func__, mbox_sts_entry->handle); return; } switch (mrb->mbox_cmd) { case MBOX_CMD_PING: DEBUG2(ql4_printk(KERN_INFO, ha, "%s: mbox_cmd = 0x%x, " "mbox_sts[0] = 0x%x, mbox_sts[6] = 0x%x\n", __func__, mrb->mbox_cmd, mbox_sts_entry->out_mbox[0], mbox_sts_entry->out_mbox[6])); if (mbox_sts_entry->out_mbox[0] == MBOX_STS_COMMAND_COMPLETE) status = ISCSI_PING_SUCCESS; else status = mbox_sts_entry->out_mbox[6]; data_size = sizeof(mbox_sts_entry->out_mbox); qla4xxx_post_ping_evt_work(ha, status, mrb->pid, data_size, (uint8_t *) mbox_sts_entry->out_mbox); break; default: DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: invalid mbox_cmd = " "0x%x\n", __func__, mrb->mbox_cmd)); } kfree(mrb); return; } /** * qla4xxx_process_response_queue - process response queue completions * @ha: Pointer to host adapter structure. * * This routine process response queue completions in interrupt context. 
* Hardware_lock locked upon entry **/ void qla4xxx_process_response_queue(struct scsi_qla_host *ha) { struct srb *srb = NULL; struct status_entry *sts_entry; /* Process all responses from response queue */ while ((ha->response_ptr->signature != RESPONSE_PROCESSED)) { sts_entry = (struct status_entry *) ha->response_ptr; /* Advance pointers for next entry */ if (ha->response_out == (RESPONSE_QUEUE_DEPTH - 1)) { ha->response_out = 0; ha->response_ptr = ha->response_ring; } else { ha->response_out++; ha->response_ptr++; } /* process entry */ switch (sts_entry->hdr.entryType) { case ET_STATUS: /* Common status */ qla4xxx_status_entry(ha, sts_entry); break; case ET_PASSTHRU_STATUS: if (sts_entry->hdr.systemDefined == SD_ISCSI_PDU) qla4xxx_passthru_status_entry(ha, (struct passthru_status *)sts_entry); else ql4_printk(KERN_ERR, ha, "%s: Invalid status received\n", __func__); break; case ET_STATUS_CONTINUATION: qla4xxx_status_cont_entry(ha, (struct status_cont_entry *) sts_entry); break; case ET_COMMAND: /* ISP device queue is full. Command not * accepted by ISP. Queue command for * later */ srb = qla4xxx_del_from_active_array(ha, le32_to_cpu(sts_entry-> handle)); if (srb == NULL) goto exit_prq_invalid_handle; DEBUG2(printk("scsi%ld: %s: FW device queue full, " "srb %p\n", ha->host_no, __func__, srb)); /* ETRY normally by sending it back with * DID_BUS_BUSY */ srb->cmd->result = DID_BUS_BUSY << 16; kref_put(&srb->srb_ref, qla4xxx_srb_compl); break; case ET_CONTINUE: /* Just throw away the continuation entries */ DEBUG2(printk("scsi%ld: %s: Continuation entry - " "ignoring\n", ha->host_no, __func__)); break; case ET_MBOX_STATUS: DEBUG2(ql4_printk(KERN_INFO, ha, "%s: mbox status IOCB\n", __func__)); qla4xxx_mbox_status_entry(ha, (struct mbox_status_iocb *)sts_entry); break; default: /* * Invalid entry in response queue, reset RISC * firmware. */ DEBUG2(printk("scsi%ld: %s: Invalid entry %x in " "response queue \n", ha->host_no, __func__, sts_entry->hdr.entryType)); goto exit_prq_error; } ((struct response *)sts_entry)->signature = RESPONSE_PROCESSED; wmb(); } /* * Tell ISP we're done with response(s). This also clears the interrupt. */ ha->isp_ops->complete_iocb(ha); return; exit_prq_invalid_handle: DEBUG2(printk("scsi%ld: %s: Invalid handle(srb)=%p type=%x IOCS=%x\n", ha->host_no, __func__, srb, sts_entry->hdr.entryType, sts_entry->completionStatus)); exit_prq_error: ha->isp_ops->complete_iocb(ha); set_bit(DPC_RESET_HA, &ha->dpc_flags); } /** * qla4_83xx_loopback_in_progress: Is loopback in progress? * @ha: Pointer to host adapter structure. 
* returns: 1 = loopback in progress, 0 = loopback not in progress **/ static int qla4_83xx_loopback_in_progress(struct scsi_qla_host *ha) { int rval = 1; if (is_qla8032(ha) || is_qla8042(ha)) { if ((ha->idc_info.info2 & ENABLE_INTERNAL_LOOPBACK) || (ha->idc_info.info2 & ENABLE_EXTERNAL_LOOPBACK)) { DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Loopback diagnostics in progress\n", __func__)); rval = 1; } else { DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Loopback diagnostics not in progress\n", __func__)); rval = 0; } } return rval; } static void qla4xxx_update_ipaddr_state(struct scsi_qla_host *ha, uint32_t ipaddr_idx, uint32_t ipaddr_fw_state) { uint8_t ipaddr_state; uint8_t ip_idx; ip_idx = ipaddr_idx & 0xF; ipaddr_state = qla4xxx_set_ipaddr_state((uint8_t)ipaddr_fw_state); switch (ip_idx) { case 0: ha->ip_config.ipv4_addr_state = ipaddr_state; break; case 1: ha->ip_config.ipv6_link_local_state = ipaddr_state; break; case 2: ha->ip_config.ipv6_addr0_state = ipaddr_state; break; case 3: ha->ip_config.ipv6_addr1_state = ipaddr_state; break; default: ql4_printk(KERN_INFO, ha, "%s: Invalid IPADDR index %d\n", __func__, ip_idx); } } static void qla4xxx_default_router_changed(struct scsi_qla_host *ha, uint32_t *mbox_sts) { memcpy(&ha->ip_config.ipv6_default_router_addr.s6_addr32[0], &mbox_sts[2], sizeof(uint32_t)); memcpy(&ha->ip_config.ipv6_default_router_addr.s6_addr32[1], &mbox_sts[3], sizeof(uint32_t)); memcpy(&ha->ip_config.ipv6_default_router_addr.s6_addr32[2], &mbox_sts[4], sizeof(uint32_t)); memcpy(&ha->ip_config.ipv6_default_router_addr.s6_addr32[3], &mbox_sts[5], sizeof(uint32_t)); } /** * qla4xxx_isr_decode_mailbox - decodes mailbox status * @ha: Pointer to host adapter structure. * @mbox_status: Mailbox status. * * This routine decodes the mailbox status during the ISR. * Hardware_lock locked upon entry. runs in interrupt context. **/ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha, uint32_t mbox_status) { int i; uint32_t mbox_sts[MBOX_AEN_REG_COUNT]; __le32 __iomem *mailbox_out; uint32_t opcode = 0; if (is_qla8032(ha) || is_qla8042(ha)) mailbox_out = &ha->qla4_83xx_reg->mailbox_out[0]; else if (is_qla8022(ha)) mailbox_out = &ha->qla4_82xx_reg->mailbox_out[0]; else mailbox_out = &ha->reg->mailbox[0]; if ((mbox_status == MBOX_STS_BUSY) || (mbox_status == MBOX_STS_INTERMEDIATE_COMPLETION) || (mbox_status >> 12 == MBOX_COMPLETION_STATUS)) { ha->mbox_status[0] = mbox_status; if (test_bit(AF_MBOX_COMMAND, &ha->flags)) { /* * Copy all mailbox registers to a temporary * location and set mailbox command done flag */ for (i = 0; i < ha->mbox_status_count; i++) ha->mbox_status[i] = readl(&mailbox_out[i]); set_bit(AF_MBOX_COMMAND_DONE, &ha->flags); if (test_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags)) complete(&ha->mbx_intr_comp); } } else if (mbox_status >> 12 == MBOX_ASYNC_EVENT_STATUS) { for (i = 0; i < MBOX_AEN_REG_COUNT; i++) mbox_sts[i] = readl(&mailbox_out[i]); /* Immediately process the AENs that don't require much work. 
* Only queue the database_changed AENs */ if (ha->aen_log.count < MAX_AEN_ENTRIES) { for (i = 0; i < MBOX_AEN_REG_COUNT; i++) ha->aen_log.entry[ha->aen_log.count].mbox_sts[i] = mbox_sts[i]; ha->aen_log.count++; } switch (mbox_status) { case MBOX_ASTS_SYSTEM_ERROR: /* Log Mailbox registers */ ql4_printk(KERN_INFO, ha, "%s: System Err\n", __func__); qla4xxx_dump_registers(ha); if ((is_qla8022(ha) && ql4xdontresethba) || ((is_qla8032(ha) || is_qla8042(ha)) && qla4_83xx_idc_dontreset(ha))) { DEBUG2(printk("scsi%ld: %s:Don't Reset HBA\n", ha->host_no, __func__)); } else { set_bit(AF_GET_CRASH_RECORD, &ha->flags); set_bit(DPC_RESET_HA, &ha->dpc_flags); } break; case MBOX_ASTS_REQUEST_TRANSFER_ERROR: case MBOX_ASTS_RESPONSE_TRANSFER_ERROR: case MBOX_ASTS_NVRAM_INVALID: case MBOX_ASTS_IP_ADDRESS_CHANGED: case MBOX_ASTS_DHCP_LEASE_EXPIRED: DEBUG2(printk("scsi%ld: AEN %04x, ERROR Status, " "Reset HA\n", ha->host_no, mbox_status)); if (is_qla80XX(ha)) set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags); else set_bit(DPC_RESET_HA, &ha->dpc_flags); break; case MBOX_ASTS_LINK_UP: set_bit(AF_LINK_UP, &ha->flags); if (test_bit(AF_INIT_DONE, &ha->flags)) set_bit(DPC_LINK_CHANGED, &ha->dpc_flags); ql4_printk(KERN_INFO, ha, "%s: LINK UP\n", __func__); qla4xxx_post_aen_work(ha, ISCSI_EVENT_LINKUP, sizeof(mbox_sts), (uint8_t *) mbox_sts); if ((is_qla8032(ha) || is_qla8042(ha)) && ha->notify_link_up_comp) complete(&ha->link_up_comp); break; case MBOX_ASTS_LINK_DOWN: clear_bit(AF_LINK_UP, &ha->flags); if (test_bit(AF_INIT_DONE, &ha->flags)) { set_bit(DPC_LINK_CHANGED, &ha->dpc_flags); qla4xxx_wake_dpc(ha); } ql4_printk(KERN_INFO, ha, "%s: LINK DOWN\n", __func__); qla4xxx_post_aen_work(ha, ISCSI_EVENT_LINKDOWN, sizeof(mbox_sts), (uint8_t *) mbox_sts); break; case MBOX_ASTS_HEARTBEAT: ha->seconds_since_last_heartbeat = 0; break; case MBOX_ASTS_DHCP_LEASE_ACQUIRED: DEBUG2(printk("scsi%ld: AEN %04x DHCP LEASE " "ACQUIRED\n", ha->host_no, mbox_status)); set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags); break; case MBOX_ASTS_PROTOCOL_STATISTIC_ALARM: case MBOX_ASTS_SCSI_COMMAND_PDU_REJECTED: /* Target * mode * only */ case MBOX_ASTS_UNSOLICITED_PDU_RECEIVED: /* Connection mode */ case MBOX_ASTS_IPSEC_SYSTEM_FATAL_ERROR: case MBOX_ASTS_SUBNET_STATE_CHANGE: case MBOX_ASTS_DUPLICATE_IP: /* No action */ DEBUG2(printk("scsi%ld: AEN %04x\n", ha->host_no, mbox_status)); break; case MBOX_ASTS_IP_ADDR_STATE_CHANGED: printk("scsi%ld: AEN %04x, mbox_sts[2]=%04x, " "mbox_sts[3]=%04x\n", ha->host_no, mbox_sts[0], mbox_sts[2], mbox_sts[3]); qla4xxx_update_ipaddr_state(ha, mbox_sts[5], mbox_sts[3]); /* mbox_sts[2] = Old ACB state * mbox_sts[3] = new ACB state */ if ((mbox_sts[3] == IP_ADDRSTATE_PREFERRED) && ((mbox_sts[2] == IP_ADDRSTATE_TENTATIVE) || (mbox_sts[2] == IP_ADDRSTATE_ACQUIRING))) { set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags); } else if ((mbox_sts[3] == IP_ADDRSTATE_ACQUIRING) && (mbox_sts[2] == IP_ADDRSTATE_PREFERRED)) { if (is_qla80XX(ha)) set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags); else set_bit(DPC_RESET_HA, &ha->dpc_flags); } else if (mbox_sts[3] == IP_ADDRSTATE_DISABLING) { ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ACB in disabling state\n", ha->host_no, __func__); } else if (mbox_sts[3] == IP_ADDRSTATE_UNCONFIGURED) { complete(&ha->disable_acb_comp); ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ACB state unconfigured\n", ha->host_no, __func__); } break; case MBOX_ASTS_IPV6_LINK_MTU_CHANGE: case MBOX_ASTS_IPV6_AUTO_PREFIX_IGNORED: case MBOX_ASTS_IPV6_ND_LOCAL_PREFIX_IGNORED: /* No action */ DEBUG2(ql4_printk(KERN_INFO, 
ha, "scsi%ld: AEN %04x\n", ha->host_no, mbox_status)); break; case MBOX_ASTS_ICMPV6_ERROR_MSG_RCVD: DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld: AEN %04x, IPv6 ERROR, " "mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3}=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n", ha->host_no, mbox_sts[0], mbox_sts[1], mbox_sts[2], mbox_sts[3], mbox_sts[4], mbox_sts[5])); break; case MBOX_ASTS_MAC_ADDRESS_CHANGED: case MBOX_ASTS_DNS: /* No action */ DEBUG2(printk(KERN_INFO "scsi%ld: AEN %04x, " "mbox_sts[1]=%04x, mbox_sts[2]=%04x\n", ha->host_no, mbox_sts[0], mbox_sts[1], mbox_sts[2])); break; case MBOX_ASTS_SELF_TEST_FAILED: case MBOX_ASTS_LOGIN_FAILED: /* No action */ DEBUG2(printk("scsi%ld: AEN %04x, mbox_sts[1]=%04x, " "mbox_sts[2]=%04x, mbox_sts[3]=%04x\n", ha->host_no, mbox_sts[0], mbox_sts[1], mbox_sts[2], mbox_sts[3])); break; case MBOX_ASTS_DATABASE_CHANGED: /* Queue AEN information and process it in the DPC * routine */ if (ha->aen_q_count > 0) { /* decrement available counter */ ha->aen_q_count--; for (i = 0; i < MBOX_AEN_REG_COUNT; i++) ha->aen_q[ha->aen_in].mbox_sts[i] = mbox_sts[i]; /* print debug message */ DEBUG2(printk("scsi%ld: AEN[%d] %04x queued " "mb1:0x%x mb2:0x%x mb3:0x%x " "mb4:0x%x mb5:0x%x\n", ha->host_no, ha->aen_in, mbox_sts[0], mbox_sts[1], mbox_sts[2], mbox_sts[3], mbox_sts[4], mbox_sts[5])); /* advance pointer */ ha->aen_in++; if (ha->aen_in == MAX_AEN_ENTRIES) ha->aen_in = 0; /* The DPC routine will process the aen */ set_bit(DPC_AEN, &ha->dpc_flags); } else { DEBUG2(printk("scsi%ld: %s: aen %04x, queue " "overflowed! AEN LOST!!\n", ha->host_no, __func__, mbox_sts[0])); DEBUG2(printk("scsi%ld: DUMP AEN QUEUE\n", ha->host_no)); for (i = 0; i < MAX_AEN_ENTRIES; i++) { DEBUG2(printk("AEN[%d] %04x %04x %04x " "%04x\n", i, mbox_sts[0], mbox_sts[1], mbox_sts[2], mbox_sts[3])); } } break; case MBOX_ASTS_TXSCVR_INSERTED: DEBUG2(printk(KERN_WARNING "scsi%ld: AEN %04x Transceiver" " inserted\n", ha->host_no, mbox_sts[0])); break; case MBOX_ASTS_TXSCVR_REMOVED: DEBUG2(printk(KERN_WARNING "scsi%ld: AEN %04x Transceiver" " removed\n", ha->host_no, mbox_sts[0])); break; case MBOX_ASTS_IDC_REQUEST_NOTIFICATION: if (is_qla8032(ha) || is_qla8042(ha)) { DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x\n", ha->host_no, mbox_sts[0], mbox_sts[1], mbox_sts[2], mbox_sts[3], mbox_sts[4])); opcode = mbox_sts[1] >> 16; if ((opcode == MBOX_CMD_SET_PORT_CONFIG) || (opcode == MBOX_CMD_PORT_RESET)) { set_bit(DPC_POST_IDC_ACK, &ha->dpc_flags); ha->idc_info.request_desc = mbox_sts[1]; ha->idc_info.info1 = mbox_sts[2]; ha->idc_info.info2 = mbox_sts[3]; ha->idc_info.info3 = mbox_sts[4]; qla4xxx_wake_dpc(ha); } } break; case MBOX_ASTS_IDC_COMPLETE: if (is_qla8032(ha) || is_qla8042(ha)) { DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x\n", ha->host_no, mbox_sts[0], mbox_sts[1], mbox_sts[2], mbox_sts[3], mbox_sts[4])); DEBUG2(ql4_printk(KERN_INFO, ha, "scsi:%ld: AEN %04x IDC Complete notification\n", ha->host_no, mbox_sts[0])); opcode = mbox_sts[1] >> 16; if (ha->notify_idc_comp) complete(&ha->idc_comp); if ((opcode == MBOX_CMD_SET_PORT_CONFIG) || (opcode == MBOX_CMD_PORT_RESET)) ha->idc_info.info2 = mbox_sts[3]; if (qla4_83xx_loopback_in_progress(ha)) { set_bit(AF_LOOPBACK, &ha->flags); } else { clear_bit(AF_LOOPBACK, &ha->flags); if (ha->saved_acb) set_bit(DPC_RESTORE_ACB, &ha->dpc_flags); } qla4xxx_wake_dpc(ha); } break; case MBOX_ASTS_IPV6_DEFAULT_ROUTER_CHANGED: 
DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n", ha->host_no, mbox_sts[0], mbox_sts[1], mbox_sts[2], mbox_sts[3], mbox_sts[4], mbox_sts[5])); DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld: AEN %04x Received IPv6 default router changed notification\n", ha->host_no, mbox_sts[0])); qla4xxx_default_router_changed(ha, mbox_sts); break; case MBOX_ASTS_IDC_TIME_EXTEND_NOTIFICATION: DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n", ha->host_no, mbox_sts[0], mbox_sts[1], mbox_sts[2], mbox_sts[3], mbox_sts[4], mbox_sts[5])); DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld: AEN %04x Received IDC Extend Timeout notification\n", ha->host_no, mbox_sts[0])); /* new IDC timeout */ ha->idc_extend_tmo = mbox_sts[1]; break; case MBOX_ASTS_INITIALIZATION_FAILED: DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld: AEN %04x, mbox_sts[3]=%08x\n", ha->host_no, mbox_sts[0], mbox_sts[3])); break; case MBOX_ASTS_SYSTEM_WARNING_EVENT: DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n", ha->host_no, mbox_sts[0], mbox_sts[1], mbox_sts[2], mbox_sts[3], mbox_sts[4], mbox_sts[5])); break; case MBOX_ASTS_DCBX_CONF_CHANGE: DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n", ha->host_no, mbox_sts[0], mbox_sts[1], mbox_sts[2], mbox_sts[3], mbox_sts[4], mbox_sts[5])); DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld: AEN %04x Received DCBX configuration changed notification\n", ha->host_no, mbox_sts[0])); break; default: DEBUG2(printk(KERN_WARNING "scsi%ld: AEN %04x UNKNOWN\n", ha->host_no, mbox_sts[0])); break; } } else { DEBUG2(printk("scsi%ld: Unknown mailbox status %08X\n", ha->host_no, mbox_status)); ha->mbox_status[0] = mbox_status; } } void qla4_83xx_interrupt_service_routine(struct scsi_qla_host *ha, uint32_t intr_status) { /* Process mailbox/asynch event interrupt.*/ if (intr_status) { qla4xxx_isr_decode_mailbox(ha, readl(&ha->qla4_83xx_reg->mailbox_out[0])); /* clear the interrupt */ writel(0, &ha->qla4_83xx_reg->risc_intr); } else { qla4xxx_process_response_queue(ha); } /* clear the interrupt */ writel(0, &ha->qla4_83xx_reg->mb_int_mask); } /** * qla4_82xx_interrupt_service_routine - isr * @ha: pointer to host adapter structure. * @intr_status: Local interrupt status/type. * * This is the main interrupt service routine. * hardware_lock locked upon entry. runs in interrupt context. **/ void qla4_82xx_interrupt_service_routine(struct scsi_qla_host *ha, uint32_t intr_status) { /* Process response queue interrupt. */ if ((intr_status & HSRX_RISC_IOCB_INT) && test_bit(AF_INIT_DONE, &ha->flags)) qla4xxx_process_response_queue(ha); /* Process mailbox/asynch event interrupt.*/ if (intr_status & HSRX_RISC_MB_INT) qla4xxx_isr_decode_mailbox(ha, readl(&ha->qla4_82xx_reg->mailbox_out[0])); /* clear the interrupt */ writel(0, &ha->qla4_82xx_reg->host_int); readl(&ha->qla4_82xx_reg->host_int); } /** * qla4xxx_interrupt_service_routine - isr * @ha: pointer to host adapter structure. * @intr_status: Local interrupt status/type. * * This is the main interrupt service routine. * hardware_lock locked upon entry. runs in interrupt context. **/ void qla4xxx_interrupt_service_routine(struct scsi_qla_host * ha, uint32_t intr_status) { /* Process response queue interrupt. 
*/ if (intr_status & CSR_SCSI_COMPLETION_INTR) qla4xxx_process_response_queue(ha); /* Process mailbox/asynch event interrupt.*/ if (intr_status & CSR_SCSI_PROCESSOR_INTR) { qla4xxx_isr_decode_mailbox(ha, readl(&ha->reg->mailbox[0])); /* Clear Mailbox Interrupt */ writel(set_rmask(CSR_SCSI_PROCESSOR_INTR), &ha->reg->ctrl_status); readl(&ha->reg->ctrl_status); } } /** * qla4_82xx_spurious_interrupt - processes spurious interrupt * @ha: pointer to host adapter structure. * @reqs_count: . * **/ static void qla4_82xx_spurious_interrupt(struct scsi_qla_host *ha, uint8_t reqs_count) { if (reqs_count) return; DEBUG2(ql4_printk(KERN_INFO, ha, "Spurious Interrupt\n")); if (is_qla8022(ha)) { writel(0, &ha->qla4_82xx_reg->host_int); if (!ha->pdev->msi_enabled && !ha->pdev->msix_enabled) qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff); } ha->spurious_int_count++; } /** * qla4xxx_intr_handler - hardware interrupt handler. * @irq: Unused * @dev_id: Pointer to host adapter structure **/ irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id) { struct scsi_qla_host *ha; uint32_t intr_status; unsigned long flags = 0; uint8_t reqs_count = 0; ha = (struct scsi_qla_host *) dev_id; if (!ha) { DEBUG2(printk(KERN_INFO "qla4xxx: Interrupt with NULL host ptr\n")); return IRQ_NONE; } spin_lock_irqsave(&ha->hardware_lock, flags); ha->isr_count++; /* * Repeatedly service interrupts up to a maximum of * MAX_REQS_SERVICED_PER_INTR */ while (1) { /* * Read interrupt status */ if (ha->isp_ops->rd_shdw_rsp_q_in(ha) != ha->response_out) intr_status = CSR_SCSI_COMPLETION_INTR; else intr_status = readl(&ha->reg->ctrl_status); if ((intr_status & (CSR_SCSI_RESET_INTR|CSR_FATAL_ERROR|INTR_PENDING)) == 0) { if (reqs_count == 0) ha->spurious_int_count++; break; } if (intr_status & CSR_FATAL_ERROR) { DEBUG2(printk(KERN_INFO "scsi%ld: Fatal Error, " "Status 0x%04x\n", ha->host_no, readl(isp_port_error_status (ha)))); /* Issue Soft Reset to clear this error condition. * This will prevent the RISC from repeatedly * interrupting the driver; thus, allowing the DPC to * get scheduled to continue error recovery. * NOTE: Disabling RISC interrupts does not work in * this case, as CSR_FATAL_ERROR overrides * CSR_SCSI_INTR_ENABLE */ if ((readl(&ha->reg->ctrl_status) & CSR_SCSI_RESET_INTR) == 0) { writel(set_rmask(CSR_SOFT_RESET), &ha->reg->ctrl_status); readl(&ha->reg->ctrl_status); } writel(set_rmask(CSR_FATAL_ERROR), &ha->reg->ctrl_status); readl(&ha->reg->ctrl_status); __qla4xxx_disable_intrs(ha); set_bit(DPC_RESET_HA, &ha->dpc_flags); break; } else if (intr_status & CSR_SCSI_RESET_INTR) { clear_bit(AF_ONLINE, &ha->flags); __qla4xxx_disable_intrs(ha); writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status); readl(&ha->reg->ctrl_status); if (!test_bit(AF_HA_REMOVAL, &ha->flags)) set_bit(DPC_RESET_HA_INTR, &ha->dpc_flags); break; } else if (intr_status & INTR_PENDING) { ha->isp_ops->interrupt_service_routine(ha, intr_status); ha->total_io_count++; if (++reqs_count == MAX_REQS_SERVICED_PER_INTR) break; } } spin_unlock_irqrestore(&ha->hardware_lock, flags); return IRQ_HANDLED; } /** * qla4_82xx_intr_handler - hardware interrupt handler. 
* @irq: Unused * @dev_id: Pointer to host adapter structure **/ irqreturn_t qla4_82xx_intr_handler(int irq, void *dev_id) { struct scsi_qla_host *ha = dev_id; uint32_t intr_status; uint32_t status; unsigned long flags = 0; uint8_t reqs_count = 0; if (unlikely(pci_channel_offline(ha->pdev))) return IRQ_HANDLED; ha->isr_count++; status = qla4_82xx_rd_32(ha, ISR_INT_VECTOR); if (!(status & ha->nx_legacy_intr.int_vec_bit)) return IRQ_NONE; status = qla4_82xx_rd_32(ha, ISR_INT_STATE_REG); if (!ISR_IS_LEGACY_INTR_TRIGGERED(status)) { DEBUG7(ql4_printk(KERN_INFO, ha, "%s legacy Int not triggered\n", __func__)); return IRQ_NONE; } /* clear the interrupt */ qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff); /* read twice to ensure write is flushed */ qla4_82xx_rd_32(ha, ISR_INT_VECTOR); qla4_82xx_rd_32(ha, ISR_INT_VECTOR); spin_lock_irqsave(&ha->hardware_lock, flags); while (1) { if (!(readl(&ha->qla4_82xx_reg->host_int) & ISRX_82XX_RISC_INT)) { qla4_82xx_spurious_interrupt(ha, reqs_count); break; } intr_status = readl(&ha->qla4_82xx_reg->host_status); if ((intr_status & (HSRX_RISC_MB_INT | HSRX_RISC_IOCB_INT)) == 0) { qla4_82xx_spurious_interrupt(ha, reqs_count); break; } ha->isp_ops->interrupt_service_routine(ha, intr_status); /* Enable Interrupt */ qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff); if (++reqs_count == MAX_REQS_SERVICED_PER_INTR) break; } spin_unlock_irqrestore(&ha->hardware_lock, flags); return IRQ_HANDLED; } #define LEG_INT_PTR_B31 (1 << 31) #define LEG_INT_PTR_B30 (1 << 30) #define PF_BITS_MASK (0xF << 16) /** * qla4_83xx_intr_handler - hardware interrupt handler. * @irq: Unused * @dev_id: Pointer to host adapter structure **/ irqreturn_t qla4_83xx_intr_handler(int irq, void *dev_id) { struct scsi_qla_host *ha = dev_id; uint32_t leg_int_ptr = 0; unsigned long flags = 0; ha->isr_count++; leg_int_ptr = readl(&ha->qla4_83xx_reg->leg_int_ptr); /* Legacy interrupt is valid if bit31 of leg_int_ptr is set */ if (!(leg_int_ptr & LEG_INT_PTR_B31)) { DEBUG7(ql4_printk(KERN_ERR, ha, "%s: Legacy Interrupt Bit 31 not set, spurious interrupt!\n", __func__)); return IRQ_NONE; } /* Validate the PCIE function ID set in leg_int_ptr bits [19..16] */ if ((leg_int_ptr & PF_BITS_MASK) != ha->pf_bit) { DEBUG7(ql4_printk(KERN_ERR, ha, "%s: Incorrect function ID 0x%x in legacy interrupt register, ha->pf_bit = 0x%x\n", __func__, (leg_int_ptr & PF_BITS_MASK), ha->pf_bit)); return IRQ_NONE; } /* To de-assert legacy interrupt, write 0 to Legacy Interrupt Trigger * Control register and poll till Legacy Interrupt Pointer register * bit30 is 0. 
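* The poll below also bails out early if the PF bits stop matching this function.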
*/ writel(0, &ha->qla4_83xx_reg->leg_int_trig); do { leg_int_ptr = readl(&ha->qla4_83xx_reg->leg_int_ptr); if ((leg_int_ptr & PF_BITS_MASK) != ha->pf_bit) break; } while (leg_int_ptr & LEG_INT_PTR_B30); spin_lock_irqsave(&ha->hardware_lock, flags); leg_int_ptr = readl(&ha->qla4_83xx_reg->risc_intr); ha->isp_ops->interrupt_service_routine(ha, leg_int_ptr); spin_unlock_irqrestore(&ha->hardware_lock, flags); return IRQ_HANDLED; } irqreturn_t qla4_8xxx_msi_handler(int irq, void *dev_id) { struct scsi_qla_host *ha; ha = (struct scsi_qla_host *) dev_id; if (!ha) { DEBUG2(printk(KERN_INFO "qla4xxx: MSIX: Interrupt with NULL host ptr\n")); return IRQ_NONE; } ha->isr_count++; /* clear the interrupt */ qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff); /* read twice to ensure write is flushed */ qla4_82xx_rd_32(ha, ISR_INT_VECTOR); qla4_82xx_rd_32(ha, ISR_INT_VECTOR); return qla4_8xxx_default_intr_handler(irq, dev_id); } static irqreturn_t qla4_83xx_mailbox_intr_handler(int irq, void *dev_id) { struct scsi_qla_host *ha = dev_id; unsigned long flags; uint32_t ival = 0; spin_lock_irqsave(&ha->hardware_lock, flags); ival = readl(&ha->qla4_83xx_reg->risc_intr); if (ival == 0) { ql4_printk(KERN_INFO, ha, "%s: It is a spurious mailbox interrupt!\n", __func__); ival = readl(&ha->qla4_83xx_reg->mb_int_mask); ival &= ~INT_MASK_FW_MB; writel(ival, &ha->qla4_83xx_reg->mb_int_mask); goto exit; } qla4xxx_isr_decode_mailbox(ha, readl(&ha->qla4_83xx_reg->mailbox_out[0])); writel(0, &ha->qla4_83xx_reg->risc_intr); ival = readl(&ha->qla4_83xx_reg->mb_int_mask); ival &= ~INT_MASK_FW_MB; writel(ival, &ha->qla4_83xx_reg->mb_int_mask); ha->isr_count++; exit: spin_unlock_irqrestore(&ha->hardware_lock, flags); return IRQ_HANDLED; } /** * qla4_8xxx_default_intr_handler - hardware interrupt handler. * @irq: Unused * @dev_id: Pointer to host adapter structure * * This interrupt handler is called directly for MSI-X, and * called indirectly for MSI. 
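* For ISP83xx/ISP8042 adapters the mailbox interrupt is delegated to qla4_83xx_mailbox_intr_handler(); for ISP82xx the host_int/host_status registers are serviced directly in the loop below.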
**/ irqreturn_t qla4_8xxx_default_intr_handler(int irq, void *dev_id) { struct scsi_qla_host *ha = dev_id; unsigned long flags; uint32_t intr_status; uint8_t reqs_count = 0; if (is_qla8032(ha) || is_qla8042(ha)) { qla4_83xx_mailbox_intr_handler(irq, dev_id); } else { spin_lock_irqsave(&ha->hardware_lock, flags); while (1) { if (!(readl(&ha->qla4_82xx_reg->host_int) & ISRX_82XX_RISC_INT)) { qla4_82xx_spurious_interrupt(ha, reqs_count); break; } intr_status = readl(&ha->qla4_82xx_reg->host_status); if ((intr_status & (HSRX_RISC_MB_INT | HSRX_RISC_IOCB_INT)) == 0) { qla4_82xx_spurious_interrupt(ha, reqs_count); break; } ha->isp_ops->interrupt_service_routine(ha, intr_status); if (++reqs_count == MAX_REQS_SERVICED_PER_INTR) break; } ha->isr_count++; spin_unlock_irqrestore(&ha->hardware_lock, flags); } return IRQ_HANDLED; } irqreturn_t qla4_8xxx_msix_rsp_q(int irq, void *dev_id) { struct scsi_qla_host *ha = dev_id; unsigned long flags; int intr_status; uint32_t ival = 0; spin_lock_irqsave(&ha->hardware_lock, flags); if (is_qla8032(ha) || is_qla8042(ha)) { ival = readl(&ha->qla4_83xx_reg->iocb_int_mask); if (ival == 0) { ql4_printk(KERN_INFO, ha, "%s: It is a spurious iocb interrupt!\n", __func__); goto exit_msix_rsp_q; } qla4xxx_process_response_queue(ha); writel(0, &ha->qla4_83xx_reg->iocb_int_mask); } else { intr_status = readl(&ha->qla4_82xx_reg->host_status); if (intr_status & HSRX_RISC_IOCB_INT) { qla4xxx_process_response_queue(ha); writel(0, &ha->qla4_82xx_reg->host_int); } else { ql4_printk(KERN_INFO, ha, "%s: spurious iocb interrupt...\n", __func__); goto exit_msix_rsp_q; } } ha->isr_count++; exit_msix_rsp_q: spin_unlock_irqrestore(&ha->hardware_lock, flags); return IRQ_HANDLED; } /** * qla4xxx_process_aen - processes AENs generated by firmware * @ha: pointer to host adapter structure. * @process_aen: type of AENs to process * * Processes specific types of Asynchronous Events generated by firmware. * The type of AENs to process is specified by process_aen and can be * PROCESS_ALL_AENS 0 * FLUSH_DDB_CHANGED_AENS 1 * RELOGIN_DDB_CHANGED_AENS 2 **/ void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen) { uint32_t mbox_sts[MBOX_AEN_REG_COUNT]; struct aen *aen; int i; unsigned long flags; spin_lock_irqsave(&ha->hardware_lock, flags); while (ha->aen_out != ha->aen_in) { aen = &ha->aen_q[ha->aen_out]; /* copy aen information to local structure */ for (i = 0; i < MBOX_AEN_REG_COUNT; i++) mbox_sts[i] = aen->mbox_sts[i]; ha->aen_q_count++; ha->aen_out++; if (ha->aen_out == MAX_AEN_ENTRIES) ha->aen_out = 0; spin_unlock_irqrestore(&ha->hardware_lock, flags); DEBUG2(printk("qla4xxx(%ld): AEN[%d]=0x%08x, mbx1=0x%08x mbx2=0x%08x" " mbx3=0x%08x mbx4=0x%08x\n", ha->host_no, (ha->aen_out ? (ha->aen_out-1): (MAX_AEN_ENTRIES-1)), mbox_sts[0], mbox_sts[1], mbox_sts[2], mbox_sts[3], mbox_sts[4])); switch (mbox_sts[0]) { case MBOX_ASTS_DATABASE_CHANGED: switch (process_aen) { case FLUSH_DDB_CHANGED_AENS: DEBUG2(printk("scsi%ld: AEN[%d] %04x, index " "[%d] state=%04x FLUSHED!\n", ha->host_no, ha->aen_out, mbox_sts[0], mbox_sts[2], mbox_sts[3])); break; case PROCESS_ALL_AENS: default: /* Specific device. 
*/ if (mbox_sts[1] == 1) qla4xxx_process_ddb_changed(ha, mbox_sts[2], mbox_sts[3], mbox_sts[4]); break; } } spin_lock_irqsave(&ha->hardware_lock, flags); } spin_unlock_irqrestore(&ha->hardware_lock, flags); } int qla4xxx_request_irqs(struct scsi_qla_host *ha) { int ret = 0; int rval = QLA_ERROR; if (is_qla40XX(ha)) goto try_intx; if (ql4xenablemsix == 2) { /* Note: MSI Interrupts not supported for ISP8324 and ISP8042 */ if (is_qla8032(ha) || is_qla8042(ha)) { ql4_printk(KERN_INFO, ha, "%s: MSI Interrupts not supported for ISP%04x, Falling back-to INTx mode\n", __func__, ha->pdev->device); goto try_intx; } goto try_msi; } if (ql4xenablemsix == 0 || ql4xenablemsix != 1) goto try_intx; /* Trying MSI-X */ ret = qla4_8xxx_enable_msix(ha); if (!ret) { DEBUG2(ql4_printk(KERN_INFO, ha, "MSI-X: Enabled (0x%X).\n", ha->revision_id)); goto irq_attached; } else { if (is_qla8032(ha) || is_qla8042(ha)) { ql4_printk(KERN_INFO, ha, "%s: ISP%04x: MSI-X: Falling back-to INTx mode. ret = %d\n", __func__, ha->pdev->device, ret); goto try_intx; } } ql4_printk(KERN_WARNING, ha, "MSI-X: Falling back-to MSI mode -- %d.\n", ret); try_msi: /* Trying MSI */ ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI); if (ret > 0) { ret = request_irq(ha->pdev->irq, qla4_8xxx_msi_handler, 0, DRIVER_NAME, ha); if (!ret) { DEBUG2(ql4_printk(KERN_INFO, ha, "MSI: Enabled.\n")); goto irq_attached; } else { ql4_printk(KERN_WARNING, ha, "MSI: Failed to reserve interrupt %d " "already in use.\n", ha->pdev->irq); pci_free_irq_vectors(ha->pdev); } } try_intx: if (is_qla8022(ha)) { ql4_printk(KERN_WARNING, ha, "%s: ISP82xx Legacy interrupt not supported\n", __func__); goto irq_not_attached; } /* Trying INTx */ ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler, IRQF_SHARED, DRIVER_NAME, ha); if (!ret) { DEBUG2(ql4_printk(KERN_INFO, ha, "INTx: Enabled.\n")); goto irq_attached; } else { ql4_printk(KERN_WARNING, ha, "INTx: Failed to reserve interrupt %d already in" " use.\n", ha->pdev->irq); goto irq_not_attached; } irq_attached: set_bit(AF_IRQ_ATTACHED, &ha->flags); ha->host->irq = ha->pdev->irq; ql4_printk(KERN_INFO, ha, "%s: irq %d attached\n", __func__, ha->pdev->irq); rval = QLA_SUCCESS; irq_not_attached: return rval; } void qla4xxx_free_irqs(struct scsi_qla_host *ha) { if (!test_and_clear_bit(AF_IRQ_ATTACHED, &ha->flags)) return; if (ha->pdev->msix_enabled) free_irq(pci_irq_vector(ha->pdev, 1), ha); free_irq(pci_irq_vector(ha->pdev, 0), ha); pci_free_irq_vectors(ha->pdev); }
linux-master
drivers/scsi/qla4xxx/ql4_isr.c
// SPDX-License-Identifier: GPL-2.0-only /* * QLogic iSCSI HBA Driver * Copyright (c) 2003-2013 QLogic Corporation */ #include "ql4_def.h" #include "ql4_glbl.h" #include "ql4_dbg.h" static ssize_t qla4_8xxx_sysfs_read_fw_dump(struct file *filep, struct kobject *kobj, struct bin_attribute *ba, char *buf, loff_t off, size_t count) { struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj, struct device, kobj))); if (is_qla40XX(ha)) return -EINVAL; if (!test_bit(AF_82XX_DUMP_READING, &ha->flags)) return 0; return memory_read_from_buffer(buf, count, &off, ha->fw_dump, ha->fw_dump_size); } static ssize_t qla4_8xxx_sysfs_write_fw_dump(struct file *filep, struct kobject *kobj, struct bin_attribute *ba, char *buf, loff_t off, size_t count) { struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj, struct device, kobj))); uint32_t dev_state; long reading; int ret = 0; if (is_qla40XX(ha)) return -EINVAL; if (off != 0) return ret; buf[1] = 0; ret = kstrtol(buf, 10, &reading); if (ret) { ql4_printk(KERN_ERR, ha, "%s: Invalid input. Return err %d\n", __func__, ret); return ret; } switch (reading) { case 0: /* clear dump collection flags */ if (test_and_clear_bit(AF_82XX_DUMP_READING, &ha->flags)) { clear_bit(AF_82XX_FW_DUMPED, &ha->flags); /* Reload minidump template */ qla4xxx_alloc_fw_dump(ha); DEBUG2(ql4_printk(KERN_INFO, ha, "Firmware template reloaded\n")); } break; case 1: /* Set flag to read dump */ if (test_bit(AF_82XX_FW_DUMPED, &ha->flags) && !test_bit(AF_82XX_DUMP_READING, &ha->flags)) { set_bit(AF_82XX_DUMP_READING, &ha->flags); DEBUG2(ql4_printk(KERN_INFO, ha, "Raw firmware dump ready for read on (%ld).\n", ha->host_no)); } break; case 2: /* Reset HBA and collect FW dump */ ha->isp_ops->idc_lock(ha); dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE); if (dev_state == QLA8XXX_DEV_READY) { ql4_printk(KERN_INFO, ha, "%s: Setting Need reset\n", __func__); qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, QLA8XXX_DEV_NEED_RESET); if (is_qla8022(ha) || ((is_qla8032(ha) || is_qla8042(ha)) && qla4_83xx_can_perform_reset(ha))) { set_bit(AF_8XXX_RST_OWNER, &ha->flags); set_bit(AF_FW_RECOVERY, &ha->flags); ql4_printk(KERN_INFO, ha, "%s: Reset owner is 0x%x\n", __func__, ha->func_num); } } else ql4_printk(KERN_INFO, ha, "%s: Reset not performed as device state is 0x%x\n", __func__, dev_state); ha->isp_ops->idc_unlock(ha); break; default: /* do nothing */ break; } return count; } static struct bin_attribute sysfs_fw_dump_attr = { .attr = { .name = "fw_dump", .mode = S_IRUSR | S_IWUSR, }, .size = 0, .read = qla4_8xxx_sysfs_read_fw_dump, .write = qla4_8xxx_sysfs_write_fw_dump, }; static struct sysfs_entry { char *name; struct bin_attribute *attr; } bin_file_entries[] = { { "fw_dump", &sysfs_fw_dump_attr }, { NULL }, }; void qla4_8xxx_alloc_sysfs_attr(struct scsi_qla_host *ha) { struct Scsi_Host *host = ha->host; struct sysfs_entry *iter; int ret; for (iter = bin_file_entries; iter->name; iter++) { ret = sysfs_create_bin_file(&host->shost_gendev.kobj, iter->attr); if (ret) ql4_printk(KERN_ERR, ha, "Unable to create sysfs %s binary attribute (%d).\n", iter->name, ret); } } void qla4_8xxx_free_sysfs_attr(struct scsi_qla_host *ha) { struct Scsi_Host *host = ha->host; struct sysfs_entry *iter; for (iter = bin_file_entries; iter->name; iter++) sysfs_remove_bin_file(&host->shost_gendev.kobj, iter->attr); } /* Scsi_Host attributes. 
*/ static ssize_t qla4xxx_fw_version_show(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev)); if (is_qla80XX(ha)) return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n", ha->fw_info.fw_major, ha->fw_info.fw_minor, ha->fw_info.fw_patch, ha->fw_info.fw_build); else return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d.%02d\n", ha->fw_info.fw_major, ha->fw_info.fw_minor, ha->fw_info.fw_patch, ha->fw_info.fw_build); } static ssize_t qla4xxx_serial_num_show(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev)); return snprintf(buf, PAGE_SIZE, "%s\n", ha->serial_number); } static ssize_t qla4xxx_iscsi_version_show(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev)); return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fw_info.iscsi_major, ha->fw_info.iscsi_minor); } static ssize_t qla4xxx_optrom_version_show(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev)); return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d.%02d\n", ha->fw_info.bootload_major, ha->fw_info.bootload_minor, ha->fw_info.bootload_patch, ha->fw_info.bootload_build); } static ssize_t qla4xxx_board_id_show(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev)); return snprintf(buf, PAGE_SIZE, "0x%08X\n", ha->board_id); } static ssize_t qla4xxx_fw_state_show(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev)); qla4xxx_get_firmware_state(ha); return snprintf(buf, PAGE_SIZE, "0x%08X%8X\n", ha->firmware_state, ha->addl_fw_state); } static ssize_t qla4xxx_phy_port_cnt_show(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev)); if (is_qla40XX(ha)) return -ENOSYS; return snprintf(buf, PAGE_SIZE, "0x%04X\n", ha->phy_port_cnt); } static ssize_t qla4xxx_phy_port_num_show(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev)); if (is_qla40XX(ha)) return -ENOSYS; return snprintf(buf, PAGE_SIZE, "0x%04X\n", ha->phy_port_num); } static ssize_t qla4xxx_iscsi_func_cnt_show(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev)); if (is_qla40XX(ha)) return -ENOSYS; return snprintf(buf, PAGE_SIZE, "0x%04X\n", ha->iscsi_pci_func_cnt); } static ssize_t qla4xxx_hba_model_show(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev)); return snprintf(buf, PAGE_SIZE, "%s\n", ha->model_name); } static ssize_t qla4xxx_fw_timestamp_show(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev)); return snprintf(buf, PAGE_SIZE, "%s %s\n", ha->fw_info.fw_build_date, ha->fw_info.fw_build_time); } static ssize_t qla4xxx_fw_build_user_show(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev)); return snprintf(buf, PAGE_SIZE, "%s\n", ha->fw_info.fw_build_user); } static ssize_t qla4xxx_fw_ext_timestamp_show(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev)); return snprintf(buf, PAGE_SIZE, 
"%s\n", ha->fw_info.extended_timestamp); } static ssize_t qla4xxx_fw_load_src_show(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev)); char *load_src = NULL; switch (ha->fw_info.fw_load_source) { case 1: load_src = "Flash Primary"; break; case 2: load_src = "Flash Secondary"; break; case 3: load_src = "Host Download"; break; } return snprintf(buf, PAGE_SIZE, "%s\n", load_src); } static ssize_t qla4xxx_fw_uptime_show(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev)); qla4xxx_about_firmware(ha); return snprintf(buf, PAGE_SIZE, "%u.%u secs\n", ha->fw_uptime_secs, ha->fw_uptime_msecs); } static DEVICE_ATTR(fw_version, S_IRUGO, qla4xxx_fw_version_show, NULL); static DEVICE_ATTR(serial_num, S_IRUGO, qla4xxx_serial_num_show, NULL); static DEVICE_ATTR(iscsi_version, S_IRUGO, qla4xxx_iscsi_version_show, NULL); static DEVICE_ATTR(optrom_version, S_IRUGO, qla4xxx_optrom_version_show, NULL); static DEVICE_ATTR(board_id, S_IRUGO, qla4xxx_board_id_show, NULL); static DEVICE_ATTR(fw_state, S_IRUGO, qla4xxx_fw_state_show, NULL); static DEVICE_ATTR(phy_port_cnt, S_IRUGO, qla4xxx_phy_port_cnt_show, NULL); static DEVICE_ATTR(phy_port_num, S_IRUGO, qla4xxx_phy_port_num_show, NULL); static DEVICE_ATTR(iscsi_func_cnt, S_IRUGO, qla4xxx_iscsi_func_cnt_show, NULL); static DEVICE_ATTR(hba_model, S_IRUGO, qla4xxx_hba_model_show, NULL); static DEVICE_ATTR(fw_timestamp, S_IRUGO, qla4xxx_fw_timestamp_show, NULL); static DEVICE_ATTR(fw_build_user, S_IRUGO, qla4xxx_fw_build_user_show, NULL); static DEVICE_ATTR(fw_ext_timestamp, S_IRUGO, qla4xxx_fw_ext_timestamp_show, NULL); static DEVICE_ATTR(fw_load_src, S_IRUGO, qla4xxx_fw_load_src_show, NULL); static DEVICE_ATTR(fw_uptime, S_IRUGO, qla4xxx_fw_uptime_show, NULL); static struct attribute *qla4xxx_host_attrs[] = { &dev_attr_fw_version.attr, &dev_attr_serial_num.attr, &dev_attr_iscsi_version.attr, &dev_attr_optrom_version.attr, &dev_attr_board_id.attr, &dev_attr_fw_state.attr, &dev_attr_phy_port_cnt.attr, &dev_attr_phy_port_num.attr, &dev_attr_iscsi_func_cnt.attr, &dev_attr_hba_model.attr, &dev_attr_fw_timestamp.attr, &dev_attr_fw_build_user.attr, &dev_attr_fw_ext_timestamp.attr, &dev_attr_fw_load_src.attr, &dev_attr_fw_uptime.attr, NULL, }; static const struct attribute_group qla4xxx_host_attr_group = { .attrs = qla4xxx_host_attrs }; const struct attribute_group *qla4xxx_host_groups[] = { &qla4xxx_host_attr_group, NULL };
linux-master
drivers/scsi/qla4xxx/ql4_attr.c
// SPDX-License-Identifier: GPL-2.0-only /* * QLogic iSCSI HBA Driver * Copyright (c) 2003-2012 QLogic Corporation */ #include "ql4_def.h" #include "ql4_glbl.h" #include "ql4_dbg.h" #include "ql4_inline.h" void qla4xxx_dump_buffer(void *b, uint32_t size) { uint32_t cnt; uint8_t *c = b; printk(" 0 1 2 3 4 5 6 7 8 9 Ah Bh Ch Dh Eh " "Fh\n"); printk("------------------------------------------------------------" "--\n"); for (cnt = 0; cnt < size; c++) { printk("%02x", *c); if (!(++cnt % 16)) printk("\n"); else printk(" "); } printk(KERN_INFO "\n"); } void qla4xxx_dump_registers(struct scsi_qla_host *ha) { uint8_t i; if (is_qla8022(ha)) { for (i = 1; i < MBOX_REG_COUNT; i++) printk(KERN_INFO "mailbox[%d] = 0x%08X\n", i, readl(&ha->qla4_82xx_reg->mailbox_in[i])); return; } for (i = 0; i < MBOX_REG_COUNT; i++) { printk(KERN_INFO "0x%02X mailbox[%d] = 0x%08X\n", (uint8_t) offsetof(struct isp_reg, mailbox[i]), i, readw(&ha->reg->mailbox[i])); } printk(KERN_INFO "0x%02X flash_address = 0x%08X\n", (uint8_t) offsetof(struct isp_reg, flash_address), readw(&ha->reg->flash_address)); printk(KERN_INFO "0x%02X flash_data = 0x%08X\n", (uint8_t) offsetof(struct isp_reg, flash_data), readw(&ha->reg->flash_data)); printk(KERN_INFO "0x%02X ctrl_status = 0x%08X\n", (uint8_t) offsetof(struct isp_reg, ctrl_status), readw(&ha->reg->ctrl_status)); if (is_qla4010(ha)) { printk(KERN_INFO "0x%02X nvram = 0x%08X\n", (uint8_t) offsetof(struct isp_reg, u1.isp4010.nvram), readw(&ha->reg->u1.isp4010.nvram)); } else if (is_qla4022(ha) | is_qla4032(ha)) { printk(KERN_INFO "0x%02X intr_mask = 0x%08X\n", (uint8_t) offsetof(struct isp_reg, u1.isp4022.intr_mask), readw(&ha->reg->u1.isp4022.intr_mask)); printk(KERN_INFO "0x%02X nvram = 0x%08X\n", (uint8_t) offsetof(struct isp_reg, u1.isp4022.nvram), readw(&ha->reg->u1.isp4022.nvram)); printk(KERN_INFO "0x%02X semaphore = 0x%08X\n", (uint8_t) offsetof(struct isp_reg, u1.isp4022.semaphore), readw(&ha->reg->u1.isp4022.semaphore)); } printk(KERN_INFO "0x%02X req_q_in = 0x%08X\n", (uint8_t) offsetof(struct isp_reg, req_q_in), readw(&ha->reg->req_q_in)); printk(KERN_INFO "0x%02X rsp_q_out = 0x%08X\n", (uint8_t) offsetof(struct isp_reg, rsp_q_out), readw(&ha->reg->rsp_q_out)); if (is_qla4010(ha)) { printk(KERN_INFO "0x%02X ext_hw_conf = 0x%08X\n", (uint8_t) offsetof(struct isp_reg, u2.isp4010.ext_hw_conf), readw(&ha->reg->u2.isp4010.ext_hw_conf)); printk(KERN_INFO "0x%02X port_ctrl = 0x%08X\n", (uint8_t) offsetof(struct isp_reg, u2.isp4010.port_ctrl), readw(&ha->reg->u2.isp4010.port_ctrl)); printk(KERN_INFO "0x%02X port_status = 0x%08X\n", (uint8_t) offsetof(struct isp_reg, u2.isp4010.port_status), readw(&ha->reg->u2.isp4010.port_status)); printk(KERN_INFO "0x%02X req_q_out = 0x%08X\n", (uint8_t) offsetof(struct isp_reg, u2.isp4010.req_q_out), readw(&ha->reg->u2.isp4010.req_q_out)); printk(KERN_INFO "0x%02X gp_out = 0x%08X\n", (uint8_t) offsetof(struct isp_reg, u2.isp4010.gp_out), readw(&ha->reg->u2.isp4010.gp_out)); printk(KERN_INFO "0x%02X gp_in = 0x%08X\n", (uint8_t) offsetof(struct isp_reg, u2.isp4010.gp_in), readw(&ha->reg->u2.isp4010.gp_in)); printk(KERN_INFO "0x%02X port_err_status = 0x%08X\n", (uint8_t) offsetof(struct isp_reg, u2.isp4010.port_err_status), readw(&ha->reg->u2.isp4010.port_err_status)); } else if (is_qla4022(ha) | is_qla4032(ha)) { printk(KERN_INFO "Page 0 Registers:\n"); printk(KERN_INFO "0x%02X ext_hw_conf = 0x%08X\n", (uint8_t) offsetof(struct isp_reg, u2.isp4022.p0.ext_hw_conf), readw(&ha->reg->u2.isp4022.p0.ext_hw_conf)); printk(KERN_INFO "0x%02X port_ctrl = 
0x%08X\n", (uint8_t) offsetof(struct isp_reg, u2.isp4022.p0.port_ctrl), readw(&ha->reg->u2.isp4022.p0.port_ctrl)); printk(KERN_INFO "0x%02X port_status = 0x%08X\n", (uint8_t) offsetof(struct isp_reg, u2.isp4022.p0.port_status), readw(&ha->reg->u2.isp4022.p0.port_status)); printk(KERN_INFO "0x%02X gp_out = 0x%08X\n", (uint8_t) offsetof(struct isp_reg, u2.isp4022.p0.gp_out), readw(&ha->reg->u2.isp4022.p0.gp_out)); printk(KERN_INFO "0x%02X gp_in = 0x%08X\n", (uint8_t) offsetof(struct isp_reg, u2.isp4022.p0.gp_in), readw(&ha->reg->u2.isp4022.p0.gp_in)); printk(KERN_INFO "0x%02X port_err_status = 0x%08X\n", (uint8_t) offsetof(struct isp_reg, u2.isp4022.p0.port_err_status), readw(&ha->reg->u2.isp4022.p0.port_err_status)); printk(KERN_INFO "Page 1 Registers:\n"); writel(HOST_MEM_CFG_PAGE & set_rmask(CSR_SCSI_PAGE_SELECT), &ha->reg->ctrl_status); printk(KERN_INFO "0x%02X req_q_out = 0x%08X\n", (uint8_t) offsetof(struct isp_reg, u2.isp4022.p1.req_q_out), readw(&ha->reg->u2.isp4022.p1.req_q_out)); writel(PORT_CTRL_STAT_PAGE & set_rmask(CSR_SCSI_PAGE_SELECT), &ha->reg->ctrl_status); } } void qla4_8xxx_dump_peg_reg(struct scsi_qla_host *ha) { uint32_t halt_status1, halt_status2; halt_status1 = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_HALT_STATUS1); halt_status2 = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_HALT_STATUS2); if (is_qla8022(ha)) { ql4_printk(KERN_INFO, ha, "scsi(%ld): %s, ISP%04x Dumping hw/fw registers:\n" " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n" " PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n" " PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n" " PEG_NET_4_PC: 0x%x\n", ha->host_no, __func__, ha->pdev->device, halt_status1, halt_status2, qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c), qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_1 + 0x3c), qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c), qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c), qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c)); } else if (is_qla8032(ha) || is_qla8042(ha)) { ql4_printk(KERN_INFO, ha, "scsi(%ld): %s, ISP%04x Dumping hw/fw registers:\n" " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n", ha->host_no, __func__, ha->pdev->device, halt_status1, halt_status2); } }
linux-master
drivers/scsi/qla4xxx/ql4_dbg.c
// SPDX-License-Identifier: GPL-2.0-only /* * QLogic iSCSI HBA Driver * Copyright (c) 2003-2013 QLogic Corporation */ #include <linux/moduleparam.h> #include <linux/slab.h> #include <linux/blkdev.h> #include <linux/iscsi_boot_sysfs.h> #include <linux/inet.h> #include <scsi/scsi_tcq.h> #include <scsi/scsicam.h> #include "ql4_def.h" #include "ql4_version.h" #include "ql4_glbl.h" #include "ql4_dbg.h" #include "ql4_inline.h" #include "ql4_83xx.h" /* * Driver version */ static char qla4xxx_version_str[40]; /* * SRB allocation cache */ static struct kmem_cache *srb_cachep; /* * Module parameter information and variables */ static int ql4xdisablesysfsboot = 1; module_param(ql4xdisablesysfsboot, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(ql4xdisablesysfsboot, " Set to disable exporting boot targets to sysfs.\n" "\t\t 0 - Export boot targets\n" "\t\t 1 - Do not export boot targets (Default)"); int ql4xdontresethba; module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(ql4xdontresethba, " Don't reset the HBA for driver recovery.\n" "\t\t 0 - It will reset HBA (Default)\n" "\t\t 1 - It will NOT reset HBA"); int ql4xextended_error_logging; module_param(ql4xextended_error_logging, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(ql4xextended_error_logging, " Option to enable extended error logging.\n" "\t\t 0 - no logging (Default)\n" "\t\t 2 - debug logging"); int ql4xenablemsix = 1; module_param(ql4xenablemsix, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(ql4xenablemsix, " Set to enable MSI or MSI-X interrupt mechanism.\n" "\t\t 0 = enable INTx interrupt mechanism.\n" "\t\t 1 = enable MSI-X interrupt mechanism (Default).\n" "\t\t 2 = enable MSI interrupt mechanism."); #define QL4_DEF_QDEPTH 32 static int ql4xmaxqdepth = QL4_DEF_QDEPTH; module_param(ql4xmaxqdepth, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(ql4xmaxqdepth, " Maximum queue depth to report for target devices.\n" "\t\t Default: 32."); static int ql4xqfulltracking = 1; module_param(ql4xqfulltracking, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(ql4xqfulltracking, " Enable or disable dynamic tracking and adjustment of\n" "\t\t scsi device queue depth.\n" "\t\t 0 - Disable.\n" "\t\t 1 - Enable. 
(Default)"); static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO; module_param(ql4xsess_recovery_tmo, int, S_IRUGO); MODULE_PARM_DESC(ql4xsess_recovery_tmo, " Target Session Recovery Timeout.\n" "\t\t Default: 120 sec."); int ql4xmdcapmask = 0; module_param(ql4xmdcapmask, int, S_IRUGO); MODULE_PARM_DESC(ql4xmdcapmask, " Set the Minidump driver capture mask level.\n" "\t\t Default is 0 (firmware default capture mask)\n" "\t\t Can be set to 0x3, 0x7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF"); int ql4xenablemd = 1; module_param(ql4xenablemd, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(ql4xenablemd, " Set to enable minidump.\n" "\t\t 0 - disable minidump\n" "\t\t 1 - enable minidump (Default)"); static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha); /* * SCSI host template entry points */ static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha); /* * iSCSI template entry points */ static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess, enum iscsi_param param, char *buf); static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn, enum iscsi_param param, char *buf); static int qla4xxx_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param, char *buf); static int qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len); static int qla4xxx_get_iface_param(struct iscsi_iface *iface, enum iscsi_param_type param_type, int param, char *buf); static enum scsi_timeout_action qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc); static struct iscsi_endpoint *qla4xxx_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, int non_blocking); static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms); static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep); static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep, enum iscsi_param param, char *buf); static int qla4xxx_conn_start(struct iscsi_cls_conn *conn); static struct iscsi_cls_conn * qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx); static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session, struct iscsi_cls_conn *cls_conn, uint64_t transport_fd, int is_leading); static void qla4xxx_conn_destroy(struct iscsi_cls_conn *conn); static struct iscsi_cls_session * qla4xxx_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max, uint16_t qdepth, uint32_t initial_cmdsn); static void qla4xxx_session_destroy(struct iscsi_cls_session *sess); static void qla4xxx_task_work(struct work_struct *wdata); static int qla4xxx_alloc_pdu(struct iscsi_task *, uint8_t); static int qla4xxx_task_xmit(struct iscsi_task *); static void qla4xxx_task_cleanup(struct iscsi_task *); static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session); static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats); static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num, uint32_t iface_type, uint32_t payload_size, uint32_t pid, struct sockaddr *dst_addr); static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx, uint32_t *num_entries, char *buf); static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx); static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void *data, int len); static int qla4xxx_get_host_stats(struct Scsi_Host *shost, char *buf, int len); /* * SCSI host template entry points */ static int qla4xxx_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd); static int qla4xxx_eh_abort(struct scsi_cmnd *cmd); static int qla4xxx_eh_device_reset(struct 
scsi_cmnd *cmd); static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd); static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd); static int qla4xxx_slave_alloc(struct scsi_device *device); static umode_t qla4_attr_is_visible(int param_type, int param); static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type); /* * iSCSI Flash DDB sysfs entry points */ static int qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess, struct iscsi_bus_flash_conn *fnode_conn, void *data, int len); static int qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess, int param, char *buf); static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf, int len); static int qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess); static int qla4xxx_sysfs_ddb_login(struct iscsi_bus_flash_session *fnode_sess, struct iscsi_bus_flash_conn *fnode_conn); static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess, struct iscsi_bus_flash_conn *fnode_conn); static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess); static struct qla4_8xxx_legacy_intr_set legacy_intr[] = QLA82XX_LEGACY_INTR_CONFIG; static const uint32_t qla4_82xx_reg_tbl[] = { QLA82XX_PEG_HALT_STATUS1, QLA82XX_PEG_HALT_STATUS2, QLA82XX_PEG_ALIVE_COUNTER, QLA82XX_CRB_DRV_ACTIVE, QLA82XX_CRB_DEV_STATE, QLA82XX_CRB_DRV_STATE, QLA82XX_CRB_DRV_SCRATCH, QLA82XX_CRB_DEV_PART_INFO, QLA82XX_CRB_DRV_IDC_VERSION, QLA82XX_FW_VERSION_MAJOR, QLA82XX_FW_VERSION_MINOR, QLA82XX_FW_VERSION_SUB, CRB_CMDPEG_STATE, CRB_TEMP_STATE, }; static const uint32_t qla4_83xx_reg_tbl[] = { QLA83XX_PEG_HALT_STATUS1, QLA83XX_PEG_HALT_STATUS2, QLA83XX_PEG_ALIVE_COUNTER, QLA83XX_CRB_DRV_ACTIVE, QLA83XX_CRB_DEV_STATE, QLA83XX_CRB_DRV_STATE, QLA83XX_CRB_DRV_SCRATCH, QLA83XX_CRB_DEV_PART_INFO1, QLA83XX_CRB_IDC_VER_MAJOR, QLA83XX_FW_VER_MAJOR, QLA83XX_FW_VER_MINOR, QLA83XX_FW_VER_SUB, QLA83XX_CMDPEG_STATE, QLA83XX_ASIC_TEMP, }; static struct scsi_host_template qla4xxx_driver_template = { .module = THIS_MODULE, .name = DRIVER_NAME, .proc_name = DRIVER_NAME, .queuecommand = qla4xxx_queuecommand, .cmd_size = sizeof(struct qla4xxx_cmd_priv), .eh_abort_handler = qla4xxx_eh_abort, .eh_device_reset_handler = qla4xxx_eh_device_reset, .eh_target_reset_handler = qla4xxx_eh_target_reset, .eh_host_reset_handler = qla4xxx_eh_host_reset, .eh_timed_out = qla4xxx_eh_cmd_timed_out, .slave_alloc = qla4xxx_slave_alloc, .change_queue_depth = scsi_change_queue_depth, .this_id = -1, .cmd_per_lun = 3, .sg_tablesize = SG_ALL, .max_sectors = 0xFFFF, .shost_groups = qla4xxx_host_groups, .host_reset = qla4xxx_host_reset, .vendor_id = SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC, }; static struct iscsi_transport qla4xxx_iscsi_transport = { .owner = THIS_MODULE, .name = DRIVER_NAME, .caps = CAP_TEXT_NEGO | CAP_DATA_PATH_OFFLOAD | CAP_HDRDGST | CAP_DATADGST | CAP_LOGIN_OFFLOAD | CAP_MULTI_R2T, .attr_is_visible = qla4_attr_is_visible, .create_session = qla4xxx_session_create, .destroy_session = qla4xxx_session_destroy, .start_conn = qla4xxx_conn_start, .create_conn = qla4xxx_conn_create, .bind_conn = qla4xxx_conn_bind, .unbind_conn = iscsi_conn_unbind, .stop_conn = iscsi_conn_stop, .destroy_conn = qla4xxx_conn_destroy, .set_param = iscsi_set_param, .get_conn_param = qla4xxx_conn_get_param, .get_session_param = qla4xxx_session_get_param, .get_ep_param = qla4xxx_get_ep_param, .ep_connect = qla4xxx_ep_connect, .ep_poll = qla4xxx_ep_poll, .ep_disconnect = qla4xxx_ep_disconnect, .get_stats = qla4xxx_conn_get_stats, 
.send_pdu = iscsi_conn_send_pdu, .xmit_task = qla4xxx_task_xmit, .cleanup_task = qla4xxx_task_cleanup, .alloc_pdu = qla4xxx_alloc_pdu, .get_host_param = qla4xxx_host_get_param, .set_iface_param = qla4xxx_iface_set_param, .get_iface_param = qla4xxx_get_iface_param, .bsg_request = qla4xxx_bsg_request, .send_ping = qla4xxx_send_ping, .get_chap = qla4xxx_get_chap_list, .delete_chap = qla4xxx_delete_chap, .set_chap = qla4xxx_set_chap_entry, .get_flashnode_param = qla4xxx_sysfs_ddb_get_param, .set_flashnode_param = qla4xxx_sysfs_ddb_set_param, .new_flashnode = qla4xxx_sysfs_ddb_add, .del_flashnode = qla4xxx_sysfs_ddb_delete, .login_flashnode = qla4xxx_sysfs_ddb_login, .logout_flashnode = qla4xxx_sysfs_ddb_logout, .logout_flashnode_sid = qla4xxx_sysfs_ddb_logout_sid, .get_host_stats = qla4xxx_get_host_stats, }; static struct scsi_transport_template *qla4xxx_scsi_transport; static int qla4xxx_isp_check_reg(struct scsi_qla_host *ha) { u32 reg_val = 0; int rval = QLA_SUCCESS; if (is_qla8022(ha)) reg_val = readl(&ha->qla4_82xx_reg->host_status); else if (is_qla8032(ha) || is_qla8042(ha)) reg_val = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_ALIVE_COUNTER); else reg_val = readw(&ha->reg->ctrl_status); if (reg_val == QL4_ISP_REG_DISCONNECT) rval = QLA_ERROR; return rval; } static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num, uint32_t iface_type, uint32_t payload_size, uint32_t pid, struct sockaddr *dst_addr) { struct scsi_qla_host *ha = to_qla_host(shost); struct sockaddr_in *addr; struct sockaddr_in6 *addr6; uint32_t options = 0; uint8_t ipaddr[IPv6_ADDR_LEN]; int rval; memset(ipaddr, 0, IPv6_ADDR_LEN); /* IPv4 to IPv4 */ if ((iface_type == ISCSI_IFACE_TYPE_IPV4) && (dst_addr->sa_family == AF_INET)) { addr = (struct sockaddr_in *)dst_addr; memcpy(ipaddr, &addr->sin_addr.s_addr, IP_ADDR_LEN); DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv4 Ping src: %pI4 " "dest: %pI4\n", __func__, &ha->ip_config.ip_address, ipaddr)); rval = qla4xxx_ping_iocb(ha, options, payload_size, pid, ipaddr); if (rval) rval = -EINVAL; } else if ((iface_type == ISCSI_IFACE_TYPE_IPV6) && (dst_addr->sa_family == AF_INET6)) { /* IPv6 to IPv6 */ addr6 = (struct sockaddr_in6 *)dst_addr; memcpy(ipaddr, &addr6->sin6_addr.in6_u.u6_addr8, IPv6_ADDR_LEN); options |= PING_IPV6_PROTOCOL_ENABLE; /* Ping using LinkLocal address */ if ((iface_num == 0) || (iface_num == 1)) { DEBUG2(ql4_printk(KERN_INFO, ha, "%s: LinkLocal Ping " "src: %pI6 dest: %pI6\n", __func__, &ha->ip_config.ipv6_link_local_addr, ipaddr)); options |= PING_IPV6_LINKLOCAL_ADDR; rval = qla4xxx_ping_iocb(ha, options, payload_size, pid, ipaddr); } else { ql4_printk(KERN_WARNING, ha, "%s: iface num = %d " "not supported\n", __func__, iface_num); rval = -ENOSYS; goto exit_send_ping; } /* * If ping using LinkLocal address fails, try ping using * IPv6 address */ if (rval != QLA_SUCCESS) { options &= ~PING_IPV6_LINKLOCAL_ADDR; if (iface_num == 0) { options |= PING_IPV6_ADDR0; DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 " "Ping src: %pI6 " "dest: %pI6\n", __func__, &ha->ip_config.ipv6_addr0, ipaddr)); } else if (iface_num == 1) { options |= PING_IPV6_ADDR1; DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 " "Ping src: %pI6 " "dest: %pI6\n", __func__, &ha->ip_config.ipv6_addr1, ipaddr)); } rval = qla4xxx_ping_iocb(ha, options, payload_size, pid, ipaddr); if (rval) rval = -EINVAL; } } else rval = -ENOSYS; exit_send_ping: return rval; } static umode_t qla4_attr_is_visible(int param_type, int param) { switch (param_type) { case ISCSI_HOST_PARAM: switch (param) { case 
ISCSI_HOST_PARAM_HWADDRESS: case ISCSI_HOST_PARAM_IPADDRESS: case ISCSI_HOST_PARAM_INITIATOR_NAME: case ISCSI_HOST_PARAM_PORT_STATE: case ISCSI_HOST_PARAM_PORT_SPEED: return S_IRUGO; default: return 0; } case ISCSI_PARAM: switch (param) { case ISCSI_PARAM_PERSISTENT_ADDRESS: case ISCSI_PARAM_PERSISTENT_PORT: case ISCSI_PARAM_CONN_ADDRESS: case ISCSI_PARAM_CONN_PORT: case ISCSI_PARAM_TARGET_NAME: case ISCSI_PARAM_TPGT: case ISCSI_PARAM_TARGET_ALIAS: case ISCSI_PARAM_MAX_BURST: case ISCSI_PARAM_MAX_R2T: case ISCSI_PARAM_FIRST_BURST: case ISCSI_PARAM_MAX_RECV_DLENGTH: case ISCSI_PARAM_MAX_XMIT_DLENGTH: case ISCSI_PARAM_IFACE_NAME: case ISCSI_PARAM_CHAP_OUT_IDX: case ISCSI_PARAM_CHAP_IN_IDX: case ISCSI_PARAM_USERNAME: case ISCSI_PARAM_PASSWORD: case ISCSI_PARAM_USERNAME_IN: case ISCSI_PARAM_PASSWORD_IN: case ISCSI_PARAM_AUTO_SND_TGT_DISABLE: case ISCSI_PARAM_DISCOVERY_SESS: case ISCSI_PARAM_PORTAL_TYPE: case ISCSI_PARAM_CHAP_AUTH_EN: case ISCSI_PARAM_DISCOVERY_LOGOUT_EN: case ISCSI_PARAM_BIDI_CHAP_EN: case ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL: case ISCSI_PARAM_DEF_TIME2WAIT: case ISCSI_PARAM_DEF_TIME2RETAIN: case ISCSI_PARAM_HDRDGST_EN: case ISCSI_PARAM_DATADGST_EN: case ISCSI_PARAM_INITIAL_R2T_EN: case ISCSI_PARAM_IMM_DATA_EN: case ISCSI_PARAM_PDU_INORDER_EN: case ISCSI_PARAM_DATASEQ_INORDER_EN: case ISCSI_PARAM_MAX_SEGMENT_SIZE: case ISCSI_PARAM_TCP_TIMESTAMP_STAT: case ISCSI_PARAM_TCP_WSF_DISABLE: case ISCSI_PARAM_TCP_NAGLE_DISABLE: case ISCSI_PARAM_TCP_TIMER_SCALE: case ISCSI_PARAM_TCP_TIMESTAMP_EN: case ISCSI_PARAM_TCP_XMIT_WSF: case ISCSI_PARAM_TCP_RECV_WSF: case ISCSI_PARAM_IP_FRAGMENT_DISABLE: case ISCSI_PARAM_IPV4_TOS: case ISCSI_PARAM_IPV6_TC: case ISCSI_PARAM_IPV6_FLOW_LABEL: case ISCSI_PARAM_IS_FW_ASSIGNED_IPV6: case ISCSI_PARAM_KEEPALIVE_TMO: case ISCSI_PARAM_LOCAL_PORT: case ISCSI_PARAM_ISID: case ISCSI_PARAM_TSID: case ISCSI_PARAM_DEF_TASKMGMT_TMO: case ISCSI_PARAM_ERL: case ISCSI_PARAM_STATSN: case ISCSI_PARAM_EXP_STATSN: case ISCSI_PARAM_DISCOVERY_PARENT_IDX: case ISCSI_PARAM_DISCOVERY_PARENT_TYPE: case ISCSI_PARAM_LOCAL_IPADDR: return S_IRUGO; default: return 0; } case ISCSI_NET_PARAM: switch (param) { case ISCSI_NET_PARAM_IPV4_ADDR: case ISCSI_NET_PARAM_IPV4_SUBNET: case ISCSI_NET_PARAM_IPV4_GW: case ISCSI_NET_PARAM_IPV4_BOOTPROTO: case ISCSI_NET_PARAM_IFACE_ENABLE: case ISCSI_NET_PARAM_IPV6_LINKLOCAL: case ISCSI_NET_PARAM_IPV6_ADDR: case ISCSI_NET_PARAM_IPV6_ROUTER: case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG: case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG: case ISCSI_NET_PARAM_VLAN_ID: case ISCSI_NET_PARAM_VLAN_PRIORITY: case ISCSI_NET_PARAM_VLAN_ENABLED: case ISCSI_NET_PARAM_MTU: case ISCSI_NET_PARAM_PORT: case ISCSI_NET_PARAM_IPADDR_STATE: case ISCSI_NET_PARAM_IPV6_LINKLOCAL_STATE: case ISCSI_NET_PARAM_IPV6_ROUTER_STATE: case ISCSI_NET_PARAM_DELAYED_ACK_EN: case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE: case ISCSI_NET_PARAM_TCP_WSF_DISABLE: case ISCSI_NET_PARAM_TCP_WSF: case ISCSI_NET_PARAM_TCP_TIMER_SCALE: case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN: case ISCSI_NET_PARAM_CACHE_ID: case ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN: case ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN: case ISCSI_NET_PARAM_IPV4_TOS_EN: case ISCSI_NET_PARAM_IPV4_TOS: case ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN: case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN: case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID: case ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN: case ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN: case ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID: case ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN: case ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE: case 
ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN: case ISCSI_NET_PARAM_REDIRECT_EN: case ISCSI_NET_PARAM_IPV4_TTL: case ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN: case ISCSI_NET_PARAM_IPV6_MLD_EN: case ISCSI_NET_PARAM_IPV6_FLOW_LABEL: case ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS: case ISCSI_NET_PARAM_IPV6_HOP_LIMIT: case ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO: case ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME: case ISCSI_NET_PARAM_IPV6_ND_STALE_TMO: case ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT: case ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU: return S_IRUGO; default: return 0; } case ISCSI_IFACE_PARAM: switch (param) { case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO: case ISCSI_IFACE_PARAM_HDRDGST_EN: case ISCSI_IFACE_PARAM_DATADGST_EN: case ISCSI_IFACE_PARAM_IMM_DATA_EN: case ISCSI_IFACE_PARAM_INITIAL_R2T_EN: case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN: case ISCSI_IFACE_PARAM_PDU_INORDER_EN: case ISCSI_IFACE_PARAM_ERL: case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH: case ISCSI_IFACE_PARAM_FIRST_BURST: case ISCSI_IFACE_PARAM_MAX_R2T: case ISCSI_IFACE_PARAM_MAX_BURST: case ISCSI_IFACE_PARAM_CHAP_AUTH_EN: case ISCSI_IFACE_PARAM_BIDI_CHAP_EN: case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL: case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN: case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN: case ISCSI_IFACE_PARAM_INITIATOR_NAME: return S_IRUGO; default: return 0; } case ISCSI_FLASHNODE_PARAM: switch (param) { case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6: case ISCSI_FLASHNODE_PORTAL_TYPE: case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE: case ISCSI_FLASHNODE_DISCOVERY_SESS: case ISCSI_FLASHNODE_ENTRY_EN: case ISCSI_FLASHNODE_HDR_DGST_EN: case ISCSI_FLASHNODE_DATA_DGST_EN: case ISCSI_FLASHNODE_IMM_DATA_EN: case ISCSI_FLASHNODE_INITIAL_R2T_EN: case ISCSI_FLASHNODE_DATASEQ_INORDER: case ISCSI_FLASHNODE_PDU_INORDER: case ISCSI_FLASHNODE_CHAP_AUTH_EN: case ISCSI_FLASHNODE_SNACK_REQ_EN: case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN: case ISCSI_FLASHNODE_BIDI_CHAP_EN: case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL: case ISCSI_FLASHNODE_ERL: case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT: case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE: case ISCSI_FLASHNODE_TCP_WSF_DISABLE: case ISCSI_FLASHNODE_TCP_TIMER_SCALE: case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN: case ISCSI_FLASHNODE_IP_FRAG_DISABLE: case ISCSI_FLASHNODE_MAX_RECV_DLENGTH: case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH: case ISCSI_FLASHNODE_FIRST_BURST: case ISCSI_FLASHNODE_DEF_TIME2WAIT: case ISCSI_FLASHNODE_DEF_TIME2RETAIN: case ISCSI_FLASHNODE_MAX_R2T: case ISCSI_FLASHNODE_KEEPALIVE_TMO: case ISCSI_FLASHNODE_ISID: case ISCSI_FLASHNODE_TSID: case ISCSI_FLASHNODE_PORT: case ISCSI_FLASHNODE_MAX_BURST: case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO: case ISCSI_FLASHNODE_IPADDR: case ISCSI_FLASHNODE_ALIAS: case ISCSI_FLASHNODE_REDIRECT_IPADDR: case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE: case ISCSI_FLASHNODE_LOCAL_PORT: case ISCSI_FLASHNODE_IPV4_TOS: case ISCSI_FLASHNODE_IPV6_TC: case ISCSI_FLASHNODE_IPV6_FLOW_LABEL: case ISCSI_FLASHNODE_NAME: case ISCSI_FLASHNODE_TPGT: case ISCSI_FLASHNODE_LINK_LOCAL_IPV6: case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX: case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE: case ISCSI_FLASHNODE_TCP_XMIT_WSF: case ISCSI_FLASHNODE_TCP_RECV_WSF: case ISCSI_FLASHNODE_CHAP_OUT_IDX: case ISCSI_FLASHNODE_USERNAME: case ISCSI_FLASHNODE_PASSWORD: case ISCSI_FLASHNODE_STATSN: case ISCSI_FLASHNODE_EXP_STATSN: case ISCSI_FLASHNODE_IS_BOOT_TGT: return S_IRUGO; default: return 0; } } return 0; } /** * qla4xxx_create_chap_list - Create CHAP list from FLASH * @ha: pointer to adapter structure * * Read flash and make a list of CHAP entries, during login when a CHAP 
entry * is received, it will be checked in this list. If entry exist then the CHAP * entry index is set in the DDB. If CHAP entry does not exist in this list * then a new entry is added in FLASH in CHAP table and the index obtained is * used in the DDB. **/ static void qla4xxx_create_chap_list(struct scsi_qla_host *ha) { int rval = 0; uint8_t *chap_flash_data = NULL; uint32_t offset; dma_addr_t chap_dma; uint32_t chap_size = 0; if (is_qla40XX(ha)) chap_size = MAX_CHAP_ENTRIES_40XX * sizeof(struct ql4_chap_table); else /* Single region contains CHAP info for both * ports which is divided into half for each port. */ chap_size = ha->hw.flt_chap_size / 2; chap_flash_data = dma_alloc_coherent(&ha->pdev->dev, chap_size, &chap_dma, GFP_KERNEL); if (!chap_flash_data) { ql4_printk(KERN_ERR, ha, "No memory for chap_flash_data\n"); return; } if (is_qla40XX(ha)) { offset = FLASH_CHAP_OFFSET; } else { offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2); if (ha->port_num == 1) offset += chap_size; } rval = qla4xxx_get_flash(ha, chap_dma, offset, chap_size); if (rval != QLA_SUCCESS) goto exit_chap_list; if (ha->chap_list == NULL) ha->chap_list = vmalloc(chap_size); if (ha->chap_list == NULL) { ql4_printk(KERN_ERR, ha, "No memory for ha->chap_list\n"); goto exit_chap_list; } memcpy(ha->chap_list, chap_flash_data, chap_size); exit_chap_list: dma_free_coherent(&ha->pdev->dev, chap_size, chap_flash_data, chap_dma); } static int qla4xxx_get_chap_by_index(struct scsi_qla_host *ha, int16_t chap_index, struct ql4_chap_table **chap_entry) { int rval = QLA_ERROR; int max_chap_entries; if (!ha->chap_list) { ql4_printk(KERN_ERR, ha, "CHAP table cache is empty!\n"); goto exit_get_chap; } if (is_qla80XX(ha)) max_chap_entries = (ha->hw.flt_chap_size / 2) / sizeof(struct ql4_chap_table); else max_chap_entries = MAX_CHAP_ENTRIES_40XX; if (chap_index > max_chap_entries) { ql4_printk(KERN_ERR, ha, "Invalid Chap index\n"); goto exit_get_chap; } *chap_entry = (struct ql4_chap_table *)ha->chap_list + chap_index; if ((*chap_entry)->cookie != cpu_to_le16(CHAP_VALID_COOKIE)) { *chap_entry = NULL; } else { rval = QLA_SUCCESS; } exit_get_chap: return rval; } /** * qla4xxx_find_free_chap_index - Find the first free chap index * @ha: pointer to adapter structure * @chap_index: CHAP index to be returned * * Find the first free chap index available in the chap table * * Note: Caller should acquire the chap lock before getting here. 
**/ static int qla4xxx_find_free_chap_index(struct scsi_qla_host *ha, uint16_t *chap_index) { int i, rval; int free_index = -1; int max_chap_entries = 0; struct ql4_chap_table *chap_table; if (is_qla80XX(ha)) max_chap_entries = (ha->hw.flt_chap_size / 2) / sizeof(struct ql4_chap_table); else max_chap_entries = MAX_CHAP_ENTRIES_40XX; if (!ha->chap_list) { ql4_printk(KERN_ERR, ha, "CHAP table cache is empty!\n"); rval = QLA_ERROR; goto exit_find_chap; } for (i = 0; i < max_chap_entries; i++) { chap_table = (struct ql4_chap_table *)ha->chap_list + i; if ((chap_table->cookie != cpu_to_le16(CHAP_VALID_COOKIE)) && (i > MAX_RESRV_CHAP_IDX)) { free_index = i; break; } } if (free_index != -1) { *chap_index = free_index; rval = QLA_SUCCESS; } else { rval = QLA_ERROR; } exit_find_chap: return rval; } static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx, uint32_t *num_entries, char *buf) { struct scsi_qla_host *ha = to_qla_host(shost); struct ql4_chap_table *chap_table; struct iscsi_chap_rec *chap_rec; int max_chap_entries = 0; int valid_chap_entries = 0; int ret = 0, i; if (is_qla80XX(ha)) max_chap_entries = (ha->hw.flt_chap_size / 2) / sizeof(struct ql4_chap_table); else max_chap_entries = MAX_CHAP_ENTRIES_40XX; ql4_printk(KERN_INFO, ha, "%s: num_entries = %d, CHAP idx = %d\n", __func__, *num_entries, chap_tbl_idx); if (!buf) { ret = -ENOMEM; goto exit_get_chap_list; } qla4xxx_create_chap_list(ha); chap_rec = (struct iscsi_chap_rec *) buf; mutex_lock(&ha->chap_sem); for (i = chap_tbl_idx; i < max_chap_entries; i++) { chap_table = (struct ql4_chap_table *)ha->chap_list + i; if (chap_table->cookie != cpu_to_le16(CHAP_VALID_COOKIE)) continue; chap_rec->chap_tbl_idx = i; strscpy(chap_rec->username, chap_table->name, ISCSI_CHAP_AUTH_NAME_MAX_LEN); strscpy(chap_rec->password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN); chap_rec->password_length = chap_table->secret_len; if (chap_table->flags & BIT_7) /* local */ chap_rec->chap_type = CHAP_TYPE_OUT; if (chap_table->flags & BIT_6) /* peer */ chap_rec->chap_type = CHAP_TYPE_IN; chap_rec++; valid_chap_entries++; if (valid_chap_entries == *num_entries) break; } mutex_unlock(&ha->chap_sem); exit_get_chap_list: ql4_printk(KERN_INFO, ha, "%s: Valid CHAP Entries = %d\n", __func__, valid_chap_entries); *num_entries = valid_chap_entries; return ret; } static int __qla4xxx_is_chap_active(struct device *dev, void *data) { int ret = 0; uint16_t *chap_tbl_idx = (uint16_t *) data; struct iscsi_cls_session *cls_session; struct iscsi_session *sess; struct ddb_entry *ddb_entry; if (!iscsi_is_session_dev(dev)) goto exit_is_chap_active; cls_session = iscsi_dev_to_session(dev); sess = cls_session->dd_data; ddb_entry = sess->dd_data; if (iscsi_is_session_online(cls_session)) goto exit_is_chap_active; if (ddb_entry->chap_tbl_idx == *chap_tbl_idx) ret = 1; exit_is_chap_active: return ret; } static int qla4xxx_is_chap_active(struct Scsi_Host *shost, uint16_t chap_tbl_idx) { int ret = 0; ret = device_for_each_child(&shost->shost_gendev, &chap_tbl_idx, __qla4xxx_is_chap_active); return ret; } static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx) { struct scsi_qla_host *ha = to_qla_host(shost); struct ql4_chap_table *chap_table; dma_addr_t chap_dma; int max_chap_entries = 0; uint32_t offset = 0; uint32_t chap_size; int ret = 0; chap_table = dma_pool_zalloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma); if (chap_table == NULL) return -ENOMEM; if (is_qla80XX(ha)) max_chap_entries = (ha->hw.flt_chap_size / 2) / sizeof(struct 
ql4_chap_table);
	else
		max_chap_entries = MAX_CHAP_ENTRIES_40XX;

	if (chap_tbl_idx > max_chap_entries) {
		ret = -EINVAL;
		goto exit_delete_chap;
	}

	/* Check if chap index is in use.
	 * If chap is in use, do not delete the chap entry.
	 */
	ret = qla4xxx_is_chap_active(shost, chap_tbl_idx);
	if (ret) {
		ql4_printk(KERN_INFO, ha, "CHAP entry %d is in use, cannot "
			   "delete from flash\n", chap_tbl_idx);
		ret = -EBUSY;
		goto exit_delete_chap;
	}

	chap_size = sizeof(struct ql4_chap_table);
	if (is_qla40XX(ha))
		offset = FLASH_CHAP_OFFSET | (chap_tbl_idx * chap_size);
	else {
		offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
		/* flt_chap_size is CHAP table size for both ports
		 * so divide it by 2 to calculate the offset for second port
		 */
		if (ha->port_num == 1)
			offset += (ha->hw.flt_chap_size / 2);
		offset += (chap_tbl_idx * chap_size);
	}

	ret = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
	if (ret != QLA_SUCCESS) {
		ret = -EINVAL;
		goto exit_delete_chap;
	}

	DEBUG2(ql4_printk(KERN_INFO, ha, "Chap Cookie: x%x\n",
			  __le16_to_cpu(chap_table->cookie)));

	if (__le16_to_cpu(chap_table->cookie) != CHAP_VALID_COOKIE) {
		ql4_printk(KERN_ERR, ha, "No valid chap entry found\n");
		goto exit_delete_chap;
	}

	chap_table->cookie = cpu_to_le16(0xFFFF);

	offset = FLASH_CHAP_OFFSET |
			(chap_tbl_idx * sizeof(struct ql4_chap_table));
	ret = qla4xxx_set_flash(ha, chap_dma, offset, chap_size,
				FLASH_OPT_RMW_COMMIT);

	if (ret == QLA_SUCCESS && ha->chap_list) {
		mutex_lock(&ha->chap_sem);
		/* Update ha chap_list cache */
		memcpy((struct ql4_chap_table *)ha->chap_list + chap_tbl_idx,
			chap_table, sizeof(struct ql4_chap_table));
		mutex_unlock(&ha->chap_sem);
	}
	if (ret != QLA_SUCCESS)
		ret = -EINVAL;

exit_delete_chap:
	dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma);
	return ret;
}

/**
 * qla4xxx_set_chap_entry - Make chap entry with given information
 * @shost: pointer to host
 * @data: chap info - credentials, index and type to make chap entry
 * @len: length of data
 *
 * Add or update chap entry with the given information
 **/
static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void *data, int len)
{
	struct scsi_qla_host *ha = to_qla_host(shost);
	struct iscsi_chap_rec chap_rec;
	struct ql4_chap_table *chap_entry = NULL;
	struct iscsi_param_info *param_info;
	struct nlattr *attr;
	int max_chap_entries = 0;
	int type;
	int rem = len;
	int rc = 0;
	int size;

	memset(&chap_rec, 0, sizeof(chap_rec));

	nla_for_each_attr(attr, data, len, rem) {
		if (nla_len(attr) < sizeof(*param_info)) {
			rc = -EINVAL;
			goto exit_set_chap;
		}

		param_info = nla_data(attr);

		switch (param_info->param) {
		case ISCSI_CHAP_PARAM_INDEX:
			chap_rec.chap_tbl_idx = *(uint16_t *)param_info->value;
			break;
		case ISCSI_CHAP_PARAM_CHAP_TYPE:
			chap_rec.chap_type = param_info->value[0];
			break;
		case ISCSI_CHAP_PARAM_USERNAME:
			size = min_t(size_t, sizeof(chap_rec.username),
				     param_info->len);
			memcpy(chap_rec.username, param_info->value, size);
			break;
		case ISCSI_CHAP_PARAM_PASSWORD:
			size = min_t(size_t, sizeof(chap_rec.password),
				     param_info->len);
			memcpy(chap_rec.password, param_info->value, size);
			break;
		case ISCSI_CHAP_PARAM_PASSWORD_LEN:
			chap_rec.password_length = param_info->value[0];
			break;
		default:
			ql4_printk(KERN_ERR, ha,
				   "%s: No such sysfs attribute\n", __func__);
			rc = -ENOSYS;
			goto exit_set_chap;
		}
	}

	if (chap_rec.chap_type == CHAP_TYPE_IN)
		type = BIDI_CHAP;
	else
		type = LOCAL_CHAP;

	if (is_qla80XX(ha))
		max_chap_entries = (ha->hw.flt_chap_size / 2) /
				   sizeof(struct ql4_chap_table);
	else
		max_chap_entries = MAX_CHAP_ENTRIES_40XX;

	mutex_lock(&ha->chap_sem);
	if (chap_rec.chap_tbl_idx < max_chap_entries) {
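		/* An index below max_chap_entries that already holds a valid
		 * entry may only be rewritten if the CHAP type matches and no
		 * active session is using it; an out-of-range index instead
		 * causes a free slot to be picked below.
		 */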
rc = qla4xxx_get_chap_by_index(ha, chap_rec.chap_tbl_idx, &chap_entry); if (!rc) { if (!(type == qla4xxx_get_chap_type(chap_entry))) { ql4_printk(KERN_INFO, ha, "Type mismatch for CHAP entry %d\n", chap_rec.chap_tbl_idx); rc = -EINVAL; goto exit_unlock_chap; } /* If chap index is in use then don't modify it */ rc = qla4xxx_is_chap_active(shost, chap_rec.chap_tbl_idx); if (rc) { ql4_printk(KERN_INFO, ha, "CHAP entry %d is in use\n", chap_rec.chap_tbl_idx); rc = -EBUSY; goto exit_unlock_chap; } } } else { rc = qla4xxx_find_free_chap_index(ha, &chap_rec.chap_tbl_idx); if (rc) { ql4_printk(KERN_INFO, ha, "CHAP entry not available\n"); rc = -EBUSY; goto exit_unlock_chap; } } rc = qla4xxx_set_chap(ha, chap_rec.username, chap_rec.password, chap_rec.chap_tbl_idx, type); exit_unlock_chap: mutex_unlock(&ha->chap_sem); exit_set_chap: return rc; } static int qla4xxx_get_host_stats(struct Scsi_Host *shost, char *buf, int len) { struct scsi_qla_host *ha = to_qla_host(shost); struct iscsi_offload_host_stats *host_stats = NULL; int host_stats_size; int ret = 0; int ddb_idx = 0; struct ql_iscsi_stats *ql_iscsi_stats = NULL; int stats_size; dma_addr_t iscsi_stats_dma; DEBUG2(ql4_printk(KERN_INFO, ha, "Func: %s\n", __func__)); host_stats_size = sizeof(struct iscsi_offload_host_stats); if (host_stats_size != len) { ql4_printk(KERN_INFO, ha, "%s: host_stats size mismatch expected = %d, is = %d\n", __func__, len, host_stats_size); ret = -EINVAL; goto exit_host_stats; } host_stats = (struct iscsi_offload_host_stats *)buf; if (!buf) { ret = -ENOMEM; goto exit_host_stats; } stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats)); ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size, &iscsi_stats_dma, GFP_KERNEL); if (!ql_iscsi_stats) { ql4_printk(KERN_ERR, ha, "Unable to allocate memory for iscsi stats\n"); ret = -ENOMEM; goto exit_host_stats; } ret = qla4xxx_get_mgmt_data(ha, ddb_idx, stats_size, iscsi_stats_dma); if (ret != QLA_SUCCESS) { ql4_printk(KERN_ERR, ha, "Unable to retrieve iscsi stats\n"); ret = -EIO; goto exit_host_stats; } host_stats->mactx_frames = le64_to_cpu(ql_iscsi_stats->mac_tx_frames); host_stats->mactx_bytes = le64_to_cpu(ql_iscsi_stats->mac_tx_bytes); host_stats->mactx_multicast_frames = le64_to_cpu(ql_iscsi_stats->mac_tx_multicast_frames); host_stats->mactx_broadcast_frames = le64_to_cpu(ql_iscsi_stats->mac_tx_broadcast_frames); host_stats->mactx_pause_frames = le64_to_cpu(ql_iscsi_stats->mac_tx_pause_frames); host_stats->mactx_control_frames = le64_to_cpu(ql_iscsi_stats->mac_tx_control_frames); host_stats->mactx_deferral = le64_to_cpu(ql_iscsi_stats->mac_tx_deferral); host_stats->mactx_excess_deferral = le64_to_cpu(ql_iscsi_stats->mac_tx_excess_deferral); host_stats->mactx_late_collision = le64_to_cpu(ql_iscsi_stats->mac_tx_late_collision); host_stats->mactx_abort = le64_to_cpu(ql_iscsi_stats->mac_tx_abort); host_stats->mactx_single_collision = le64_to_cpu(ql_iscsi_stats->mac_tx_single_collision); host_stats->mactx_multiple_collision = le64_to_cpu(ql_iscsi_stats->mac_tx_multiple_collision); host_stats->mactx_collision = le64_to_cpu(ql_iscsi_stats->mac_tx_collision); host_stats->mactx_frames_dropped = le64_to_cpu(ql_iscsi_stats->mac_tx_frames_dropped); host_stats->mactx_jumbo_frames = le64_to_cpu(ql_iscsi_stats->mac_tx_jumbo_frames); host_stats->macrx_frames = le64_to_cpu(ql_iscsi_stats->mac_rx_frames); host_stats->macrx_bytes = le64_to_cpu(ql_iscsi_stats->mac_rx_bytes); host_stats->macrx_unknown_control_frames = le64_to_cpu(ql_iscsi_stats->mac_rx_unknown_control_frames); 
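	/* The remaining MAC, IP, IPv6, TCP and iSCSI counters copied below are
	 * 64-bit little-endian values reported by the firmware.
	 */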
host_stats->macrx_pause_frames = le64_to_cpu(ql_iscsi_stats->mac_rx_pause_frames); host_stats->macrx_control_frames = le64_to_cpu(ql_iscsi_stats->mac_rx_control_frames); host_stats->macrx_dribble = le64_to_cpu(ql_iscsi_stats->mac_rx_dribble); host_stats->macrx_frame_length_error = le64_to_cpu(ql_iscsi_stats->mac_rx_frame_length_error); host_stats->macrx_jabber = le64_to_cpu(ql_iscsi_stats->mac_rx_jabber); host_stats->macrx_carrier_sense_error = le64_to_cpu(ql_iscsi_stats->mac_rx_carrier_sense_error); host_stats->macrx_frame_discarded = le64_to_cpu(ql_iscsi_stats->mac_rx_frame_discarded); host_stats->macrx_frames_dropped = le64_to_cpu(ql_iscsi_stats->mac_rx_frames_dropped); host_stats->mac_crc_error = le64_to_cpu(ql_iscsi_stats->mac_crc_error); host_stats->mac_encoding_error = le64_to_cpu(ql_iscsi_stats->mac_encoding_error); host_stats->macrx_length_error_large = le64_to_cpu(ql_iscsi_stats->mac_rx_length_error_large); host_stats->macrx_length_error_small = le64_to_cpu(ql_iscsi_stats->mac_rx_length_error_small); host_stats->macrx_multicast_frames = le64_to_cpu(ql_iscsi_stats->mac_rx_multicast_frames); host_stats->macrx_broadcast_frames = le64_to_cpu(ql_iscsi_stats->mac_rx_broadcast_frames); host_stats->iptx_packets = le64_to_cpu(ql_iscsi_stats->ip_tx_packets); host_stats->iptx_bytes = le64_to_cpu(ql_iscsi_stats->ip_tx_bytes); host_stats->iptx_fragments = le64_to_cpu(ql_iscsi_stats->ip_tx_fragments); host_stats->iprx_packets = le64_to_cpu(ql_iscsi_stats->ip_rx_packets); host_stats->iprx_bytes = le64_to_cpu(ql_iscsi_stats->ip_rx_bytes); host_stats->iprx_fragments = le64_to_cpu(ql_iscsi_stats->ip_rx_fragments); host_stats->ip_datagram_reassembly = le64_to_cpu(ql_iscsi_stats->ip_datagram_reassembly); host_stats->ip_invalid_address_error = le64_to_cpu(ql_iscsi_stats->ip_invalid_address_error); host_stats->ip_error_packets = le64_to_cpu(ql_iscsi_stats->ip_error_packets); host_stats->ip_fragrx_overlap = le64_to_cpu(ql_iscsi_stats->ip_fragrx_overlap); host_stats->ip_fragrx_outoforder = le64_to_cpu(ql_iscsi_stats->ip_fragrx_outoforder); host_stats->ip_datagram_reassembly_timeout = le64_to_cpu(ql_iscsi_stats->ip_datagram_reassembly_timeout); host_stats->ipv6tx_packets = le64_to_cpu(ql_iscsi_stats->ipv6_tx_packets); host_stats->ipv6tx_bytes = le64_to_cpu(ql_iscsi_stats->ipv6_tx_bytes); host_stats->ipv6tx_fragments = le64_to_cpu(ql_iscsi_stats->ipv6_tx_fragments); host_stats->ipv6rx_packets = le64_to_cpu(ql_iscsi_stats->ipv6_rx_packets); host_stats->ipv6rx_bytes = le64_to_cpu(ql_iscsi_stats->ipv6_rx_bytes); host_stats->ipv6rx_fragments = le64_to_cpu(ql_iscsi_stats->ipv6_rx_fragments); host_stats->ipv6_datagram_reassembly = le64_to_cpu(ql_iscsi_stats->ipv6_datagram_reassembly); host_stats->ipv6_invalid_address_error = le64_to_cpu(ql_iscsi_stats->ipv6_invalid_address_error); host_stats->ipv6_error_packets = le64_to_cpu(ql_iscsi_stats->ipv6_error_packets); host_stats->ipv6_fragrx_overlap = le64_to_cpu(ql_iscsi_stats->ipv6_fragrx_overlap); host_stats->ipv6_fragrx_outoforder = le64_to_cpu(ql_iscsi_stats->ipv6_fragrx_outoforder); host_stats->ipv6_datagram_reassembly_timeout = le64_to_cpu(ql_iscsi_stats->ipv6_datagram_reassembly_timeout); host_stats->tcptx_segments = le64_to_cpu(ql_iscsi_stats->tcp_tx_segments); host_stats->tcptx_bytes = le64_to_cpu(ql_iscsi_stats->tcp_tx_bytes); host_stats->tcprx_segments = le64_to_cpu(ql_iscsi_stats->tcp_rx_segments); host_stats->tcprx_byte = le64_to_cpu(ql_iscsi_stats->tcp_rx_byte); host_stats->tcp_duplicate_ack_retx = le64_to_cpu(ql_iscsi_stats->tcp_duplicate_ack_retx); 
host_stats->tcp_retx_timer_expired = le64_to_cpu(ql_iscsi_stats->tcp_retx_timer_expired); host_stats->tcprx_duplicate_ack = le64_to_cpu(ql_iscsi_stats->tcp_rx_duplicate_ack); host_stats->tcprx_pure_ackr = le64_to_cpu(ql_iscsi_stats->tcp_rx_pure_ackr); host_stats->tcptx_delayed_ack = le64_to_cpu(ql_iscsi_stats->tcp_tx_delayed_ack); host_stats->tcptx_pure_ack = le64_to_cpu(ql_iscsi_stats->tcp_tx_pure_ack); host_stats->tcprx_segment_error = le64_to_cpu(ql_iscsi_stats->tcp_rx_segment_error); host_stats->tcprx_segment_outoforder = le64_to_cpu(ql_iscsi_stats->tcp_rx_segment_outoforder); host_stats->tcprx_window_probe = le64_to_cpu(ql_iscsi_stats->tcp_rx_window_probe); host_stats->tcprx_window_update = le64_to_cpu(ql_iscsi_stats->tcp_rx_window_update); host_stats->tcptx_window_probe_persist = le64_to_cpu(ql_iscsi_stats->tcp_tx_window_probe_persist); host_stats->ecc_error_correction = le64_to_cpu(ql_iscsi_stats->ecc_error_correction); host_stats->iscsi_pdu_tx = le64_to_cpu(ql_iscsi_stats->iscsi_pdu_tx); host_stats->iscsi_data_bytes_tx = le64_to_cpu(ql_iscsi_stats->iscsi_data_bytes_tx); host_stats->iscsi_pdu_rx = le64_to_cpu(ql_iscsi_stats->iscsi_pdu_rx); host_stats->iscsi_data_bytes_rx = le64_to_cpu(ql_iscsi_stats->iscsi_data_bytes_rx); host_stats->iscsi_io_completed = le64_to_cpu(ql_iscsi_stats->iscsi_io_completed); host_stats->iscsi_unexpected_io_rx = le64_to_cpu(ql_iscsi_stats->iscsi_unexpected_io_rx); host_stats->iscsi_format_error = le64_to_cpu(ql_iscsi_stats->iscsi_format_error); host_stats->iscsi_hdr_digest_error = le64_to_cpu(ql_iscsi_stats->iscsi_hdr_digest_error); host_stats->iscsi_data_digest_error = le64_to_cpu(ql_iscsi_stats->iscsi_data_digest_error); host_stats->iscsi_sequence_error = le64_to_cpu(ql_iscsi_stats->iscsi_sequence_error); exit_host_stats: if (ql_iscsi_stats) dma_free_coherent(&ha->pdev->dev, stats_size, ql_iscsi_stats, iscsi_stats_dma); ql4_printk(KERN_INFO, ha, "%s: Get host stats done\n", __func__); return ret; } static int qla4xxx_get_iface_param(struct iscsi_iface *iface, enum iscsi_param_type param_type, int param, char *buf) { struct Scsi_Host *shost = iscsi_iface_to_shost(iface); struct scsi_qla_host *ha = to_qla_host(shost); int ival; char *pval = NULL; int len = -ENOSYS; if (param_type == ISCSI_NET_PARAM) { switch (param) { case ISCSI_NET_PARAM_IPV4_ADDR: len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address); break; case ISCSI_NET_PARAM_IPV4_SUBNET: len = sprintf(buf, "%pI4\n", &ha->ip_config.subnet_mask); break; case ISCSI_NET_PARAM_IPV4_GW: len = sprintf(buf, "%pI4\n", &ha->ip_config.gateway); break; case ISCSI_NET_PARAM_IFACE_ENABLE: if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { OP_STATE(ha->ip_config.ipv4_options, IPOPT_IPV4_PROTOCOL_ENABLE, pval); } else { OP_STATE(ha->ip_config.ipv6_options, IPV6_OPT_IPV6_PROTOCOL_ENABLE, pval); } len = sprintf(buf, "%s\n", pval); break; case ISCSI_NET_PARAM_IPV4_BOOTPROTO: len = sprintf(buf, "%s\n", (ha->ip_config.tcp_options & TCPOPT_DHCP_ENABLE) ? 
"dhcp" : "static"); break; case ISCSI_NET_PARAM_IPV6_ADDR: if (iface->iface_num == 0) len = sprintf(buf, "%pI6\n", &ha->ip_config.ipv6_addr0); if (iface->iface_num == 1) len = sprintf(buf, "%pI6\n", &ha->ip_config.ipv6_addr1); break; case ISCSI_NET_PARAM_IPV6_LINKLOCAL: len = sprintf(buf, "%pI6\n", &ha->ip_config.ipv6_link_local_addr); break; case ISCSI_NET_PARAM_IPV6_ROUTER: len = sprintf(buf, "%pI6\n", &ha->ip_config.ipv6_default_router_addr); break; case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG: pval = (ha->ip_config.ipv6_addl_options & IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE) ? "nd" : "static"; len = sprintf(buf, "%s\n", pval); break; case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG: pval = (ha->ip_config.ipv6_addl_options & IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR) ? "auto" : "static"; len = sprintf(buf, "%s\n", pval); break; case ISCSI_NET_PARAM_VLAN_ID: if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) ival = ha->ip_config.ipv4_vlan_tag & ISCSI_MAX_VLAN_ID; else ival = ha->ip_config.ipv6_vlan_tag & ISCSI_MAX_VLAN_ID; len = sprintf(buf, "%d\n", ival); break; case ISCSI_NET_PARAM_VLAN_PRIORITY: if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) ival = (ha->ip_config.ipv4_vlan_tag >> 13) & ISCSI_MAX_VLAN_PRIORITY; else ival = (ha->ip_config.ipv6_vlan_tag >> 13) & ISCSI_MAX_VLAN_PRIORITY; len = sprintf(buf, "%d\n", ival); break; case ISCSI_NET_PARAM_VLAN_ENABLED: if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { OP_STATE(ha->ip_config.ipv4_options, IPOPT_VLAN_TAGGING_ENABLE, pval); } else { OP_STATE(ha->ip_config.ipv6_options, IPV6_OPT_VLAN_TAGGING_ENABLE, pval); } len = sprintf(buf, "%s\n", pval); break; case ISCSI_NET_PARAM_MTU: len = sprintf(buf, "%d\n", ha->ip_config.eth_mtu_size); break; case ISCSI_NET_PARAM_PORT: if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) len = sprintf(buf, "%d\n", ha->ip_config.ipv4_port); else len = sprintf(buf, "%d\n", ha->ip_config.ipv6_port); break; case ISCSI_NET_PARAM_IPADDR_STATE: if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { pval = iscsi_get_ipaddress_state_name( ha->ip_config.ipv4_addr_state); } else { if (iface->iface_num == 0) pval = iscsi_get_ipaddress_state_name( ha->ip_config.ipv6_addr0_state); else if (iface->iface_num == 1) pval = iscsi_get_ipaddress_state_name( ha->ip_config.ipv6_addr1_state); } len = sprintf(buf, "%s\n", pval); break; case ISCSI_NET_PARAM_IPV6_LINKLOCAL_STATE: pval = iscsi_get_ipaddress_state_name( ha->ip_config.ipv6_link_local_state); len = sprintf(buf, "%s\n", pval); break; case ISCSI_NET_PARAM_IPV6_ROUTER_STATE: pval = iscsi_get_router_state_name( ha->ip_config.ipv6_default_router_state); len = sprintf(buf, "%s\n", pval); break; case ISCSI_NET_PARAM_DELAYED_ACK_EN: if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { OP_STATE(~ha->ip_config.tcp_options, TCPOPT_DELAYED_ACK_DISABLE, pval); } else { OP_STATE(~ha->ip_config.ipv6_tcp_options, IPV6_TCPOPT_DELAYED_ACK_DISABLE, pval); } len = sprintf(buf, "%s\n", pval); break; case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE: if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { OP_STATE(~ha->ip_config.tcp_options, TCPOPT_NAGLE_ALGO_DISABLE, pval); } else { OP_STATE(~ha->ip_config.ipv6_tcp_options, IPV6_TCPOPT_NAGLE_ALGO_DISABLE, pval); } len = sprintf(buf, "%s\n", pval); break; case ISCSI_NET_PARAM_TCP_WSF_DISABLE: if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { OP_STATE(~ha->ip_config.tcp_options, TCPOPT_WINDOW_SCALE_DISABLE, pval); } else { OP_STATE(~ha->ip_config.ipv6_tcp_options, IPV6_TCPOPT_WINDOW_SCALE_DISABLE, pval); } len = sprintf(buf, "%s\n", pval); break; case ISCSI_NET_PARAM_TCP_WSF: if 
(iface->iface_type == ISCSI_IFACE_TYPE_IPV4) len = sprintf(buf, "%d\n", ha->ip_config.tcp_wsf); else len = sprintf(buf, "%d\n", ha->ip_config.ipv6_tcp_wsf); break; case ISCSI_NET_PARAM_TCP_TIMER_SCALE: if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) ival = (ha->ip_config.tcp_options & TCPOPT_TIMER_SCALE) >> 1; else ival = (ha->ip_config.ipv6_tcp_options & IPV6_TCPOPT_TIMER_SCALE) >> 1; len = sprintf(buf, "%d\n", ival); break; case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN: if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { OP_STATE(ha->ip_config.tcp_options, TCPOPT_TIMESTAMP_ENABLE, pval); } else { OP_STATE(ha->ip_config.ipv6_tcp_options, IPV6_TCPOPT_TIMESTAMP_EN, pval); } len = sprintf(buf, "%s\n", pval); break; case ISCSI_NET_PARAM_CACHE_ID: if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) len = sprintf(buf, "%d\n", ha->ip_config.ipv4_cache_id); else len = sprintf(buf, "%d\n", ha->ip_config.ipv6_cache_id); break; case ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN: OP_STATE(ha->ip_config.tcp_options, TCPOPT_DNS_SERVER_IP_EN, pval); len = sprintf(buf, "%s\n", pval); break; case ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN: OP_STATE(ha->ip_config.tcp_options, TCPOPT_SLP_DA_INFO_EN, pval); len = sprintf(buf, "%s\n", pval); break; case ISCSI_NET_PARAM_IPV4_TOS_EN: OP_STATE(ha->ip_config.ipv4_options, IPOPT_IPV4_TOS_EN, pval); len = sprintf(buf, "%s\n", pval); break; case ISCSI_NET_PARAM_IPV4_TOS: len = sprintf(buf, "%d\n", ha->ip_config.ipv4_tos); break; case ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN: OP_STATE(ha->ip_config.ipv4_options, IPOPT_GRAT_ARP_EN, pval); len = sprintf(buf, "%s\n", pval); break; case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN: OP_STATE(ha->ip_config.ipv4_options, IPOPT_ALT_CID_EN, pval); len = sprintf(buf, "%s\n", pval); break; case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID: pval = (ha->ip_config.ipv4_alt_cid_len) ? (char *)ha->ip_config.ipv4_alt_cid : ""; len = sprintf(buf, "%s\n", pval); break; case ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN: OP_STATE(ha->ip_config.ipv4_options, IPOPT_REQ_VID_EN, pval); len = sprintf(buf, "%s\n", pval); break; case ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN: OP_STATE(ha->ip_config.ipv4_options, IPOPT_USE_VID_EN, pval); len = sprintf(buf, "%s\n", pval); break; case ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID: pval = (ha->ip_config.ipv4_vid_len) ? 
(char *)ha->ip_config.ipv4_vid : ""; len = sprintf(buf, "%s\n", pval); break; case ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN: OP_STATE(ha->ip_config.ipv4_options, IPOPT_LEARN_IQN_EN, pval); len = sprintf(buf, "%s\n", pval); break; case ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE: OP_STATE(~ha->ip_config.ipv4_options, IPOPT_FRAGMENTATION_DISABLE, pval); len = sprintf(buf, "%s\n", pval); break; case ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN: OP_STATE(ha->ip_config.ipv4_options, IPOPT_IN_FORWARD_EN, pval); len = sprintf(buf, "%s\n", pval); break; case ISCSI_NET_PARAM_REDIRECT_EN: if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { OP_STATE(ha->ip_config.ipv4_options, IPOPT_ARP_REDIRECT_EN, pval); } else { OP_STATE(ha->ip_config.ipv6_options, IPV6_OPT_REDIRECT_EN, pval); } len = sprintf(buf, "%s\n", pval); break; case ISCSI_NET_PARAM_IPV4_TTL: len = sprintf(buf, "%d\n", ha->ip_config.ipv4_ttl); break; case ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN: OP_STATE(ha->ip_config.ipv6_options, IPV6_OPT_GRAT_NEIGHBOR_ADV_EN, pval); len = sprintf(buf, "%s\n", pval); break; case ISCSI_NET_PARAM_IPV6_MLD_EN: OP_STATE(ha->ip_config.ipv6_addl_options, IPV6_ADDOPT_MLD_EN, pval); len = sprintf(buf, "%s\n", pval); break; case ISCSI_NET_PARAM_IPV6_FLOW_LABEL: len = sprintf(buf, "%u\n", ha->ip_config.ipv6_flow_lbl); break; case ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS: len = sprintf(buf, "%d\n", ha->ip_config.ipv6_traffic_class); break; case ISCSI_NET_PARAM_IPV6_HOP_LIMIT: len = sprintf(buf, "%d\n", ha->ip_config.ipv6_hop_limit); break; case ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO: len = sprintf(buf, "%d\n", ha->ip_config.ipv6_nd_reach_time); break; case ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME: len = sprintf(buf, "%d\n", ha->ip_config.ipv6_nd_rexmit_timer); break; case ISCSI_NET_PARAM_IPV6_ND_STALE_TMO: len = sprintf(buf, "%d\n", ha->ip_config.ipv6_nd_stale_timeout); break; case ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT: len = sprintf(buf, "%d\n", ha->ip_config.ipv6_dup_addr_detect_count); break; case ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU: len = sprintf(buf, "%d\n", ha->ip_config.ipv6_gw_advrt_mtu); break; default: len = -ENOSYS; } } else if (param_type == ISCSI_IFACE_PARAM) { switch (param) { case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO: len = sprintf(buf, "%d\n", ha->ip_config.def_timeout); break; case ISCSI_IFACE_PARAM_HDRDGST_EN: OP_STATE(ha->ip_config.iscsi_options, ISCSIOPTS_HEADER_DIGEST_EN, pval); len = sprintf(buf, "%s\n", pval); break; case ISCSI_IFACE_PARAM_DATADGST_EN: OP_STATE(ha->ip_config.iscsi_options, ISCSIOPTS_DATA_DIGEST_EN, pval); len = sprintf(buf, "%s\n", pval); break; case ISCSI_IFACE_PARAM_IMM_DATA_EN: OP_STATE(ha->ip_config.iscsi_options, ISCSIOPTS_IMMEDIATE_DATA_EN, pval); len = sprintf(buf, "%s\n", pval); break; case ISCSI_IFACE_PARAM_INITIAL_R2T_EN: OP_STATE(ha->ip_config.iscsi_options, ISCSIOPTS_INITIAL_R2T_EN, pval); len = sprintf(buf, "%s\n", pval); break; case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN: OP_STATE(ha->ip_config.iscsi_options, ISCSIOPTS_DATA_SEQ_INORDER_EN, pval); len = sprintf(buf, "%s\n", pval); break; case ISCSI_IFACE_PARAM_PDU_INORDER_EN: OP_STATE(ha->ip_config.iscsi_options, ISCSIOPTS_DATA_PDU_INORDER_EN, pval); len = sprintf(buf, "%s\n", pval); break; case ISCSI_IFACE_PARAM_ERL: len = sprintf(buf, "%d\n", (ha->ip_config.iscsi_options & ISCSIOPTS_ERL)); break; case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH: len = sprintf(buf, "%u\n", ha->ip_config.iscsi_max_pdu_size * BYTE_UNITS); break; case ISCSI_IFACE_PARAM_FIRST_BURST: len = sprintf(buf, "%u\n", ha->ip_config.iscsi_first_burst_len * BYTE_UNITS); break; case 
ISCSI_IFACE_PARAM_MAX_R2T: len = sprintf(buf, "%d\n", ha->ip_config.iscsi_max_outstnd_r2t); break; case ISCSI_IFACE_PARAM_MAX_BURST: len = sprintf(buf, "%u\n", ha->ip_config.iscsi_max_burst_len * BYTE_UNITS); break; case ISCSI_IFACE_PARAM_CHAP_AUTH_EN: OP_STATE(ha->ip_config.iscsi_options, ISCSIOPTS_CHAP_AUTH_EN, pval); len = sprintf(buf, "%s\n", pval); break; case ISCSI_IFACE_PARAM_BIDI_CHAP_EN: OP_STATE(ha->ip_config.iscsi_options, ISCSIOPTS_BIDI_CHAP_EN, pval); len = sprintf(buf, "%s\n", pval); break; case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL: OP_STATE(ha->ip_config.iscsi_options, ISCSIOPTS_DISCOVERY_AUTH_EN, pval); len = sprintf(buf, "%s\n", pval); break; case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN: OP_STATE(ha->ip_config.iscsi_options, ISCSIOPTS_DISCOVERY_LOGOUT_EN, pval); len = sprintf(buf, "%s\n", pval); break; case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN: OP_STATE(ha->ip_config.iscsi_options, ISCSIOPTS_STRICT_LOGIN_COMP_EN, pval); len = sprintf(buf, "%s\n", pval); break; case ISCSI_IFACE_PARAM_INITIATOR_NAME: len = sprintf(buf, "%s\n", ha->ip_config.iscsi_name); break; default: len = -ENOSYS; } } return len; } static struct iscsi_endpoint * qla4xxx_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, int non_blocking) { int ret; struct iscsi_endpoint *ep; struct qla_endpoint *qla_ep; struct scsi_qla_host *ha; struct sockaddr_in *addr; struct sockaddr_in6 *addr6; if (!shost) { ret = -ENXIO; pr_err("%s: shost is NULL\n", __func__); return ERR_PTR(ret); } ha = iscsi_host_priv(shost); ep = iscsi_create_endpoint(sizeof(struct qla_endpoint)); if (!ep) { ret = -ENOMEM; return ERR_PTR(ret); } qla_ep = ep->dd_data; memset(qla_ep, 0, sizeof(struct qla_endpoint)); if (dst_addr->sa_family == AF_INET) { memcpy(&qla_ep->dst_addr, dst_addr, sizeof(struct sockaddr_in)); addr = (struct sockaddr_in *)&qla_ep->dst_addr; DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI4\n", __func__, (char *)&addr->sin_addr)); } else if (dst_addr->sa_family == AF_INET6) { memcpy(&qla_ep->dst_addr, dst_addr, sizeof(struct sockaddr_in6)); addr6 = (struct sockaddr_in6 *)&qla_ep->dst_addr; DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI6\n", __func__, (char *)&addr6->sin6_addr)); } else { ql4_printk(KERN_WARNING, ha, "%s: Invalid endpoint\n", __func__); } qla_ep->host = shost; return ep; } static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) { struct qla_endpoint *qla_ep; struct scsi_qla_host *ha; int ret = 0; qla_ep = ep->dd_data; ha = to_qla_host(qla_ep->host); DEBUG2(pr_info_ratelimited("%s: host: %ld\n", __func__, ha->host_no)); if (adapter_up(ha) && !test_bit(AF_BUILD_DDB_LIST, &ha->flags)) ret = 1; return ret; } static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep) { struct qla_endpoint *qla_ep; struct scsi_qla_host *ha; qla_ep = ep->dd_data; ha = to_qla_host(qla_ep->host); DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__, ha->host_no)); iscsi_destroy_endpoint(ep); } static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep, enum iscsi_param param, char *buf) { struct qla_endpoint *qla_ep = ep->dd_data; struct sockaddr *dst_addr; struct scsi_qla_host *ha; if (!qla_ep) return -ENOTCONN; ha = to_qla_host(qla_ep->host); DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__, ha->host_no)); switch (param) { case ISCSI_PARAM_CONN_PORT: case ISCSI_PARAM_CONN_ADDRESS: dst_addr = (struct sockaddr *)&qla_ep->dst_addr; if (!dst_addr) return -ENOTCONN; return iscsi_conn_get_addr_param((struct sockaddr_storage *) &qla_ep->dst_addr, param, buf); default: return -ENOSYS; } } static void 
qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats) { struct iscsi_session *sess; struct iscsi_cls_session *cls_sess; struct ddb_entry *ddb_entry; struct scsi_qla_host *ha; struct ql_iscsi_stats *ql_iscsi_stats; int stats_size; int ret; dma_addr_t iscsi_stats_dma; cls_sess = iscsi_conn_to_session(cls_conn); sess = cls_sess->dd_data; ddb_entry = sess->dd_data; ha = ddb_entry->ha; DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__, ha->host_no)); stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats)); /* Allocate memory */ ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size, &iscsi_stats_dma, GFP_KERNEL); if (!ql_iscsi_stats) { ql4_printk(KERN_ERR, ha, "Unable to allocate memory for iscsi stats\n"); goto exit_get_stats; } ret = qla4xxx_get_mgmt_data(ha, ddb_entry->fw_ddb_index, stats_size, iscsi_stats_dma); if (ret != QLA_SUCCESS) { ql4_printk(KERN_ERR, ha, "Unable to retrieve iscsi stats\n"); goto free_stats; } /* octets */ stats->txdata_octets = le64_to_cpu(ql_iscsi_stats->tx_data_octets); stats->rxdata_octets = le64_to_cpu(ql_iscsi_stats->rx_data_octets); /* xmit pdus */ stats->noptx_pdus = le32_to_cpu(ql_iscsi_stats->tx_nopout_pdus); stats->scsicmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_cmd_pdus); stats->tmfcmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_tmf_cmd_pdus); stats->login_pdus = le32_to_cpu(ql_iscsi_stats->tx_login_cmd_pdus); stats->text_pdus = le32_to_cpu(ql_iscsi_stats->tx_text_cmd_pdus); stats->dataout_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_write_pdus); stats->logout_pdus = le32_to_cpu(ql_iscsi_stats->tx_logout_cmd_pdus); stats->snack_pdus = le32_to_cpu(ql_iscsi_stats->tx_snack_req_pdus); /* recv pdus */ stats->noprx_pdus = le32_to_cpu(ql_iscsi_stats->rx_nopin_pdus); stats->scsirsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_resp_pdus); stats->tmfrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_tmf_resp_pdus); stats->textrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_text_resp_pdus); stats->datain_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_read_pdus); stats->logoutrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_logout_resp_pdus); stats->r2t_pdus = le32_to_cpu(ql_iscsi_stats->rx_r2t_pdus); stats->async_pdus = le32_to_cpu(ql_iscsi_stats->rx_async_pdus); stats->rjt_pdus = le32_to_cpu(ql_iscsi_stats->rx_reject_pdus); free_stats: dma_free_coherent(&ha->pdev->dev, stats_size, ql_iscsi_stats, iscsi_stats_dma); exit_get_stats: return; } static enum scsi_timeout_action qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc) { struct iscsi_cls_session *session; unsigned long flags; enum scsi_timeout_action ret = SCSI_EH_NOT_HANDLED; session = starget_to_session(scsi_target(sc->device)); spin_lock_irqsave(&session->lock, flags); if (session->state == ISCSI_SESSION_FAILED) ret = SCSI_EH_RESET_TIMER; spin_unlock_irqrestore(&session->lock, flags); return ret; } static void qla4xxx_set_port_speed(struct Scsi_Host *shost) { struct scsi_qla_host *ha = to_qla_host(shost); struct iscsi_cls_host *ihost = shost->shost_data; uint32_t speed = ISCSI_PORT_SPEED_UNKNOWN; qla4xxx_get_firmware_state(ha); switch (ha->addl_fw_state & 0x0F00) { case FW_ADDSTATE_LINK_SPEED_10MBPS: speed = ISCSI_PORT_SPEED_10MBPS; break; case FW_ADDSTATE_LINK_SPEED_100MBPS: speed = ISCSI_PORT_SPEED_100MBPS; break; case FW_ADDSTATE_LINK_SPEED_1GBPS: speed = ISCSI_PORT_SPEED_1GBPS; break; case FW_ADDSTATE_LINK_SPEED_10GBPS: speed = ISCSI_PORT_SPEED_10GBPS; break; } ihost->port_speed = speed; } static void qla4xxx_set_port_state(struct Scsi_Host *shost) { struct scsi_qla_host *ha = to_qla_host(shost); 
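	/* Report the iSCSI port as up only while the adapter's AF_LINK_UP flag is set. */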
struct iscsi_cls_host *ihost = shost->shost_data; uint32_t state = ISCSI_PORT_STATE_DOWN; if (test_bit(AF_LINK_UP, &ha->flags)) state = ISCSI_PORT_STATE_UP; ihost->port_state = state; } static int qla4xxx_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param, char *buf) { struct scsi_qla_host *ha = to_qla_host(shost); int len; switch (param) { case ISCSI_HOST_PARAM_HWADDRESS: len = sysfs_format_mac(buf, ha->my_mac, MAC_ADDR_LEN); break; case ISCSI_HOST_PARAM_IPADDRESS: len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address); break; case ISCSI_HOST_PARAM_INITIATOR_NAME: len = sprintf(buf, "%s\n", ha->name_string); break; case ISCSI_HOST_PARAM_PORT_STATE: qla4xxx_set_port_state(shost); len = sprintf(buf, "%s\n", iscsi_get_port_state_name(shost)); break; case ISCSI_HOST_PARAM_PORT_SPEED: qla4xxx_set_port_speed(shost); len = sprintf(buf, "%s\n", iscsi_get_port_speed_name(shost)); break; default: return -ENOSYS; } return len; } static void qla4xxx_create_ipv4_iface(struct scsi_qla_host *ha) { if (ha->iface_ipv4) return; /* IPv4 */ ha->iface_ipv4 = iscsi_create_iface(ha->host, &qla4xxx_iscsi_transport, ISCSI_IFACE_TYPE_IPV4, 0, 0); if (!ha->iface_ipv4) ql4_printk(KERN_ERR, ha, "Could not create IPv4 iSCSI " "iface0.\n"); } static void qla4xxx_create_ipv6_iface(struct scsi_qla_host *ha) { if (!ha->iface_ipv6_0) /* IPv6 iface-0 */ ha->iface_ipv6_0 = iscsi_create_iface(ha->host, &qla4xxx_iscsi_transport, ISCSI_IFACE_TYPE_IPV6, 0, 0); if (!ha->iface_ipv6_0) ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI " "iface0.\n"); if (!ha->iface_ipv6_1) /* IPv6 iface-1 */ ha->iface_ipv6_1 = iscsi_create_iface(ha->host, &qla4xxx_iscsi_transport, ISCSI_IFACE_TYPE_IPV6, 1, 0); if (!ha->iface_ipv6_1) ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI " "iface1.\n"); } static void qla4xxx_create_ifaces(struct scsi_qla_host *ha) { if (ha->ip_config.ipv4_options & IPOPT_IPV4_PROTOCOL_ENABLE) qla4xxx_create_ipv4_iface(ha); if (ha->ip_config.ipv6_options & IPV6_OPT_IPV6_PROTOCOL_ENABLE) qla4xxx_create_ipv6_iface(ha); } static void qla4xxx_destroy_ipv4_iface(struct scsi_qla_host *ha) { if (ha->iface_ipv4) { iscsi_destroy_iface(ha->iface_ipv4); ha->iface_ipv4 = NULL; } } static void qla4xxx_destroy_ipv6_iface(struct scsi_qla_host *ha) { if (ha->iface_ipv6_0) { iscsi_destroy_iface(ha->iface_ipv6_0); ha->iface_ipv6_0 = NULL; } if (ha->iface_ipv6_1) { iscsi_destroy_iface(ha->iface_ipv6_1); ha->iface_ipv6_1 = NULL; } } static void qla4xxx_destroy_ifaces(struct scsi_qla_host *ha) { qla4xxx_destroy_ipv4_iface(ha); qla4xxx_destroy_ipv6_iface(ha); } static void qla4xxx_set_ipv6(struct scsi_qla_host *ha, struct iscsi_iface_param_info *iface_param, struct addr_ctrl_blk *init_fw_cb) { /* * iface_num 0 is valid for IPv6 Addr, linklocal, router, autocfg. * iface_num 1 is valid only for IPv6 Addr. 
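 * Most of the other per-port settings handled below are applied only for an
 * even iface_num; requests addressed to the odd interface are ignored.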
*/ switch (iface_param->param) { case ISCSI_NET_PARAM_IPV6_ADDR: if (iface_param->iface_num & 0x1) /* IPv6 Addr 1 */ memcpy(init_fw_cb->ipv6_addr1, iface_param->value, sizeof(init_fw_cb->ipv6_addr1)); else /* IPv6 Addr 0 */ memcpy(init_fw_cb->ipv6_addr0, iface_param->value, sizeof(init_fw_cb->ipv6_addr0)); break; case ISCSI_NET_PARAM_IPV6_LINKLOCAL: if (iface_param->iface_num & 0x1) break; memcpy(init_fw_cb->ipv6_if_id, &iface_param->value[8], sizeof(init_fw_cb->ipv6_if_id)); break; case ISCSI_NET_PARAM_IPV6_ROUTER: if (iface_param->iface_num & 0x1) break; memcpy(init_fw_cb->ipv6_dflt_rtr_addr, iface_param->value, sizeof(init_fw_cb->ipv6_dflt_rtr_addr)); break; case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG: /* Autocfg applies to even interface */ if (iface_param->iface_num & 0x1) break; if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_DISABLE) init_fw_cb->ipv6_addtl_opts &= cpu_to_le16( ~IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE); else if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_ND_ENABLE) init_fw_cb->ipv6_addtl_opts |= cpu_to_le16( IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE); else ql4_printk(KERN_ERR, ha, "Invalid autocfg setting for IPv6 addr\n"); break; case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG: /* Autocfg applies to even interface */ if (iface_param->iface_num & 0x1) break; if (iface_param->value[0] == ISCSI_IPV6_LINKLOCAL_AUTOCFG_ENABLE) init_fw_cb->ipv6_addtl_opts |= cpu_to_le16( IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR); else if (iface_param->value[0] == ISCSI_IPV6_LINKLOCAL_AUTOCFG_DISABLE) init_fw_cb->ipv6_addtl_opts &= cpu_to_le16( ~IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR); else ql4_printk(KERN_ERR, ha, "Invalid autocfg setting for IPv6 linklocal addr\n"); break; case ISCSI_NET_PARAM_IPV6_ROUTER_AUTOCFG: /* Autocfg applies to even interface */ if (iface_param->iface_num & 0x1) break; if (iface_param->value[0] == ISCSI_IPV6_ROUTER_AUTOCFG_ENABLE) memset(init_fw_cb->ipv6_dflt_rtr_addr, 0, sizeof(init_fw_cb->ipv6_dflt_rtr_addr)); break; case ISCSI_NET_PARAM_IFACE_ENABLE: if (iface_param->value[0] == ISCSI_IFACE_ENABLE) { init_fw_cb->ipv6_opts |= cpu_to_le16(IPV6_OPT_IPV6_PROTOCOL_ENABLE); qla4xxx_create_ipv6_iface(ha); } else { init_fw_cb->ipv6_opts &= cpu_to_le16(~IPV6_OPT_IPV6_PROTOCOL_ENABLE & 0xFFFF); qla4xxx_destroy_ipv6_iface(ha); } break; case ISCSI_NET_PARAM_VLAN_TAG: if (iface_param->len != sizeof(init_fw_cb->ipv6_vlan_tag)) break; init_fw_cb->ipv6_vlan_tag = cpu_to_be16(*(uint16_t *)iface_param->value); break; case ISCSI_NET_PARAM_VLAN_ENABLED: if (iface_param->value[0] == ISCSI_VLAN_ENABLE) init_fw_cb->ipv6_opts |= cpu_to_le16(IPV6_OPT_VLAN_TAGGING_ENABLE); else init_fw_cb->ipv6_opts &= cpu_to_le16(~IPV6_OPT_VLAN_TAGGING_ENABLE); break; case ISCSI_NET_PARAM_MTU: init_fw_cb->eth_mtu_size = cpu_to_le16(*(uint16_t *)iface_param->value); break; case ISCSI_NET_PARAM_PORT: /* Autocfg applies to even interface */ if (iface_param->iface_num & 0x1) break; init_fw_cb->ipv6_port = cpu_to_le16(*(uint16_t *)iface_param->value); break; case ISCSI_NET_PARAM_DELAYED_ACK_EN: if (iface_param->iface_num & 0x1) break; if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) init_fw_cb->ipv6_tcp_opts |= cpu_to_le16(IPV6_TCPOPT_DELAYED_ACK_DISABLE); else init_fw_cb->ipv6_tcp_opts &= cpu_to_le16(~IPV6_TCPOPT_DELAYED_ACK_DISABLE & 0xFFFF); break; case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE: if (iface_param->iface_num & 0x1) break; if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) init_fw_cb->ipv6_tcp_opts |= cpu_to_le16(IPV6_TCPOPT_NAGLE_ALGO_DISABLE); else init_fw_cb->ipv6_tcp_opts &= 
cpu_to_le16(~IPV6_TCPOPT_NAGLE_ALGO_DISABLE); break; case ISCSI_NET_PARAM_TCP_WSF_DISABLE: if (iface_param->iface_num & 0x1) break; if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) init_fw_cb->ipv6_tcp_opts |= cpu_to_le16(IPV6_TCPOPT_WINDOW_SCALE_DISABLE); else init_fw_cb->ipv6_tcp_opts &= cpu_to_le16(~IPV6_TCPOPT_WINDOW_SCALE_DISABLE); break; case ISCSI_NET_PARAM_TCP_WSF: if (iface_param->iface_num & 0x1) break; init_fw_cb->ipv6_tcp_wsf = iface_param->value[0]; break; case ISCSI_NET_PARAM_TCP_TIMER_SCALE: if (iface_param->iface_num & 0x1) break; init_fw_cb->ipv6_tcp_opts &= cpu_to_le16(~IPV6_TCPOPT_TIMER_SCALE); init_fw_cb->ipv6_tcp_opts |= cpu_to_le16((iface_param->value[0] << 1) & IPV6_TCPOPT_TIMER_SCALE); break; case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN: if (iface_param->iface_num & 0x1) break; if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) init_fw_cb->ipv6_tcp_opts |= cpu_to_le16(IPV6_TCPOPT_TIMESTAMP_EN); else init_fw_cb->ipv6_tcp_opts &= cpu_to_le16(~IPV6_TCPOPT_TIMESTAMP_EN); break; case ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN: if (iface_param->iface_num & 0x1) break; if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) init_fw_cb->ipv6_opts |= cpu_to_le16(IPV6_OPT_GRAT_NEIGHBOR_ADV_EN); else init_fw_cb->ipv6_opts &= cpu_to_le16(~IPV6_OPT_GRAT_NEIGHBOR_ADV_EN); break; case ISCSI_NET_PARAM_REDIRECT_EN: if (iface_param->iface_num & 0x1) break; if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) init_fw_cb->ipv6_opts |= cpu_to_le16(IPV6_OPT_REDIRECT_EN); else init_fw_cb->ipv6_opts &= cpu_to_le16(~IPV6_OPT_REDIRECT_EN); break; case ISCSI_NET_PARAM_IPV6_MLD_EN: if (iface_param->iface_num & 0x1) break; if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) init_fw_cb->ipv6_addtl_opts |= cpu_to_le16(IPV6_ADDOPT_MLD_EN); else init_fw_cb->ipv6_addtl_opts &= cpu_to_le16(~IPV6_ADDOPT_MLD_EN); break; case ISCSI_NET_PARAM_IPV6_FLOW_LABEL: if (iface_param->iface_num & 0x1) break; init_fw_cb->ipv6_flow_lbl = cpu_to_le16(*(uint16_t *)iface_param->value); break; case ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS: if (iface_param->iface_num & 0x1) break; init_fw_cb->ipv6_traffic_class = iface_param->value[0]; break; case ISCSI_NET_PARAM_IPV6_HOP_LIMIT: if (iface_param->iface_num & 0x1) break; init_fw_cb->ipv6_hop_limit = iface_param->value[0]; break; case ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO: if (iface_param->iface_num & 0x1) break; init_fw_cb->ipv6_nd_reach_time = cpu_to_le32(*(uint32_t *)iface_param->value); break; case ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME: if (iface_param->iface_num & 0x1) break; init_fw_cb->ipv6_nd_rexmit_timer = cpu_to_le32(*(uint32_t *)iface_param->value); break; case ISCSI_NET_PARAM_IPV6_ND_STALE_TMO: if (iface_param->iface_num & 0x1) break; init_fw_cb->ipv6_nd_stale_timeout = cpu_to_le32(*(uint32_t *)iface_param->value); break; case ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT: if (iface_param->iface_num & 0x1) break; init_fw_cb->ipv6_dup_addr_detect_count = iface_param->value[0]; break; case ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU: if (iface_param->iface_num & 0x1) break; init_fw_cb->ipv6_gw_advrt_mtu = cpu_to_le32(*(uint32_t *)iface_param->value); break; default: ql4_printk(KERN_ERR, ha, "Unknown IPv6 param = %d\n", iface_param->param); break; } } static void qla4xxx_set_ipv4(struct scsi_qla_host *ha, struct iscsi_iface_param_info *iface_param, struct addr_ctrl_blk *init_fw_cb) { switch (iface_param->param) { case ISCSI_NET_PARAM_IPV4_ADDR: memcpy(init_fw_cb->ipv4_addr, iface_param->value, sizeof(init_fw_cb->ipv4_addr)); break; case ISCSI_NET_PARAM_IPV4_SUBNET: 
memcpy(init_fw_cb->ipv4_subnet, iface_param->value, sizeof(init_fw_cb->ipv4_subnet)); break; case ISCSI_NET_PARAM_IPV4_GW: memcpy(init_fw_cb->ipv4_gw_addr, iface_param->value, sizeof(init_fw_cb->ipv4_gw_addr)); break; case ISCSI_NET_PARAM_IPV4_BOOTPROTO: if (iface_param->value[0] == ISCSI_BOOTPROTO_DHCP) init_fw_cb->ipv4_tcp_opts |= cpu_to_le16(TCPOPT_DHCP_ENABLE); else if (iface_param->value[0] == ISCSI_BOOTPROTO_STATIC) init_fw_cb->ipv4_tcp_opts &= cpu_to_le16(~TCPOPT_DHCP_ENABLE); else ql4_printk(KERN_ERR, ha, "Invalid IPv4 bootproto\n"); break; case ISCSI_NET_PARAM_IFACE_ENABLE: if (iface_param->value[0] == ISCSI_IFACE_ENABLE) { init_fw_cb->ipv4_ip_opts |= cpu_to_le16(IPOPT_IPV4_PROTOCOL_ENABLE); qla4xxx_create_ipv4_iface(ha); } else { init_fw_cb->ipv4_ip_opts &= cpu_to_le16(~IPOPT_IPV4_PROTOCOL_ENABLE & 0xFFFF); qla4xxx_destroy_ipv4_iface(ha); } break; case ISCSI_NET_PARAM_VLAN_TAG: if (iface_param->len != sizeof(init_fw_cb->ipv4_vlan_tag)) break; init_fw_cb->ipv4_vlan_tag = cpu_to_be16(*(uint16_t *)iface_param->value); break; case ISCSI_NET_PARAM_VLAN_ENABLED: if (iface_param->value[0] == ISCSI_VLAN_ENABLE) init_fw_cb->ipv4_ip_opts |= cpu_to_le16(IPOPT_VLAN_TAGGING_ENABLE); else init_fw_cb->ipv4_ip_opts &= cpu_to_le16(~IPOPT_VLAN_TAGGING_ENABLE); break; case ISCSI_NET_PARAM_MTU: init_fw_cb->eth_mtu_size = cpu_to_le16(*(uint16_t *)iface_param->value); break; case ISCSI_NET_PARAM_PORT: init_fw_cb->ipv4_port = cpu_to_le16(*(uint16_t *)iface_param->value); break; case ISCSI_NET_PARAM_DELAYED_ACK_EN: if (iface_param->iface_num & 0x1) break; if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) init_fw_cb->ipv4_tcp_opts |= cpu_to_le16(TCPOPT_DELAYED_ACK_DISABLE); else init_fw_cb->ipv4_tcp_opts &= cpu_to_le16(~TCPOPT_DELAYED_ACK_DISABLE & 0xFFFF); break; case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE: if (iface_param->iface_num & 0x1) break; if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) init_fw_cb->ipv4_tcp_opts |= cpu_to_le16(TCPOPT_NAGLE_ALGO_DISABLE); else init_fw_cb->ipv4_tcp_opts &= cpu_to_le16(~TCPOPT_NAGLE_ALGO_DISABLE); break; case ISCSI_NET_PARAM_TCP_WSF_DISABLE: if (iface_param->iface_num & 0x1) break; if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) init_fw_cb->ipv4_tcp_opts |= cpu_to_le16(TCPOPT_WINDOW_SCALE_DISABLE); else init_fw_cb->ipv4_tcp_opts &= cpu_to_le16(~TCPOPT_WINDOW_SCALE_DISABLE); break; case ISCSI_NET_PARAM_TCP_WSF: if (iface_param->iface_num & 0x1) break; init_fw_cb->ipv4_tcp_wsf = iface_param->value[0]; break; case ISCSI_NET_PARAM_TCP_TIMER_SCALE: if (iface_param->iface_num & 0x1) break; init_fw_cb->ipv4_tcp_opts &= cpu_to_le16(~TCPOPT_TIMER_SCALE); init_fw_cb->ipv4_tcp_opts |= cpu_to_le16((iface_param->value[0] << 1) & TCPOPT_TIMER_SCALE); break; case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN: if (iface_param->iface_num & 0x1) break; if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) init_fw_cb->ipv4_tcp_opts |= cpu_to_le16(TCPOPT_TIMESTAMP_ENABLE); else init_fw_cb->ipv4_tcp_opts &= cpu_to_le16(~TCPOPT_TIMESTAMP_ENABLE); break; case ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN: if (iface_param->iface_num & 0x1) break; if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) init_fw_cb->ipv4_tcp_opts |= cpu_to_le16(TCPOPT_DNS_SERVER_IP_EN); else init_fw_cb->ipv4_tcp_opts &= cpu_to_le16(~TCPOPT_DNS_SERVER_IP_EN); break; case ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN: if (iface_param->iface_num & 0x1) break; if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) init_fw_cb->ipv4_tcp_opts |= cpu_to_le16(TCPOPT_SLP_DA_INFO_EN); else init_fw_cb->ipv4_tcp_opts &= 
cpu_to_le16(~TCPOPT_SLP_DA_INFO_EN); break; case ISCSI_NET_PARAM_IPV4_TOS_EN: if (iface_param->iface_num & 0x1) break; if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) init_fw_cb->ipv4_ip_opts |= cpu_to_le16(IPOPT_IPV4_TOS_EN); else init_fw_cb->ipv4_ip_opts &= cpu_to_le16(~IPOPT_IPV4_TOS_EN); break; case ISCSI_NET_PARAM_IPV4_TOS: if (iface_param->iface_num & 0x1) break; init_fw_cb->ipv4_tos = iface_param->value[0]; break; case ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN: if (iface_param->iface_num & 0x1) break; if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) init_fw_cb->ipv4_ip_opts |= cpu_to_le16(IPOPT_GRAT_ARP_EN); else init_fw_cb->ipv4_ip_opts &= cpu_to_le16(~IPOPT_GRAT_ARP_EN); break; case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN: if (iface_param->iface_num & 0x1) break; if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) init_fw_cb->ipv4_ip_opts |= cpu_to_le16(IPOPT_ALT_CID_EN); else init_fw_cb->ipv4_ip_opts &= cpu_to_le16(~IPOPT_ALT_CID_EN); break; case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID: if (iface_param->iface_num & 0x1) break; memcpy(init_fw_cb->ipv4_dhcp_alt_cid, iface_param->value, (sizeof(init_fw_cb->ipv4_dhcp_alt_cid) - 1)); init_fw_cb->ipv4_dhcp_alt_cid_len = strlen(init_fw_cb->ipv4_dhcp_alt_cid); break; case ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN: if (iface_param->iface_num & 0x1) break; if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) init_fw_cb->ipv4_ip_opts |= cpu_to_le16(IPOPT_REQ_VID_EN); else init_fw_cb->ipv4_ip_opts &= cpu_to_le16(~IPOPT_REQ_VID_EN); break; case ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN: if (iface_param->iface_num & 0x1) break; if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) init_fw_cb->ipv4_ip_opts |= cpu_to_le16(IPOPT_USE_VID_EN); else init_fw_cb->ipv4_ip_opts &= cpu_to_le16(~IPOPT_USE_VID_EN); break; case ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID: if (iface_param->iface_num & 0x1) break; memcpy(init_fw_cb->ipv4_dhcp_vid, iface_param->value, (sizeof(init_fw_cb->ipv4_dhcp_vid) - 1)); init_fw_cb->ipv4_dhcp_vid_len = strlen(init_fw_cb->ipv4_dhcp_vid); break; case ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN: if (iface_param->iface_num & 0x1) break; if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) init_fw_cb->ipv4_ip_opts |= cpu_to_le16(IPOPT_LEARN_IQN_EN); else init_fw_cb->ipv4_ip_opts &= cpu_to_le16(~IPOPT_LEARN_IQN_EN); break; case ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE: if (iface_param->iface_num & 0x1) break; if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) init_fw_cb->ipv4_ip_opts |= cpu_to_le16(IPOPT_FRAGMENTATION_DISABLE); else init_fw_cb->ipv4_ip_opts &= cpu_to_le16(~IPOPT_FRAGMENTATION_DISABLE); break; case ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN: if (iface_param->iface_num & 0x1) break; if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) init_fw_cb->ipv4_ip_opts |= cpu_to_le16(IPOPT_IN_FORWARD_EN); else init_fw_cb->ipv4_ip_opts &= cpu_to_le16(~IPOPT_IN_FORWARD_EN); break; case ISCSI_NET_PARAM_REDIRECT_EN: if (iface_param->iface_num & 0x1) break; if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) init_fw_cb->ipv4_ip_opts |= cpu_to_le16(IPOPT_ARP_REDIRECT_EN); else init_fw_cb->ipv4_ip_opts &= cpu_to_le16(~IPOPT_ARP_REDIRECT_EN); break; case ISCSI_NET_PARAM_IPV4_TTL: if (iface_param->iface_num & 0x1) break; init_fw_cb->ipv4_ttl = iface_param->value[0]; break; default: ql4_printk(KERN_ERR, ha, "Unknown IPv4 param = %d\n", iface_param->param); break; } } static void qla4xxx_set_iscsi_param(struct scsi_qla_host *ha, struct iscsi_iface_param_info *iface_param, struct addr_ctrl_blk *init_fw_cb) { switch (iface_param->param) { case 
ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO: if (iface_param->iface_num & 0x1) break; init_fw_cb->def_timeout = cpu_to_le16(*(uint16_t *)iface_param->value); break; case ISCSI_IFACE_PARAM_HDRDGST_EN: if (iface_param->iface_num & 0x1) break; if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) init_fw_cb->iscsi_opts |= cpu_to_le16(ISCSIOPTS_HEADER_DIGEST_EN); else init_fw_cb->iscsi_opts &= cpu_to_le16(~ISCSIOPTS_HEADER_DIGEST_EN); break; case ISCSI_IFACE_PARAM_DATADGST_EN: if (iface_param->iface_num & 0x1) break; if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) init_fw_cb->iscsi_opts |= cpu_to_le16(ISCSIOPTS_DATA_DIGEST_EN); else init_fw_cb->iscsi_opts &= cpu_to_le16(~ISCSIOPTS_DATA_DIGEST_EN); break; case ISCSI_IFACE_PARAM_IMM_DATA_EN: if (iface_param->iface_num & 0x1) break; if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) init_fw_cb->iscsi_opts |= cpu_to_le16(ISCSIOPTS_IMMEDIATE_DATA_EN); else init_fw_cb->iscsi_opts &= cpu_to_le16(~ISCSIOPTS_IMMEDIATE_DATA_EN); break; case ISCSI_IFACE_PARAM_INITIAL_R2T_EN: if (iface_param->iface_num & 0x1) break; if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) init_fw_cb->iscsi_opts |= cpu_to_le16(ISCSIOPTS_INITIAL_R2T_EN); else init_fw_cb->iscsi_opts &= cpu_to_le16(~ISCSIOPTS_INITIAL_R2T_EN); break; case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN: if (iface_param->iface_num & 0x1) break; if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) init_fw_cb->iscsi_opts |= cpu_to_le16(ISCSIOPTS_DATA_SEQ_INORDER_EN); else init_fw_cb->iscsi_opts &= cpu_to_le16(~ISCSIOPTS_DATA_SEQ_INORDER_EN); break; case ISCSI_IFACE_PARAM_PDU_INORDER_EN: if (iface_param->iface_num & 0x1) break; if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) init_fw_cb->iscsi_opts |= cpu_to_le16(ISCSIOPTS_DATA_PDU_INORDER_EN); else init_fw_cb->iscsi_opts &= cpu_to_le16(~ISCSIOPTS_DATA_PDU_INORDER_EN); break; case ISCSI_IFACE_PARAM_ERL: if (iface_param->iface_num & 0x1) break; init_fw_cb->iscsi_opts &= cpu_to_le16(~ISCSIOPTS_ERL); init_fw_cb->iscsi_opts |= cpu_to_le16(iface_param->value[0] & ISCSIOPTS_ERL); break; case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH: if (iface_param->iface_num & 0x1) break; init_fw_cb->iscsi_max_pdu_size = cpu_to_le32(*(uint32_t *)iface_param->value) / BYTE_UNITS; break; case ISCSI_IFACE_PARAM_FIRST_BURST: if (iface_param->iface_num & 0x1) break; init_fw_cb->iscsi_fburst_len = cpu_to_le32(*(uint32_t *)iface_param->value) / BYTE_UNITS; break; case ISCSI_IFACE_PARAM_MAX_R2T: if (iface_param->iface_num & 0x1) break; init_fw_cb->iscsi_max_outstnd_r2t = cpu_to_le16(*(uint16_t *)iface_param->value); break; case ISCSI_IFACE_PARAM_MAX_BURST: if (iface_param->iface_num & 0x1) break; init_fw_cb->iscsi_max_burst_len = cpu_to_le32(*(uint32_t *)iface_param->value) / BYTE_UNITS; break; case ISCSI_IFACE_PARAM_CHAP_AUTH_EN: if (iface_param->iface_num & 0x1) break; if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) init_fw_cb->iscsi_opts |= cpu_to_le16(ISCSIOPTS_CHAP_AUTH_EN); else init_fw_cb->iscsi_opts &= cpu_to_le16(~ISCSIOPTS_CHAP_AUTH_EN); break; case ISCSI_IFACE_PARAM_BIDI_CHAP_EN: if (iface_param->iface_num & 0x1) break; if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) init_fw_cb->iscsi_opts |= cpu_to_le16(ISCSIOPTS_BIDI_CHAP_EN); else init_fw_cb->iscsi_opts &= cpu_to_le16(~ISCSIOPTS_BIDI_CHAP_EN); break; case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL: if (iface_param->iface_num & 0x1) break; if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) init_fw_cb->iscsi_opts |= cpu_to_le16(ISCSIOPTS_DISCOVERY_AUTH_EN); else init_fw_cb->iscsi_opts &= 
cpu_to_le16(~ISCSIOPTS_DISCOVERY_AUTH_EN); break; case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN: if (iface_param->iface_num & 0x1) break; if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) init_fw_cb->iscsi_opts |= cpu_to_le16(ISCSIOPTS_DISCOVERY_LOGOUT_EN); else init_fw_cb->iscsi_opts &= cpu_to_le16(~ISCSIOPTS_DISCOVERY_LOGOUT_EN); break; case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN: if (iface_param->iface_num & 0x1) break; if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) init_fw_cb->iscsi_opts |= cpu_to_le16(ISCSIOPTS_STRICT_LOGIN_COMP_EN); else init_fw_cb->iscsi_opts &= cpu_to_le16(~ISCSIOPTS_STRICT_LOGIN_COMP_EN); break; default: ql4_printk(KERN_ERR, ha, "Unknown iscsi param = %d\n", iface_param->param); break; } } static void qla4xxx_initcb_to_acb(struct addr_ctrl_blk *init_fw_cb) { struct addr_ctrl_blk_def *acb; acb = (struct addr_ctrl_blk_def *)init_fw_cb; memset(acb->reserved1, 0, sizeof(acb->reserved1)); memset(acb->reserved2, 0, sizeof(acb->reserved2)); memset(acb->reserved3, 0, sizeof(acb->reserved3)); memset(acb->reserved4, 0, sizeof(acb->reserved4)); memset(acb->reserved5, 0, sizeof(acb->reserved5)); memset(acb->reserved6, 0, sizeof(acb->reserved6)); memset(acb->reserved7, 0, sizeof(acb->reserved7)); memset(acb->reserved8, 0, sizeof(acb->reserved8)); memset(acb->reserved9, 0, sizeof(acb->reserved9)); memset(acb->reserved10, 0, sizeof(acb->reserved10)); memset(acb->reserved11, 0, sizeof(acb->reserved11)); memset(acb->reserved12, 0, sizeof(acb->reserved12)); memset(acb->reserved13, 0, sizeof(acb->reserved13)); memset(acb->reserved14, 0, sizeof(acb->reserved14)); memset(acb->reserved15, 0, sizeof(acb->reserved15)); } static int qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len) { struct scsi_qla_host *ha = to_qla_host(shost); int rval = 0; struct iscsi_iface_param_info *iface_param = NULL; struct addr_ctrl_blk *init_fw_cb = NULL; dma_addr_t init_fw_cb_dma; uint32_t mbox_cmd[MBOX_REG_COUNT]; uint32_t mbox_sts[MBOX_REG_COUNT]; uint32_t rem = len; struct nlattr *attr; init_fw_cb = dma_alloc_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk), &init_fw_cb_dma, GFP_KERNEL); if (!init_fw_cb) { ql4_printk(KERN_ERR, ha, "%s: Unable to alloc init_cb\n", __func__); return -ENOMEM; } memset(&mbox_cmd, 0, sizeof(mbox_cmd)); memset(&mbox_sts, 0, sizeof(mbox_sts)); if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)) { ql4_printk(KERN_ERR, ha, "%s: get ifcb failed\n", __func__); rval = -EIO; goto exit_init_fw_cb; } nla_for_each_attr(attr, data, len, rem) { if (nla_len(attr) < sizeof(*iface_param)) { rval = -EINVAL; goto exit_init_fw_cb; } iface_param = nla_data(attr); if (iface_param->param_type == ISCSI_NET_PARAM) { switch (iface_param->iface_type) { case ISCSI_IFACE_TYPE_IPV4: switch (iface_param->iface_num) { case 0: qla4xxx_set_ipv4(ha, iface_param, init_fw_cb); break; default: /* Cannot have more than one IPv4 interface */ ql4_printk(KERN_ERR, ha, "Invalid IPv4 iface number = %d\n", iface_param->iface_num); break; } break; case ISCSI_IFACE_TYPE_IPV6: switch (iface_param->iface_num) { case 0: case 1: qla4xxx_set_ipv6(ha, iface_param, init_fw_cb); break; default: /* Cannot have more than two IPv6 interface */ ql4_printk(KERN_ERR, ha, "Invalid IPv6 iface number = %d\n", iface_param->iface_num); break; } break; default: ql4_printk(KERN_ERR, ha, "Invalid iface type\n"); break; } } else if (iface_param->param_type == ISCSI_IFACE_PARAM) { qla4xxx_set_iscsi_param(ha, iface_param, init_fw_cb); } else { continue; } } init_fw_cb->cookie = 
cpu_to_le32(0x11BEAD5A); rval = qla4xxx_set_flash(ha, init_fw_cb_dma, FLASH_SEGMENT_IFCB, sizeof(struct addr_ctrl_blk), FLASH_OPT_RMW_COMMIT); if (rval != QLA_SUCCESS) { ql4_printk(KERN_ERR, ha, "%s: set flash mbx failed\n", __func__); rval = -EIO; goto exit_init_fw_cb; } rval = qla4xxx_disable_acb(ha); if (rval != QLA_SUCCESS) { ql4_printk(KERN_ERR, ha, "%s: disable acb mbx failed\n", __func__); rval = -EIO; goto exit_init_fw_cb; } wait_for_completion_timeout(&ha->disable_acb_comp, DISABLE_ACB_TOV * HZ); qla4xxx_initcb_to_acb(init_fw_cb); rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma); if (rval != QLA_SUCCESS) { ql4_printk(KERN_ERR, ha, "%s: set acb mbx failed\n", __func__); rval = -EIO; goto exit_init_fw_cb; } memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk)); qla4xxx_update_local_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb, init_fw_cb_dma); exit_init_fw_cb: dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk), init_fw_cb, init_fw_cb_dma); return rval; } static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess, enum iscsi_param param, char *buf) { struct iscsi_session *sess = cls_sess->dd_data; struct ddb_entry *ddb_entry = sess->dd_data; struct scsi_qla_host *ha = ddb_entry->ha; struct iscsi_cls_conn *cls_conn = ddb_entry->conn; struct ql4_chap_table chap_tbl; int rval, len; uint16_t idx; memset(&chap_tbl, 0, sizeof(chap_tbl)); switch (param) { case ISCSI_PARAM_CHAP_IN_IDX: rval = qla4xxx_get_chap_index(ha, sess->username_in, sess->password_in, BIDI_CHAP, &idx); if (rval) len = sprintf(buf, "\n"); else len = sprintf(buf, "%hu\n", idx); break; case ISCSI_PARAM_CHAP_OUT_IDX: if (ddb_entry->ddb_type == FLASH_DDB) { if (ddb_entry->chap_tbl_idx != INVALID_ENTRY) { idx = ddb_entry->chap_tbl_idx; rval = QLA_SUCCESS; } else { rval = QLA_ERROR; } } else { rval = qla4xxx_get_chap_index(ha, sess->username, sess->password, LOCAL_CHAP, &idx); } if (rval) len = sprintf(buf, "\n"); else len = sprintf(buf, "%hu\n", idx); break; case ISCSI_PARAM_USERNAME: case ISCSI_PARAM_PASSWORD: /* First, populate session username and password for FLASH DDB, * if not already done. This happens when session login fails * for a FLASH DDB. 
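	 * The CHAP entry is looked up by ddb_entry->chap_tbl_idx and, on
	 * success, stored with iscsi_set_param() so that the fallthrough
	 * to iscsi_session_get_param() below returns the populated
	 * username/password values.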
*/ if (ddb_entry->ddb_type == FLASH_DDB && ddb_entry->chap_tbl_idx != INVALID_ENTRY && !sess->username && !sess->password) { idx = ddb_entry->chap_tbl_idx; rval = qla4xxx_get_uni_chap_at_index(ha, chap_tbl.name, chap_tbl.secret, idx); if (!rval) { iscsi_set_param(cls_conn, ISCSI_PARAM_USERNAME, (char *)chap_tbl.name, strlen((char *)chap_tbl.name)); iscsi_set_param(cls_conn, ISCSI_PARAM_PASSWORD, (char *)chap_tbl.secret, chap_tbl.secret_len); } } fallthrough; default: return iscsi_session_get_param(cls_sess, param, buf); } return len; } static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param, char *buf) { struct iscsi_conn *conn; struct qla_conn *qla_conn; struct sockaddr *dst_addr; conn = cls_conn->dd_data; qla_conn = conn->dd_data; dst_addr = (struct sockaddr *)&qla_conn->qla_ep->dst_addr; switch (param) { case ISCSI_PARAM_CONN_PORT: case ISCSI_PARAM_CONN_ADDRESS: return iscsi_conn_get_addr_param((struct sockaddr_storage *) dst_addr, param, buf); default: return iscsi_conn_get_param(cls_conn, param, buf); } } int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index) { uint32_t mbx_sts = 0; uint16_t tmp_ddb_index; int ret; get_ddb_index: tmp_ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES); if (tmp_ddb_index >= MAX_DDB_ENTRIES) { DEBUG2(ql4_printk(KERN_INFO, ha, "Free DDB index not available\n")); ret = QLA_ERROR; goto exit_get_ddb_index; } if (test_and_set_bit(tmp_ddb_index, ha->ddb_idx_map)) goto get_ddb_index; DEBUG2(ql4_printk(KERN_INFO, ha, "Found a free DDB index at %d\n", tmp_ddb_index)); ret = qla4xxx_req_ddb_entry(ha, tmp_ddb_index, &mbx_sts); if (ret == QLA_ERROR) { if (mbx_sts == MBOX_STS_COMMAND_ERROR) { ql4_printk(KERN_INFO, ha, "DDB index = %d not available trying next\n", tmp_ddb_index); goto get_ddb_index; } DEBUG2(ql4_printk(KERN_INFO, ha, "Free FW DDB not available\n")); } *ddb_index = tmp_ddb_index; exit_get_ddb_index: return ret; } static int qla4xxx_match_ipaddress(struct scsi_qla_host *ha, struct ddb_entry *ddb_entry, char *existing_ipaddr, char *user_ipaddr) { uint8_t dst_ipaddr[IPv6_ADDR_LEN]; char formatted_ipaddr[DDB_IPADDR_LEN]; int status = QLA_SUCCESS, ret = 0; if (ddb_entry->fw_ddb_entry.options & DDB_OPT_IPV6_DEVICE) { ret = in6_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr, '\0', NULL); if (ret == 0) { status = QLA_ERROR; goto out_match; } ret = sprintf(formatted_ipaddr, "%pI6", dst_ipaddr); } else { ret = in4_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr, '\0', NULL); if (ret == 0) { status = QLA_ERROR; goto out_match; } ret = sprintf(formatted_ipaddr, "%pI4", dst_ipaddr); } if (strcmp(existing_ipaddr, formatted_ipaddr)) status = QLA_ERROR; out_match: return status; } static int qla4xxx_match_fwdb_session(struct scsi_qla_host *ha, struct iscsi_cls_conn *cls_conn) { int idx = 0, max_ddbs, rval; struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn); struct iscsi_session *sess, *existing_sess; struct iscsi_conn *conn, *existing_conn; struct ddb_entry *ddb_entry; sess = cls_sess->dd_data; conn = cls_conn->dd_data; if (sess->targetname == NULL || conn->persistent_address == NULL || conn->persistent_port == 0) return QLA_ERROR; max_ddbs = is_qla40XX(ha) ? 
MAX_DEV_DB_ENTRIES_40XX : MAX_DEV_DB_ENTRIES; for (idx = 0; idx < max_ddbs; idx++) { ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx); if (ddb_entry == NULL) continue; if (ddb_entry->ddb_type != FLASH_DDB) continue; existing_sess = ddb_entry->sess->dd_data; existing_conn = ddb_entry->conn->dd_data; if (existing_sess->targetname == NULL || existing_conn->persistent_address == NULL || existing_conn->persistent_port == 0) continue; DEBUG2(ql4_printk(KERN_INFO, ha, "IQN = %s User IQN = %s\n", existing_sess->targetname, sess->targetname)); DEBUG2(ql4_printk(KERN_INFO, ha, "IP = %s User IP = %s\n", existing_conn->persistent_address, conn->persistent_address)); DEBUG2(ql4_printk(KERN_INFO, ha, "Port = %d User Port = %d\n", existing_conn->persistent_port, conn->persistent_port)); if (strcmp(existing_sess->targetname, sess->targetname)) continue; rval = qla4xxx_match_ipaddress(ha, ddb_entry, existing_conn->persistent_address, conn->persistent_address); if (rval == QLA_ERROR) continue; if (existing_conn->persistent_port != conn->persistent_port) continue; break; } if (idx == max_ddbs) return QLA_ERROR; DEBUG2(ql4_printk(KERN_INFO, ha, "Match found in fwdb sessions\n")); return QLA_SUCCESS; } static struct iscsi_cls_session * qla4xxx_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max, uint16_t qdepth, uint32_t initial_cmdsn) { struct iscsi_cls_session *cls_sess; struct scsi_qla_host *ha; struct qla_endpoint *qla_ep; struct ddb_entry *ddb_entry; uint16_t ddb_index; struct iscsi_session *sess; int ret; if (!ep) { printk(KERN_ERR "qla4xxx: missing ep.\n"); return NULL; } qla_ep = ep->dd_data; ha = to_qla_host(qla_ep->host); DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__, ha->host_no)); ret = qla4xxx_get_ddb_index(ha, &ddb_index); if (ret == QLA_ERROR) return NULL; cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, qla_ep->host, cmds_max, sizeof(struct ddb_entry), sizeof(struct ql4_task_data), initial_cmdsn, ddb_index); if (!cls_sess) return NULL; sess = cls_sess->dd_data; ddb_entry = sess->dd_data; ddb_entry->fw_ddb_index = ddb_index; ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE; ddb_entry->ha = ha; ddb_entry->sess = cls_sess; ddb_entry->unblock_sess = qla4xxx_unblock_ddb; ddb_entry->ddb_change = qla4xxx_ddb_change; clear_bit(DDB_CONN_CLOSE_FAILURE, &ddb_entry->flags); cls_sess->recovery_tmo = ql4xsess_recovery_tmo; ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry; ha->tot_ddbs++; return cls_sess; } static void qla4xxx_session_destroy(struct iscsi_cls_session *cls_sess) { struct iscsi_session *sess; struct ddb_entry *ddb_entry; struct scsi_qla_host *ha; unsigned long flags, wtime; struct dev_db_entry *fw_ddb_entry = NULL; dma_addr_t fw_ddb_entry_dma; uint32_t ddb_state; int ret; sess = cls_sess->dd_data; ddb_entry = sess->dd_data; ha = ddb_entry->ha; DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__, ha->host_no)); fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), &fw_ddb_entry_dma, GFP_KERNEL); if (!fw_ddb_entry) { ql4_printk(KERN_ERR, ha, "%s: Unable to allocate dma buffer\n", __func__); goto destroy_session; } wtime = jiffies + (HZ * LOGOUT_TOV); do { ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry, fw_ddb_entry_dma, NULL, NULL, &ddb_state, NULL, NULL, NULL); if (ret == QLA_ERROR) goto destroy_session; if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) || (ddb_state == DDB_DS_SESSION_FAILED)) goto destroy_session; schedule_timeout_uninterruptible(HZ); } while ((time_after(wtime, jiffies))); 
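	/*
	 * Either the firmware reported the DDB as inactive/failed or the
	 * LOGOUT_TOV wait expired; in both cases fall through and tear the
	 * DDB and the session down.
	 */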
destroy_session: qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index); if (test_and_clear_bit(DDB_CONN_CLOSE_FAILURE, &ddb_entry->flags)) clear_bit(ddb_entry->fw_ddb_index, ha->ddb_idx_map); spin_lock_irqsave(&ha->hardware_lock, flags); qla4xxx_free_ddb(ha, ddb_entry); spin_unlock_irqrestore(&ha->hardware_lock, flags); iscsi_session_teardown(cls_sess); if (fw_ddb_entry) dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), fw_ddb_entry, fw_ddb_entry_dma); } static struct iscsi_cls_conn * qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx) { struct iscsi_cls_conn *cls_conn; struct iscsi_session *sess; struct ddb_entry *ddb_entry; struct scsi_qla_host *ha; cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn), conn_idx); if (!cls_conn) { pr_info("%s: Can not create connection for conn_idx = %u\n", __func__, conn_idx); return NULL; } sess = cls_sess->dd_data; ddb_entry = sess->dd_data; ddb_entry->conn = cls_conn; ha = ddb_entry->ha; DEBUG2(ql4_printk(KERN_INFO, ha, "%s: conn_idx = %u\n", __func__, conn_idx)); return cls_conn; } static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session, struct iscsi_cls_conn *cls_conn, uint64_t transport_fd, int is_leading) { struct iscsi_conn *conn; struct qla_conn *qla_conn; struct iscsi_endpoint *ep; struct ddb_entry *ddb_entry; struct scsi_qla_host *ha; struct iscsi_session *sess; sess = cls_session->dd_data; ddb_entry = sess->dd_data; ha = ddb_entry->ha; DEBUG2(ql4_printk(KERN_INFO, ha, "%s: sid = %d, cid = %d\n", __func__, cls_session->sid, cls_conn->cid)); if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) return -EINVAL; ep = iscsi_lookup_endpoint(transport_fd); if (!ep) return -EINVAL; conn = cls_conn->dd_data; qla_conn = conn->dd_data; qla_conn->qla_ep = ep->dd_data; iscsi_put_endpoint(ep); return 0; } static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn) { struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn); struct iscsi_session *sess; struct ddb_entry *ddb_entry; struct scsi_qla_host *ha; struct dev_db_entry *fw_ddb_entry = NULL; dma_addr_t fw_ddb_entry_dma; uint32_t mbx_sts = 0; int ret = 0; int status = QLA_SUCCESS; sess = cls_sess->dd_data; ddb_entry = sess->dd_data; ha = ddb_entry->ha; DEBUG2(ql4_printk(KERN_INFO, ha, "%s: sid = %d, cid = %d\n", __func__, cls_sess->sid, cls_conn->cid)); /* Check if we have matching FW DDB, if yes then do not * login to this target. This could cause target to logout previous * connection */ ret = qla4xxx_match_fwdb_session(ha, cls_conn); if (ret == QLA_SUCCESS) { ql4_printk(KERN_INFO, ha, "Session already exist in FW.\n"); ret = -EEXIST; goto exit_conn_start; } fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), &fw_ddb_entry_dma, GFP_KERNEL); if (!fw_ddb_entry) { ql4_printk(KERN_ERR, ha, "%s: Unable to allocate dma buffer\n", __func__); ret = -ENOMEM; goto exit_conn_start; } ret = qla4xxx_set_param_ddbentry(ha, ddb_entry, cls_conn, &mbx_sts); if (ret) { /* If iscsid is stopped and started then no need to do * set param again since ddb state will be already * active and FW does not allow set ddb to an * active session. 
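	 * In that case the existing session is simply unblocked and the
	 * start request is treated as a success (see exit_set_param below).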
*/ if (mbx_sts) if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) { ddb_entry->unblock_sess(ddb_entry->sess); goto exit_set_param; } ql4_printk(KERN_ERR, ha, "%s: Failed set param for index[%d]\n", __func__, ddb_entry->fw_ddb_index); goto exit_conn_start; } status = qla4xxx_conn_open(ha, ddb_entry->fw_ddb_index); if (status == QLA_ERROR) { ql4_printk(KERN_ERR, ha, "%s: Login failed: %s\n", __func__, sess->targetname); ret = -EINVAL; goto exit_conn_start; } if (ddb_entry->fw_ddb_device_state == DDB_DS_NO_CONNECTION_ACTIVE) ddb_entry->fw_ddb_device_state = DDB_DS_LOGIN_IN_PROCESS; DEBUG2(printk(KERN_INFO "%s: DDB state [%d]\n", __func__, ddb_entry->fw_ddb_device_state)); exit_set_param: ret = 0; exit_conn_start: if (fw_ddb_entry) dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), fw_ddb_entry, fw_ddb_entry_dma); return ret; } static void qla4xxx_conn_destroy(struct iscsi_cls_conn *cls_conn) { struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn); struct iscsi_session *sess; struct scsi_qla_host *ha; struct ddb_entry *ddb_entry; int options; sess = cls_sess->dd_data; ddb_entry = sess->dd_data; ha = ddb_entry->ha; DEBUG2(ql4_printk(KERN_INFO, ha, "%s: cid = %d\n", __func__, cls_conn->cid)); options = LOGOUT_OPTION_CLOSE_SESSION; if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR) ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__); } static void qla4xxx_task_work(struct work_struct *wdata) { struct ql4_task_data *task_data; struct scsi_qla_host *ha; struct passthru_status *sts; struct iscsi_task *task; struct iscsi_hdr *hdr; uint8_t *data; uint32_t data_len; struct iscsi_conn *conn; int hdr_len; itt_t itt; task_data = container_of(wdata, struct ql4_task_data, task_work); ha = task_data->ha; task = task_data->task; sts = &task_data->sts; hdr_len = sizeof(struct iscsi_hdr); DEBUG3(printk(KERN_INFO "Status returned\n")); DEBUG3(qla4xxx_dump_buffer(sts, 64)); DEBUG3(printk(KERN_INFO "Response buffer")); DEBUG3(qla4xxx_dump_buffer(task_data->resp_buffer, 64)); conn = task->conn; switch (sts->completionStatus) { case PASSTHRU_STATUS_COMPLETE: hdr = (struct iscsi_hdr *)task_data->resp_buffer; /* Assign back the itt in hdr, until we use the PREASSIGN_TAG */ itt = sts->handle; hdr->itt = itt; data = task_data->resp_buffer + hdr_len; data_len = task_data->resp_len - hdr_len; iscsi_complete_pdu(conn, hdr, data, data_len); break; default: ql4_printk(KERN_ERR, ha, "Passthru failed status = 0x%x\n", sts->completionStatus); break; } return; } static int qla4xxx_alloc_pdu(struct iscsi_task *task, uint8_t opcode) { struct ql4_task_data *task_data; struct iscsi_session *sess; struct ddb_entry *ddb_entry; struct scsi_qla_host *ha; int hdr_len; sess = task->conn->session; ddb_entry = sess->dd_data; ha = ddb_entry->ha; task_data = task->dd_data; memset(task_data, 0, sizeof(struct ql4_task_data)); if (task->sc) { ql4_printk(KERN_INFO, ha, "%s: SCSI Commands not implemented\n", __func__); return -EINVAL; } hdr_len = sizeof(struct iscsi_hdr); task_data->ha = ha; task_data->task = task; if (task->data_count) { task_data->data_dma = dma_map_single(&ha->pdev->dev, task->data, task->data_count, DMA_TO_DEVICE); } DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hrd %d\n", __func__, task->conn->max_recv_dlength, hdr_len)); task_data->resp_len = task->conn->max_recv_dlength + hdr_len; task_data->resp_buffer = dma_alloc_coherent(&ha->pdev->dev, task_data->resp_len, &task_data->resp_dma, GFP_ATOMIC); if (!task_data->resp_buffer) goto exit_alloc_pdu; 
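	/*
	 * The request buffer below holds the iSCSI header plus any
	 * immediate data; it is handed to libiscsi as task->hdr so the PDU
	 * is built directly into DMA-able memory.
	 */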
task_data->req_len = task->data_count + hdr_len; task_data->req_buffer = dma_alloc_coherent(&ha->pdev->dev, task_data->req_len, &task_data->req_dma, GFP_ATOMIC); if (!task_data->req_buffer) goto exit_alloc_pdu; task->hdr = task_data->req_buffer; INIT_WORK(&task_data->task_work, qla4xxx_task_work); return 0; exit_alloc_pdu: if (task_data->resp_buffer) dma_free_coherent(&ha->pdev->dev, task_data->resp_len, task_data->resp_buffer, task_data->resp_dma); if (task_data->req_buffer) dma_free_coherent(&ha->pdev->dev, task_data->req_len, task_data->req_buffer, task_data->req_dma); return -ENOMEM; } static void qla4xxx_task_cleanup(struct iscsi_task *task) { struct ql4_task_data *task_data; struct iscsi_session *sess; struct ddb_entry *ddb_entry; struct scsi_qla_host *ha; int hdr_len; hdr_len = sizeof(struct iscsi_hdr); sess = task->conn->session; ddb_entry = sess->dd_data; ha = ddb_entry->ha; task_data = task->dd_data; if (task->data_count) { dma_unmap_single(&ha->pdev->dev, task_data->data_dma, task->data_count, DMA_TO_DEVICE); } DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hrd %d\n", __func__, task->conn->max_recv_dlength, hdr_len)); dma_free_coherent(&ha->pdev->dev, task_data->resp_len, task_data->resp_buffer, task_data->resp_dma); dma_free_coherent(&ha->pdev->dev, task_data->req_len, task_data->req_buffer, task_data->req_dma); return; } static int qla4xxx_task_xmit(struct iscsi_task *task) { struct scsi_cmnd *sc = task->sc; struct iscsi_session *sess = task->conn->session; struct ddb_entry *ddb_entry = sess->dd_data; struct scsi_qla_host *ha = ddb_entry->ha; if (!sc) return qla4xxx_send_passthru0(task); ql4_printk(KERN_INFO, ha, "%s: scsi cmd xmit not implemented\n", __func__); return -ENOSYS; } static int qla4xxx_copy_from_fwddb_param(struct iscsi_bus_flash_session *sess, struct iscsi_bus_flash_conn *conn, struct dev_db_entry *fw_ddb_entry) { unsigned long options = 0; int rc = 0; options = le16_to_cpu(fw_ddb_entry->options); conn->is_fw_assigned_ipv6 = test_bit(OPT_IS_FW_ASSIGNED_IPV6, &options); if (test_bit(OPT_IPV6_DEVICE, &options)) { rc = iscsi_switch_str_param(&sess->portal_type, PORTAL_TYPE_IPV6); if (rc) goto exit_copy; } else { rc = iscsi_switch_str_param(&sess->portal_type, PORTAL_TYPE_IPV4); if (rc) goto exit_copy; } sess->auto_snd_tgt_disable = test_bit(OPT_AUTO_SENDTGTS_DISABLE, &options); sess->discovery_sess = test_bit(OPT_DISC_SESSION, &options); sess->entry_state = test_bit(OPT_ENTRY_STATE, &options); options = le16_to_cpu(fw_ddb_entry->iscsi_options); conn->hdrdgst_en = test_bit(ISCSIOPT_HEADER_DIGEST_EN, &options); conn->datadgst_en = test_bit(ISCSIOPT_DATA_DIGEST_EN, &options); sess->imm_data_en = test_bit(ISCSIOPT_IMMEDIATE_DATA_EN, &options); sess->initial_r2t_en = test_bit(ISCSIOPT_INITIAL_R2T_EN, &options); sess->dataseq_inorder_en = test_bit(ISCSIOPT_DATA_SEQ_IN_ORDER, &options); sess->pdu_inorder_en = test_bit(ISCSIOPT_DATA_PDU_IN_ORDER, &options); sess->chap_auth_en = test_bit(ISCSIOPT_CHAP_AUTH_EN, &options); conn->snack_req_en = test_bit(ISCSIOPT_SNACK_REQ_EN, &options); sess->discovery_logout_en = test_bit(ISCSIOPT_DISCOVERY_LOGOUT_EN, &options); sess->bidi_chap_en = test_bit(ISCSIOPT_BIDI_CHAP_EN, &options); sess->discovery_auth_optional = test_bit(ISCSIOPT_DISCOVERY_AUTH_OPTIONAL, &options); if (test_bit(ISCSIOPT_ERL1, &options)) sess->erl |= BIT_1; if (test_bit(ISCSIOPT_ERL0, &options)) sess->erl |= BIT_0; options = le16_to_cpu(fw_ddb_entry->tcp_options); conn->tcp_timestamp_stat = test_bit(TCPOPT_TIMESTAMP_STAT, &options); 
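	/*
	 * The remaining TCP options follow. TCPOPT_TIMER_SCALE1..3 are
	 * gathered into bits 1..3 and then shifted right once, e.g.
	 * SCALE3 and SCALE1 set gives (BIT_3 | BIT_1) >> 1 = 5.
	 */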
conn->tcp_nagle_disable = test_bit(TCPOPT_NAGLE_DISABLE, &options); conn->tcp_wsf_disable = test_bit(TCPOPT_WSF_DISABLE, &options); if (test_bit(TCPOPT_TIMER_SCALE3, &options)) conn->tcp_timer_scale |= BIT_3; if (test_bit(TCPOPT_TIMER_SCALE2, &options)) conn->tcp_timer_scale |= BIT_2; if (test_bit(TCPOPT_TIMER_SCALE1, &options)) conn->tcp_timer_scale |= BIT_1; conn->tcp_timer_scale >>= 1; conn->tcp_timestamp_en = test_bit(TCPOPT_TIMESTAMP_EN, &options); options = le16_to_cpu(fw_ddb_entry->ip_options); conn->fragment_disable = test_bit(IPOPT_FRAGMENT_DISABLE, &options); conn->max_recv_dlength = BYTE_UNITS * le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len); conn->max_xmit_dlength = BYTE_UNITS * le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len); sess->first_burst = BYTE_UNITS * le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len); sess->max_burst = BYTE_UNITS * le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len); sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t); sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait); sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain); sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp); conn->max_segment_size = le16_to_cpu(fw_ddb_entry->mss); conn->tcp_xmit_wsf = fw_ddb_entry->tcp_xmt_wsf; conn->tcp_recv_wsf = fw_ddb_entry->tcp_rcv_wsf; conn->ipv6_flow_label = le16_to_cpu(fw_ddb_entry->ipv6_flow_lbl); conn->keepalive_timeout = le16_to_cpu(fw_ddb_entry->ka_timeout); conn->local_port = le16_to_cpu(fw_ddb_entry->lcl_port); conn->statsn = le32_to_cpu(fw_ddb_entry->stat_sn); conn->exp_statsn = le32_to_cpu(fw_ddb_entry->exp_stat_sn); sess->discovery_parent_idx = le16_to_cpu(fw_ddb_entry->ddb_link); sess->discovery_parent_type = le16_to_cpu(fw_ddb_entry->ddb_link); sess->chap_out_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx); sess->tsid = le16_to_cpu(fw_ddb_entry->tsid); sess->default_taskmgmt_timeout = le16_to_cpu(fw_ddb_entry->def_timeout); conn->port = le16_to_cpu(fw_ddb_entry->port); options = le16_to_cpu(fw_ddb_entry->options); conn->ipaddress = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL); if (!conn->ipaddress) { rc = -ENOMEM; goto exit_copy; } conn->redirect_ipaddr = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL); if (!conn->redirect_ipaddr) { rc = -ENOMEM; goto exit_copy; } memcpy(conn->ipaddress, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN); memcpy(conn->redirect_ipaddr, fw_ddb_entry->tgt_addr, IPv6_ADDR_LEN); if (test_bit(OPT_IPV6_DEVICE, &options)) { conn->ipv6_traffic_class = fw_ddb_entry->ipv4_tos; conn->link_local_ipv6_addr = kmemdup( fw_ddb_entry->link_local_ipv6_addr, IPv6_ADDR_LEN, GFP_KERNEL); if (!conn->link_local_ipv6_addr) { rc = -ENOMEM; goto exit_copy; } } else { conn->ipv4_tos = fw_ddb_entry->ipv4_tos; } if (fw_ddb_entry->iscsi_name[0]) { rc = iscsi_switch_str_param(&sess->targetname, (char *)fw_ddb_entry->iscsi_name); if (rc) goto exit_copy; } if (fw_ddb_entry->iscsi_alias[0]) { rc = iscsi_switch_str_param(&sess->targetalias, (char *)fw_ddb_entry->iscsi_alias); if (rc) goto exit_copy; } COPY_ISID(sess->isid, fw_ddb_entry->isid); exit_copy: return rc; } static int qla4xxx_copy_to_fwddb_param(struct iscsi_bus_flash_session *sess, struct iscsi_bus_flash_conn *conn, struct dev_db_entry *fw_ddb_entry) { uint16_t options; options = le16_to_cpu(fw_ddb_entry->options); SET_BITVAL(conn->is_fw_assigned_ipv6, options, BIT_11); if (!strncmp(sess->portal_type, PORTAL_TYPE_IPV6, 4)) options |= BIT_8; else options &= ~BIT_8; SET_BITVAL(sess->auto_snd_tgt_disable, options, BIT_6); SET_BITVAL(sess->discovery_sess, options, BIT_4); 
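	/*
	 * The BIT_n positions used below mirror the ISCSIOPT_ and TCPOPT_
	 * flag positions read by qla4xxx_copy_from_fwddb_param(), so a
	 * round trip between the two helpers should leave the firmware DDB
	 * option words unchanged.
	 */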
SET_BITVAL(sess->entry_state, options, BIT_3); fw_ddb_entry->options = cpu_to_le16(options); options = le16_to_cpu(fw_ddb_entry->iscsi_options); SET_BITVAL(conn->hdrdgst_en, options, BIT_13); SET_BITVAL(conn->datadgst_en, options, BIT_12); SET_BITVAL(sess->imm_data_en, options, BIT_11); SET_BITVAL(sess->initial_r2t_en, options, BIT_10); SET_BITVAL(sess->dataseq_inorder_en, options, BIT_9); SET_BITVAL(sess->pdu_inorder_en, options, BIT_8); SET_BITVAL(sess->chap_auth_en, options, BIT_7); SET_BITVAL(conn->snack_req_en, options, BIT_6); SET_BITVAL(sess->discovery_logout_en, options, BIT_5); SET_BITVAL(sess->bidi_chap_en, options, BIT_4); SET_BITVAL(sess->discovery_auth_optional, options, BIT_3); SET_BITVAL(sess->erl & BIT_1, options, BIT_1); SET_BITVAL(sess->erl & BIT_0, options, BIT_0); fw_ddb_entry->iscsi_options = cpu_to_le16(options); options = le16_to_cpu(fw_ddb_entry->tcp_options); SET_BITVAL(conn->tcp_timestamp_stat, options, BIT_6); SET_BITVAL(conn->tcp_nagle_disable, options, BIT_5); SET_BITVAL(conn->tcp_wsf_disable, options, BIT_4); SET_BITVAL(conn->tcp_timer_scale & BIT_2, options, BIT_3); SET_BITVAL(conn->tcp_timer_scale & BIT_1, options, BIT_2); SET_BITVAL(conn->tcp_timer_scale & BIT_0, options, BIT_1); SET_BITVAL(conn->tcp_timestamp_en, options, BIT_0); fw_ddb_entry->tcp_options = cpu_to_le16(options); options = le16_to_cpu(fw_ddb_entry->ip_options); SET_BITVAL(conn->fragment_disable, options, BIT_4); fw_ddb_entry->ip_options = cpu_to_le16(options); fw_ddb_entry->iscsi_max_outsnd_r2t = cpu_to_le16(sess->max_r2t); fw_ddb_entry->iscsi_max_rcv_data_seg_len = cpu_to_le16(conn->max_recv_dlength / BYTE_UNITS); fw_ddb_entry->iscsi_max_snd_data_seg_len = cpu_to_le16(conn->max_xmit_dlength / BYTE_UNITS); fw_ddb_entry->iscsi_first_burst_len = cpu_to_le16(sess->first_burst / BYTE_UNITS); fw_ddb_entry->iscsi_max_burst_len = cpu_to_le16(sess->max_burst / BYTE_UNITS); fw_ddb_entry->iscsi_def_time2wait = cpu_to_le16(sess->time2wait); fw_ddb_entry->iscsi_def_time2retain = cpu_to_le16(sess->time2retain); fw_ddb_entry->tgt_portal_grp = cpu_to_le16(sess->tpgt); fw_ddb_entry->mss = cpu_to_le16(conn->max_segment_size); fw_ddb_entry->tcp_xmt_wsf = (uint8_t) cpu_to_le32(conn->tcp_xmit_wsf); fw_ddb_entry->tcp_rcv_wsf = (uint8_t) cpu_to_le32(conn->tcp_recv_wsf); fw_ddb_entry->ipv6_flow_lbl = cpu_to_le16(conn->ipv6_flow_label); fw_ddb_entry->ka_timeout = cpu_to_le16(conn->keepalive_timeout); fw_ddb_entry->lcl_port = cpu_to_le16(conn->local_port); fw_ddb_entry->stat_sn = cpu_to_le32(conn->statsn); fw_ddb_entry->exp_stat_sn = cpu_to_le32(conn->exp_statsn); fw_ddb_entry->ddb_link = cpu_to_le16(sess->discovery_parent_idx); fw_ddb_entry->chap_tbl_idx = cpu_to_le16(sess->chap_out_idx); fw_ddb_entry->tsid = cpu_to_le16(sess->tsid); fw_ddb_entry->port = cpu_to_le16(conn->port); fw_ddb_entry->def_timeout = cpu_to_le16(sess->default_taskmgmt_timeout); if (!strncmp(sess->portal_type, PORTAL_TYPE_IPV6, 4)) fw_ddb_entry->ipv4_tos = conn->ipv6_traffic_class; else fw_ddb_entry->ipv4_tos = conn->ipv4_tos; if (conn->ipaddress) memcpy(fw_ddb_entry->ip_addr, conn->ipaddress, sizeof(fw_ddb_entry->ip_addr)); if (conn->redirect_ipaddr) memcpy(fw_ddb_entry->tgt_addr, conn->redirect_ipaddr, sizeof(fw_ddb_entry->tgt_addr)); if (conn->link_local_ipv6_addr) memcpy(fw_ddb_entry->link_local_ipv6_addr, conn->link_local_ipv6_addr, sizeof(fw_ddb_entry->link_local_ipv6_addr)); if (sess->targetname) memcpy(fw_ddb_entry->iscsi_name, sess->targetname, sizeof(fw_ddb_entry->iscsi_name)); if (sess->targetalias) 
memcpy(fw_ddb_entry->iscsi_alias, sess->targetalias, sizeof(fw_ddb_entry->iscsi_alias)); COPY_ISID(fw_ddb_entry->isid, sess->isid); return 0; } static void qla4xxx_copy_to_sess_conn_params(struct iscsi_conn *conn, struct iscsi_session *sess, struct dev_db_entry *fw_ddb_entry) { unsigned long options = 0; uint16_t ddb_link; uint16_t disc_parent; char ip_addr[DDB_IPADDR_LEN]; options = le16_to_cpu(fw_ddb_entry->options); conn->is_fw_assigned_ipv6 = test_bit(OPT_IS_FW_ASSIGNED_IPV6, &options); sess->auto_snd_tgt_disable = test_bit(OPT_AUTO_SENDTGTS_DISABLE, &options); sess->discovery_sess = test_bit(OPT_DISC_SESSION, &options); options = le16_to_cpu(fw_ddb_entry->iscsi_options); conn->hdrdgst_en = test_bit(ISCSIOPT_HEADER_DIGEST_EN, &options); conn->datadgst_en = test_bit(ISCSIOPT_DATA_DIGEST_EN, &options); sess->imm_data_en = test_bit(ISCSIOPT_IMMEDIATE_DATA_EN, &options); sess->initial_r2t_en = test_bit(ISCSIOPT_INITIAL_R2T_EN, &options); sess->dataseq_inorder_en = test_bit(ISCSIOPT_DATA_SEQ_IN_ORDER, &options); sess->pdu_inorder_en = test_bit(ISCSIOPT_DATA_PDU_IN_ORDER, &options); sess->chap_auth_en = test_bit(ISCSIOPT_CHAP_AUTH_EN, &options); sess->discovery_logout_en = test_bit(ISCSIOPT_DISCOVERY_LOGOUT_EN, &options); sess->bidi_chap_en = test_bit(ISCSIOPT_BIDI_CHAP_EN, &options); sess->discovery_auth_optional = test_bit(ISCSIOPT_DISCOVERY_AUTH_OPTIONAL, &options); if (test_bit(ISCSIOPT_ERL1, &options)) sess->erl |= BIT_1; if (test_bit(ISCSIOPT_ERL0, &options)) sess->erl |= BIT_0; options = le16_to_cpu(fw_ddb_entry->tcp_options); conn->tcp_timestamp_stat = test_bit(TCPOPT_TIMESTAMP_STAT, &options); conn->tcp_nagle_disable = test_bit(TCPOPT_NAGLE_DISABLE, &options); conn->tcp_wsf_disable = test_bit(TCPOPT_WSF_DISABLE, &options); if (test_bit(TCPOPT_TIMER_SCALE3, &options)) conn->tcp_timer_scale |= BIT_3; if (test_bit(TCPOPT_TIMER_SCALE2, &options)) conn->tcp_timer_scale |= BIT_2; if (test_bit(TCPOPT_TIMER_SCALE1, &options)) conn->tcp_timer_scale |= BIT_1; conn->tcp_timer_scale >>= 1; conn->tcp_timestamp_en = test_bit(TCPOPT_TIMESTAMP_EN, &options); options = le16_to_cpu(fw_ddb_entry->ip_options); conn->fragment_disable = test_bit(IPOPT_FRAGMENT_DISABLE, &options); conn->max_recv_dlength = BYTE_UNITS * le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len); conn->max_xmit_dlength = BYTE_UNITS * le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len); sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t); sess->first_burst = BYTE_UNITS * le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len); sess->max_burst = BYTE_UNITS * le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len); sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait); sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain); sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp); conn->max_segment_size = le16_to_cpu(fw_ddb_entry->mss); conn->tcp_xmit_wsf = fw_ddb_entry->tcp_xmt_wsf; conn->tcp_recv_wsf = fw_ddb_entry->tcp_rcv_wsf; conn->ipv4_tos = fw_ddb_entry->ipv4_tos; conn->keepalive_tmo = le16_to_cpu(fw_ddb_entry->ka_timeout); conn->local_port = le16_to_cpu(fw_ddb_entry->lcl_port); conn->statsn = le32_to_cpu(fw_ddb_entry->stat_sn); conn->exp_statsn = le32_to_cpu(fw_ddb_entry->exp_stat_sn); sess->tsid = le16_to_cpu(fw_ddb_entry->tsid); COPY_ISID(sess->isid, fw_ddb_entry->isid); ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link); if (ddb_link == DDB_ISNS) disc_parent = ISCSI_DISC_PARENT_ISNS; else if (ddb_link == DDB_NO_LINK) disc_parent = ISCSI_DISC_PARENT_UNKNOWN; else if (ddb_link < MAX_DDB_ENTRIES) disc_parent 
= ISCSI_DISC_PARENT_SENDTGT; else disc_parent = ISCSI_DISC_PARENT_UNKNOWN; iscsi_set_param(conn->cls_conn, ISCSI_PARAM_DISCOVERY_PARENT_TYPE, iscsi_get_discovery_parent_name(disc_parent), 0); iscsi_set_param(conn->cls_conn, ISCSI_PARAM_TARGET_ALIAS, (char *)fw_ddb_entry->iscsi_alias, 0); options = le16_to_cpu(fw_ddb_entry->options); if (options & DDB_OPT_IPV6_DEVICE) { memset(ip_addr, 0, sizeof(ip_addr)); sprintf(ip_addr, "%pI6", fw_ddb_entry->link_local_ipv6_addr); iscsi_set_param(conn->cls_conn, ISCSI_PARAM_LOCAL_IPADDR, (char *)ip_addr, 0); } } static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha, struct dev_db_entry *fw_ddb_entry, struct iscsi_cls_session *cls_sess, struct iscsi_cls_conn *cls_conn) { int buflen = 0; struct iscsi_session *sess; struct ddb_entry *ddb_entry; struct ql4_chap_table chap_tbl; struct iscsi_conn *conn; char ip_addr[DDB_IPADDR_LEN]; uint16_t options = 0; sess = cls_sess->dd_data; ddb_entry = sess->dd_data; conn = cls_conn->dd_data; memset(&chap_tbl, 0, sizeof(chap_tbl)); ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx); qla4xxx_copy_to_sess_conn_params(conn, sess, fw_ddb_entry); sess->def_taskmgmt_tmo = le16_to_cpu(fw_ddb_entry->def_timeout); conn->persistent_port = le16_to_cpu(fw_ddb_entry->port); memset(ip_addr, 0, sizeof(ip_addr)); options = le16_to_cpu(fw_ddb_entry->options); if (options & DDB_OPT_IPV6_DEVICE) { iscsi_set_param(cls_conn, ISCSI_PARAM_PORTAL_TYPE, "ipv6", 4); memset(ip_addr, 0, sizeof(ip_addr)); sprintf(ip_addr, "%pI6", fw_ddb_entry->ip_addr); } else { iscsi_set_param(cls_conn, ISCSI_PARAM_PORTAL_TYPE, "ipv4", 4); sprintf(ip_addr, "%pI4", fw_ddb_entry->ip_addr); } iscsi_set_param(cls_conn, ISCSI_PARAM_PERSISTENT_ADDRESS, (char *)ip_addr, buflen); iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_NAME, (char *)fw_ddb_entry->iscsi_name, buflen); iscsi_set_param(cls_conn, ISCSI_PARAM_INITIATOR_NAME, (char *)ha->name_string, buflen); if (ddb_entry->chap_tbl_idx != INVALID_ENTRY) { if (!qla4xxx_get_uni_chap_at_index(ha, chap_tbl.name, chap_tbl.secret, ddb_entry->chap_tbl_idx)) { iscsi_set_param(cls_conn, ISCSI_PARAM_USERNAME, (char *)chap_tbl.name, strlen((char *)chap_tbl.name)); iscsi_set_param(cls_conn, ISCSI_PARAM_PASSWORD, (char *)chap_tbl.secret, chap_tbl.secret_len); } } } void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha, struct ddb_entry *ddb_entry) { struct iscsi_cls_session *cls_sess; struct iscsi_cls_conn *cls_conn; uint32_t ddb_state; dma_addr_t fw_ddb_entry_dma; struct dev_db_entry *fw_ddb_entry; fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), &fw_ddb_entry_dma, GFP_KERNEL); if (!fw_ddb_entry) { ql4_printk(KERN_ERR, ha, "%s: Unable to allocate dma buffer\n", __func__); goto exit_session_conn_fwddb_param; } if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry, fw_ddb_entry_dma, NULL, NULL, &ddb_state, NULL, NULL, NULL) == QLA_ERROR) { DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed " "get_ddb_entry for fw_ddb_index %d\n", ha->host_no, __func__, ddb_entry->fw_ddb_index)); goto exit_session_conn_fwddb_param; } cls_sess = ddb_entry->sess; cls_conn = ddb_entry->conn; /* Update params */ qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn); exit_session_conn_fwddb_param: if (fw_ddb_entry) dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), fw_ddb_entry, fw_ddb_entry_dma); } void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha, struct ddb_entry *ddb_entry) { struct iscsi_cls_session *cls_sess; struct iscsi_cls_conn *cls_conn; 
struct iscsi_session *sess; struct iscsi_conn *conn; uint32_t ddb_state; dma_addr_t fw_ddb_entry_dma; struct dev_db_entry *fw_ddb_entry; fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), &fw_ddb_entry_dma, GFP_KERNEL); if (!fw_ddb_entry) { ql4_printk(KERN_ERR, ha, "%s: Unable to allocate dma buffer\n", __func__); goto exit_session_conn_param; } if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry, fw_ddb_entry_dma, NULL, NULL, &ddb_state, NULL, NULL, NULL) == QLA_ERROR) { DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed " "get_ddb_entry for fw_ddb_index %d\n", ha->host_no, __func__, ddb_entry->fw_ddb_index)); goto exit_session_conn_param; } cls_sess = ddb_entry->sess; sess = cls_sess->dd_data; cls_conn = ddb_entry->conn; conn = cls_conn->dd_data; /* Update timers after login */ ddb_entry->default_relogin_timeout = (le16_to_cpu(fw_ddb_entry->def_timeout) > LOGIN_TOV) && (le16_to_cpu(fw_ddb_entry->def_timeout) < LOGIN_TOV * 10) ? le16_to_cpu(fw_ddb_entry->def_timeout) : LOGIN_TOV; ddb_entry->default_time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait); /* Update params */ ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx); qla4xxx_copy_to_sess_conn_params(conn, sess, fw_ddb_entry); memcpy(sess->initiatorname, ha->name_string, min(sizeof(ha->name_string), sizeof(sess->initiatorname))); exit_session_conn_param: if (fw_ddb_entry) dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), fw_ddb_entry, fw_ddb_entry_dma); } /* * Timer routines */ static void qla4xxx_timer(struct timer_list *t); static void qla4xxx_start_timer(struct scsi_qla_host *ha, unsigned long interval) { DEBUG(printk("scsi: %s: Starting timer thread for adapter %d\n", __func__, ha->host->host_no)); timer_setup(&ha->timer, qla4xxx_timer, 0); ha->timer.expires = jiffies + interval * HZ; add_timer(&ha->timer); ha->timer_active = 1; } static void qla4xxx_stop_timer(struct scsi_qla_host *ha) { del_timer_sync(&ha->timer); ha->timer_active = 0; } /*** * qla4xxx_mark_device_missing - blocks the session * @cls_session: Pointer to the session to be blocked * @ddb_entry: Pointer to device database entry * * This routine marks a device missing and close connection. **/ void qla4xxx_mark_device_missing(struct iscsi_cls_session *cls_session) { iscsi_block_session(cls_session); } /** * qla4xxx_mark_all_devices_missing - mark all devices as missing. * @ha: Pointer to host adapter structure. * * This routine marks a device missing and resets the relogin retry count. 
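 * Iterates over every session on the host and blocks each one via
 * qla4xxx_mark_device_missing().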
**/ void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha) { iscsi_host_for_each_session(ha->host, qla4xxx_mark_device_missing); } static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha, struct ddb_entry *ddb_entry, struct scsi_cmnd *cmd) { struct srb *srb; srb = mempool_alloc(ha->srb_mempool, GFP_ATOMIC); if (!srb) return srb; kref_init(&srb->srb_ref); srb->ha = ha; srb->ddb = ddb_entry; srb->cmd = cmd; srb->flags = 0; qla4xxx_cmd_priv(cmd)->srb = srb; return srb; } static void qla4xxx_srb_free_dma(struct scsi_qla_host *ha, struct srb *srb) { struct scsi_cmnd *cmd = srb->cmd; if (srb->flags & SRB_DMA_VALID) { scsi_dma_unmap(cmd); srb->flags &= ~SRB_DMA_VALID; } qla4xxx_cmd_priv(cmd)->srb = NULL; } void qla4xxx_srb_compl(struct kref *ref) { struct srb *srb = container_of(ref, struct srb, srb_ref); struct scsi_cmnd *cmd = srb->cmd; struct scsi_qla_host *ha = srb->ha; qla4xxx_srb_free_dma(ha, srb); mempool_free(srb, ha->srb_mempool); scsi_done(cmd); } /** * qla4xxx_queuecommand - scsi layer issues scsi command to driver. * @host: scsi host * @cmd: Pointer to Linux's SCSI command structure * * Remarks: * This routine is invoked by Linux to send a SCSI command to the driver. * The mid-level driver tries to ensure that queuecommand never gets * invoked concurrently with itself or the interrupt handler (although * the interrupt handler may call this routine as part of request- * completion handling). Unfortunely, it sometimes calls the scheduler * in interrupt context which is a big NO! NO!. **/ static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) { struct scsi_qla_host *ha = to_qla_host(host); struct ddb_entry *ddb_entry = cmd->device->hostdata; struct iscsi_cls_session *sess = ddb_entry->sess; struct srb *srb; int rval; if (test_bit(AF_EEH_BUSY, &ha->flags)) { if (test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags)) cmd->result = DID_NO_CONNECT << 16; else cmd->result = DID_REQUEUE << 16; goto qc_fail_command; } if (!sess) { cmd->result = DID_IMM_RETRY << 16; goto qc_fail_command; } rval = iscsi_session_chkready(sess); if (rval) { cmd->result = rval; goto qc_fail_command; } if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) || test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) || test_bit(DPC_RESET_HA, &ha->dpc_flags) || test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) || test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) || !test_bit(AF_ONLINE, &ha->flags) || !test_bit(AF_LINK_UP, &ha->flags) || test_bit(AF_LOOPBACK, &ha->flags) || test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags) || test_bit(DPC_RESTORE_ACB, &ha->dpc_flags) || test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) goto qc_host_busy; srb = qla4xxx_get_new_srb(ha, ddb_entry, cmd); if (!srb) goto qc_host_busy; rval = qla4xxx_send_command_to_isp(ha, srb); if (rval != QLA_SUCCESS) goto qc_host_busy_free_sp; return 0; qc_host_busy_free_sp: qla4xxx_srb_free_dma(ha, srb); mempool_free(srb, ha->srb_mempool); qc_host_busy: return SCSI_MLQUEUE_HOST_BUSY; qc_fail_command: scsi_done(cmd); return 0; } /** * qla4xxx_mem_free - frees memory allocated to adapter * @ha: Pointer to host adapter structure. 
* * Frees memory previously allocated by qla4xxx_mem_alloc **/ static void qla4xxx_mem_free(struct scsi_qla_host *ha) { if (ha->queues) dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues, ha->queues_dma); vfree(ha->fw_dump); ha->queues_len = 0; ha->queues = NULL; ha->queues_dma = 0; ha->request_ring = NULL; ha->request_dma = 0; ha->response_ring = NULL; ha->response_dma = 0; ha->shadow_regs = NULL; ha->shadow_regs_dma = 0; ha->fw_dump = NULL; ha->fw_dump_size = 0; /* Free srb pool. */ mempool_destroy(ha->srb_mempool); ha->srb_mempool = NULL; dma_pool_destroy(ha->chap_dma_pool); vfree(ha->chap_list); ha->chap_list = NULL; dma_pool_destroy(ha->fw_ddb_dma_pool); /* release io space registers */ if (is_qla8022(ha)) { if (ha->nx_pcibase) iounmap( (struct device_reg_82xx __iomem *)ha->nx_pcibase); } else if (is_qla8032(ha) || is_qla8042(ha)) { if (ha->nx_pcibase) iounmap( (struct device_reg_83xx __iomem *)ha->nx_pcibase); } else if (ha->reg) { iounmap(ha->reg); } vfree(ha->reset_tmplt.buff); pci_release_regions(ha->pdev); } /** * qla4xxx_mem_alloc - allocates memory for use by adapter. * @ha: Pointer to host adapter structure * * Allocates DMA memory for request and response queues. Also allocates memory * for srbs. **/ static int qla4xxx_mem_alloc(struct scsi_qla_host *ha) { unsigned long align; /* Allocate contiguous block of DMA memory for queues. */ ha->queues_len = ((REQUEST_QUEUE_DEPTH * QUEUE_SIZE) + (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE) + sizeof(struct shadow_regs) + MEM_ALIGN_VALUE + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); ha->queues = dma_alloc_coherent(&ha->pdev->dev, ha->queues_len, &ha->queues_dma, GFP_KERNEL); if (ha->queues == NULL) { ql4_printk(KERN_WARNING, ha, "Memory Allocation failed - queues.\n"); goto mem_alloc_error_exit; } /* * As per RISC alignment requirements -- the bus-address must be a * multiple of the request-ring size (in bytes). */ align = 0; if ((unsigned long)ha->queues_dma & (MEM_ALIGN_VALUE - 1)) align = MEM_ALIGN_VALUE - ((unsigned long)ha->queues_dma & (MEM_ALIGN_VALUE - 1)); /* Update request and response queue pointers. */ ha->request_dma = ha->queues_dma + align; ha->request_ring = (struct queue_entry *) (ha->queues + align); ha->response_dma = ha->queues_dma + align + (REQUEST_QUEUE_DEPTH * QUEUE_SIZE); ha->response_ring = (struct queue_entry *) (ha->queues + align + (REQUEST_QUEUE_DEPTH * QUEUE_SIZE)); ha->shadow_regs_dma = ha->queues_dma + align + (REQUEST_QUEUE_DEPTH * QUEUE_SIZE) + (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE); ha->shadow_regs = (struct shadow_regs *) (ha->queues + align + (REQUEST_QUEUE_DEPTH * QUEUE_SIZE) + (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE)); /* Allocate memory for srb pool. */ ha->srb_mempool = mempool_create(SRB_MIN_REQ, mempool_alloc_slab, mempool_free_slab, srb_cachep); if (ha->srb_mempool == NULL) { ql4_printk(KERN_WARNING, ha, "Memory Allocation failed - SRB Pool.\n"); goto mem_alloc_error_exit; } ha->chap_dma_pool = dma_pool_create("ql4_chap", &ha->pdev->dev, CHAP_DMA_BLOCK_SIZE, 8, 0); if (ha->chap_dma_pool == NULL) { ql4_printk(KERN_WARNING, ha, "%s: chap_dma_pool allocation failed..\n", __func__); goto mem_alloc_error_exit; } ha->fw_ddb_dma_pool = dma_pool_create("ql4_fw_ddb", &ha->pdev->dev, DDB_DMA_BLOCK_SIZE, 8, 0); if (ha->fw_ddb_dma_pool == NULL) { ql4_printk(KERN_WARNING, ha, "%s: fw_ddb_dma_pool allocation failed..\n", __func__); goto mem_alloc_error_exit; } return QLA_SUCCESS; mem_alloc_error_exit: return QLA_ERROR; } /** * qla4_8xxx_check_temp - Check the ISP82XX temperature. * @ha: adapter block pointer. 
* * Note: The caller should not hold the idc lock. **/ static int qla4_8xxx_check_temp(struct scsi_qla_host *ha) { uint32_t temp, temp_state, temp_val; int status = QLA_SUCCESS; temp = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_TEMP_STATE); temp_state = qla82xx_get_temp_state(temp); temp_val = qla82xx_get_temp_val(temp); if (temp_state == QLA82XX_TEMP_PANIC) { ql4_printk(KERN_WARNING, ha, "Device temperature %d degrees C" " exceeds maximum allowed. Hardware has been shut" " down.\n", temp_val); status = QLA_ERROR; } else if (temp_state == QLA82XX_TEMP_WARN) { if (ha->temperature == QLA82XX_TEMP_NORMAL) ql4_printk(KERN_WARNING, ha, "Device temperature %d" " degrees C exceeds operating range." " Immediate action needed.\n", temp_val); } else { if (ha->temperature == QLA82XX_TEMP_WARN) ql4_printk(KERN_INFO, ha, "Device temperature is" " now %d degrees C in normal range.\n", temp_val); } ha->temperature = temp_state; return status; } /** * qla4_8xxx_check_fw_alive - Check firmware health * @ha: Pointer to host adapter structure. * * Context: Interrupt **/ static int qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha) { uint32_t fw_heartbeat_counter; int status = QLA_SUCCESS; fw_heartbeat_counter = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_ALIVE_COUNTER); /* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */ if (fw_heartbeat_counter == 0xffffffff) { DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Device in frozen " "state, QLA82XX_PEG_ALIVE_COUNTER is 0xffffffff\n", ha->host_no, __func__)); return status; } if (ha->fw_heartbeat_counter == fw_heartbeat_counter) { ha->seconds_since_last_heartbeat++; /* FW not alive after 2 seconds */ if (ha->seconds_since_last_heartbeat == 2) { ha->seconds_since_last_heartbeat = 0; qla4_8xxx_dump_peg_reg(ha); status = QLA_ERROR; } } else ha->seconds_since_last_heartbeat = 0; ha->fw_heartbeat_counter = fw_heartbeat_counter; return status; } static void qla4_8xxx_process_fw_error(struct scsi_qla_host *ha) { uint32_t halt_status; int halt_status_unrecoverable = 0; halt_status = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_HALT_STATUS1); if (is_qla8022(ha)) { ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n", __func__); qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98, CRB_NIU_XG_PAUSE_CTL_P0 | CRB_NIU_XG_PAUSE_CTL_P1); if (QLA82XX_FWERROR_CODE(halt_status) == 0x67) ql4_printk(KERN_ERR, ha, "%s: Firmware aborted with error code 0x00006700. Device is being reset\n", __func__); if (halt_status & HALT_STATUS_UNRECOVERABLE) halt_status_unrecoverable = 1; } else if (is_qla8032(ha) || is_qla8042(ha)) { if (halt_status & QLA83XX_HALT_STATUS_FW_RESET) ql4_printk(KERN_ERR, ha, "%s: Firmware error detected device is being reset\n", __func__); else if (halt_status & QLA83XX_HALT_STATUS_UNRECOVERABLE) halt_status_unrecoverable = 1; } /* * Since we cannot change dev_state in interrupt context, * set appropriate DPC flag then wakeup DPC */ if (halt_status_unrecoverable) { set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags); } else { ql4_printk(KERN_INFO, ha, "%s: detect abort needed!\n", __func__); set_bit(DPC_RESET_HA, &ha->dpc_flags); } qla4xxx_mailbox_premature_completion(ha); qla4xxx_wake_dpc(ha); } /** * qla4_8xxx_watchdog - Poll dev state * @ha: Pointer to host adapter structure. 
* * Context: Interrupt **/ void qla4_8xxx_watchdog(struct scsi_qla_host *ha) { uint32_t dev_state; uint32_t idc_ctrl; if (is_qla8032(ha) && (qla4_83xx_is_detached(ha) == QLA_SUCCESS)) WARN_ONCE(1, "%s: iSCSI function %d marked invisible\n", __func__, ha->func_num); /* don't poll if reset is going on */ if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) || test_bit(DPC_RESET_HA, &ha->dpc_flags) || test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) { dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE); if (qla4_8xxx_check_temp(ha)) { if (is_qla8022(ha)) { ql4_printk(KERN_INFO, ha, "disabling pause transmit on port 0 & 1.\n"); qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98, CRB_NIU_XG_PAUSE_CTL_P0 | CRB_NIU_XG_PAUSE_CTL_P1); } set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags); qla4xxx_wake_dpc(ha); } else if (dev_state == QLA8XXX_DEV_NEED_RESET && !test_bit(DPC_RESET_HA, &ha->dpc_flags)) { ql4_printk(KERN_INFO, ha, "%s: HW State: NEED RESET!\n", __func__); if (is_qla8032(ha) || is_qla8042(ha)) { idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL); if (!(idc_ctrl & GRACEFUL_RESET_BIT1)) { ql4_printk(KERN_INFO, ha, "%s: Graceful reset bit is not set\n", __func__); qla4xxx_mailbox_premature_completion( ha); } } if ((is_qla8032(ha) || is_qla8042(ha)) || (is_qla8022(ha) && !ql4xdontresethba)) { set_bit(DPC_RESET_HA, &ha->dpc_flags); qla4xxx_wake_dpc(ha); } } else if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT && !test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) { ql4_printk(KERN_INFO, ha, "%s: HW State: NEED QUIES!\n", __func__); set_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags); qla4xxx_wake_dpc(ha); } else { /* Check firmware health */ if (qla4_8xxx_check_fw_alive(ha)) qla4_8xxx_process_fw_error(ha); } } } static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess) { struct iscsi_session *sess; struct ddb_entry *ddb_entry; struct scsi_qla_host *ha; sess = cls_sess->dd_data; ddb_entry = sess->dd_data; ha = ddb_entry->ha; if (!(ddb_entry->ddb_type == FLASH_DDB)) return; if (adapter_up(ha) && !test_bit(DF_RELOGIN, &ddb_entry->flags) && !iscsi_is_session_online(cls_sess)) { if (atomic_read(&ddb_entry->retry_relogin_timer) != INVALID_ENTRY) { if (atomic_read(&ddb_entry->retry_relogin_timer) == 0) { atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY); set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags); set_bit(DF_RELOGIN, &ddb_entry->flags); DEBUG2(ql4_printk(KERN_INFO, ha, "%s: index [%d] login device\n", __func__, ddb_entry->fw_ddb_index)); } else atomic_dec(&ddb_entry->retry_relogin_timer); } } /* Wait for relogin to timeout */ if (atomic_read(&ddb_entry->relogin_timer) && (atomic_dec_and_test(&ddb_entry->relogin_timer) != 0)) { /* * If the relogin times out and the device is * still NOT ONLINE then try and relogin again. */ if (!iscsi_is_session_online(cls_sess)) { /* Reset retry relogin timer */ atomic_inc(&ddb_entry->relogin_retry_count); DEBUG2(ql4_printk(KERN_INFO, ha, "%s: index[%d] relogin timed out-retrying" " relogin (%d), retry (%d)\n", __func__, ddb_entry->fw_ddb_index, atomic_read(&ddb_entry->relogin_retry_count), ddb_entry->default_time2wait + 4)); set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags); atomic_set(&ddb_entry->retry_relogin_timer, ddb_entry->default_time2wait + 4); } } } /** * qla4xxx_timer - checks every second for work to do. * @t: Context to obtain pointer to host adapter structure. 
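 *
 * Reschedules itself every second and wakes the DPC thread whenever
 * deferred work or a DPC flag is pending.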
**/ static void qla4xxx_timer(struct timer_list *t) { struct scsi_qla_host *ha = from_timer(ha, t, timer); int start_dpc = 0; uint16_t w; iscsi_host_for_each_session(ha->host, qla4xxx_check_relogin_flash_ddb); /* If we are in the middle of AER/EEH processing * skip any processing and reschedule the timer */ if (test_bit(AF_EEH_BUSY, &ha->flags)) { mod_timer(&ha->timer, jiffies + HZ); return; } /* Hardware read to trigger an EEH error during mailbox waits. */ if (!pci_channel_offline(ha->pdev)) pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w); if (is_qla80XX(ha)) qla4_8xxx_watchdog(ha); if (is_qla40XX(ha)) { /* Check for heartbeat interval. */ if (ha->firmware_options & FWOPT_HEARTBEAT_ENABLE && ha->heartbeat_interval != 0) { ha->seconds_since_last_heartbeat++; if (ha->seconds_since_last_heartbeat > ha->heartbeat_interval + 2) set_bit(DPC_RESET_HA, &ha->dpc_flags); } } /* Process any deferred work. */ if (!list_empty(&ha->work_list)) start_dpc++; /* Wakeup the dpc routine for this adapter, if needed. */ if (start_dpc || test_bit(DPC_RESET_HA, &ha->dpc_flags) || test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags) || test_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags) || test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) || test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) || test_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags) || test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) || test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) || test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) || test_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags) || test_bit(DPC_AEN, &ha->dpc_flags)) { DEBUG2(printk("scsi%ld: %s: scheduling dpc routine" " - dpc flags = 0x%lx\n", ha->host_no, __func__, ha->dpc_flags)); qla4xxx_wake_dpc(ha); } /* Reschedule timer thread to call us back in one second */ mod_timer(&ha->timer, jiffies + HZ); DEBUG2(ha->seconds_since_last_intr++); } /** * qla4xxx_cmd_wait - waits for all outstanding commands to complete * @ha: Pointer to host adapter structure. * * This routine stalls the driver until all outstanding commands are returned. * Caller must release the Hardware Lock prior to calling this routine. **/ static int qla4xxx_cmd_wait(struct scsi_qla_host *ha) { uint32_t index = 0; unsigned long flags; struct scsi_cmnd *cmd; unsigned long wtime; uint32_t wtmo; if (is_qla40XX(ha)) wtmo = WAIT_CMD_TOV; else wtmo = ha->nx_reset_timeout / 2; wtime = jiffies + (wtmo * HZ); DEBUG2(ql4_printk(KERN_INFO, ha, "Wait up to %u seconds for cmds to complete\n", wtmo)); while (!time_after_eq(jiffies, wtime)) { spin_lock_irqsave(&ha->hardware_lock, flags); /* Find a command that hasn't completed. */ for (index = 0; index < ha->host->can_queue; index++) { cmd = scsi_host_find_tag(ha->host, index); /* * We cannot just check if the index is valid, * becase if we are run from the scsi eh, then * the scsi/block layer is going to prevent * the tag from being released. */ if (cmd != NULL && qla4xxx_cmd_priv(cmd)->srb) break; } spin_unlock_irqrestore(&ha->hardware_lock, flags); /* If No Commands are pending, wait is complete */ if (index == ha->host->can_queue) return QLA_SUCCESS; msleep(1000); } /* If we timed out on waiting for commands to come back * return ERROR. */ return QLA_ERROR; } int qla4xxx_hw_reset(struct scsi_qla_host *ha) { uint32_t ctrl_status; unsigned long flags = 0; DEBUG2(printk(KERN_ERR "scsi%ld: %s\n", ha->host_no, __func__)); if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS) return QLA_ERROR; spin_lock_irqsave(&ha->hardware_lock, flags); /* * If the SCSI Reset Interrupt bit is set, clear it. * Otherwise, the Soft Reset won't work. 
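 * The soft reset itself is then requested by setting CSR_SOFT_RESET and
 * flushing the write with a readl() before dropping the hardware lock.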
*/ ctrl_status = readw(&ha->reg->ctrl_status); if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0) writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status); /* Issue Soft Reset */ writel(set_rmask(CSR_SOFT_RESET), &ha->reg->ctrl_status); readl(&ha->reg->ctrl_status); spin_unlock_irqrestore(&ha->hardware_lock, flags); return QLA_SUCCESS; } /** * qla4xxx_soft_reset - performs soft reset. * @ha: Pointer to host adapter structure. **/ int qla4xxx_soft_reset(struct scsi_qla_host *ha) { uint32_t max_wait_time; unsigned long flags = 0; int status; uint32_t ctrl_status; status = qla4xxx_hw_reset(ha); if (status != QLA_SUCCESS) return status; status = QLA_ERROR; /* Wait until the Network Reset Intr bit is cleared */ max_wait_time = RESET_INTR_TOV; do { spin_lock_irqsave(&ha->hardware_lock, flags); ctrl_status = readw(&ha->reg->ctrl_status); spin_unlock_irqrestore(&ha->hardware_lock, flags); if ((ctrl_status & CSR_NET_RESET_INTR) == 0) break; msleep(1000); } while ((--max_wait_time)); if ((ctrl_status & CSR_NET_RESET_INTR) != 0) { DEBUG2(printk(KERN_WARNING "scsi%ld: Network Reset Intr not cleared by " "Network function, clearing it now!\n", ha->host_no)); spin_lock_irqsave(&ha->hardware_lock, flags); writel(set_rmask(CSR_NET_RESET_INTR), &ha->reg->ctrl_status); readl(&ha->reg->ctrl_status); spin_unlock_irqrestore(&ha->hardware_lock, flags); } /* Wait until the firmware tells us the Soft Reset is done */ max_wait_time = SOFT_RESET_TOV; do { spin_lock_irqsave(&ha->hardware_lock, flags); ctrl_status = readw(&ha->reg->ctrl_status); spin_unlock_irqrestore(&ha->hardware_lock, flags); if ((ctrl_status & CSR_SOFT_RESET) == 0) { status = QLA_SUCCESS; break; } msleep(1000); } while ((--max_wait_time)); /* * Also, make sure that the SCSI Reset Interrupt bit has been cleared * after the soft reset has taken place. */ spin_lock_irqsave(&ha->hardware_lock, flags); ctrl_status = readw(&ha->reg->ctrl_status); if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0) { writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status); readl(&ha->reg->ctrl_status); } spin_unlock_irqrestore(&ha->hardware_lock, flags); /* If soft reset fails then most probably the bios on other * function is also enabled. * Since the initialization is sequential the other fn * wont be able to acknowledge the soft reset. * Issue a force soft reset to workaround this scenario. */ if (max_wait_time == 0) { /* Issue Force Soft Reset */ spin_lock_irqsave(&ha->hardware_lock, flags); writel(set_rmask(CSR_FORCE_SOFT_RESET), &ha->reg->ctrl_status); readl(&ha->reg->ctrl_status); spin_unlock_irqrestore(&ha->hardware_lock, flags); /* Wait until the firmware tells us the Soft Reset is done */ max_wait_time = SOFT_RESET_TOV; do { spin_lock_irqsave(&ha->hardware_lock, flags); ctrl_status = readw(&ha->reg->ctrl_status); spin_unlock_irqrestore(&ha->hardware_lock, flags); if ((ctrl_status & CSR_FORCE_SOFT_RESET) == 0) { status = QLA_SUCCESS; break; } msleep(1000); } while ((--max_wait_time)); } return status; } /** * qla4xxx_abort_active_cmds - returns all outstanding i/o requests to O.S. * @ha: Pointer to host adapter structure. * @res: returned scsi status * * This routine is called just prior to a HARD RESET to return all * outstanding commands back to the Operating System. * Caller should make sure that the following locks are released * before this calling routine: Hardware lock, and io_request_lock. 
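 * Each active srb is completed with the supplied result and its
 * reference dropped through qla4xxx_srb_compl().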
**/ static void qla4xxx_abort_active_cmds(struct scsi_qla_host *ha, int res) { struct srb *srb; int i; unsigned long flags; spin_lock_irqsave(&ha->hardware_lock, flags); for (i = 0; i < ha->host->can_queue; i++) { srb = qla4xxx_del_from_active_array(ha, i); if (srb != NULL) { srb->cmd->result = res; kref_put(&srb->srb_ref, qla4xxx_srb_compl); } } spin_unlock_irqrestore(&ha->hardware_lock, flags); } void qla4xxx_dead_adapter_cleanup(struct scsi_qla_host *ha) { clear_bit(AF_ONLINE, &ha->flags); /* Disable the board */ ql4_printk(KERN_INFO, ha, "Disabling the board\n"); qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16); qla4xxx_mark_all_devices_missing(ha); clear_bit(AF_INIT_DONE, &ha->flags); } static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session) { struct iscsi_session *sess; struct ddb_entry *ddb_entry; sess = cls_session->dd_data; ddb_entry = sess->dd_data; ddb_entry->fw_ddb_device_state = DDB_DS_SESSION_FAILED; if (ddb_entry->ddb_type == FLASH_DDB) iscsi_block_session(ddb_entry->sess); else iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED); } /** * qla4xxx_recover_adapter - recovers adapter after a fatal error * @ha: Pointer to host adapter structure. **/ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha) { int status = QLA_ERROR; uint8_t reset_chip = 0; uint32_t dev_state; unsigned long wait; /* Stall incoming I/O until we are done */ scsi_block_requests(ha->host); clear_bit(AF_ONLINE, &ha->flags); clear_bit(AF_LINK_UP, &ha->flags); DEBUG2(ql4_printk(KERN_INFO, ha, "%s: adapter OFFLINE\n", __func__)); set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags); if ((is_qla8032(ha) || is_qla8042(ha)) && !test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) { ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n", __func__); /* disable pause frame for ISP83xx */ qla4_83xx_disable_pause(ha); } iscsi_host_for_each_session(ha->host, qla4xxx_fail_session); if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) reset_chip = 1; /* For the DPC_RESET_HA_INTR case (ISP-4xxx specific) * do not reset adapter, jump to initialize_adapter */ if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) { status = QLA_SUCCESS; goto recover_ha_init_adapter; } /* For the ISP-8xxx adapter, issue a stop_firmware if invoked * from eh_host_reset or ioctl module */ if (is_qla80XX(ha) && !reset_chip && test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) { DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld: %s - Performing stop_firmware...\n", ha->host_no, __func__)); status = ha->isp_ops->reset_firmware(ha); if (status == QLA_SUCCESS) { ha->isp_ops->disable_intrs(ha); qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); qla4xxx_abort_active_cmds(ha, DID_RESET << 16); } else { /* If the stop_firmware fails then * reset the entire chip */ reset_chip = 1; clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags); set_bit(DPC_RESET_HA, &ha->dpc_flags); } } /* Issue full chip reset if recovering from a catastrophic error, * or if stop_firmware fails for ISP-8xxx. 
* This is the default case for ISP-4xxx */ if (is_qla40XX(ha) || reset_chip) { if (is_qla40XX(ha)) goto chip_reset; /* Check if 8XXX firmware is alive or not * We may have arrived here from NEED_RESET * detection only */ if (test_bit(AF_FW_RECOVERY, &ha->flags)) goto chip_reset; wait = jiffies + (FW_ALIVE_WAIT_TOV * HZ); while (time_before(jiffies, wait)) { if (qla4_8xxx_check_fw_alive(ha)) { qla4xxx_mailbox_premature_completion(ha); break; } set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout(HZ); } chip_reset: if (!test_bit(AF_FW_RECOVERY, &ha->flags)) qla4xxx_cmd_wait(ha); qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld: %s - Performing chip reset..\n", ha->host_no, __func__)); status = ha->isp_ops->reset_chip(ha); qla4xxx_abort_active_cmds(ha, DID_RESET << 16); } /* Flush any pending ddb changed AENs */ qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); recover_ha_init_adapter: /* Upon successful firmware/chip reset, re-initialize the adapter */ if (status == QLA_SUCCESS) { /* For ISP-4xxx, force function 1 to always initialize * before function 3 to prevent both funcions from * stepping on top of the other */ if (is_qla40XX(ha) && (ha->mac_index == 3)) ssleep(6); /* NOTE: AF_ONLINE flag set upon successful completion of * qla4xxx_initialize_adapter */ status = qla4xxx_initialize_adapter(ha, RESET_ADAPTER); if (is_qla80XX(ha) && (status == QLA_ERROR)) { status = qla4_8xxx_check_init_adapter_retry(ha); if (status == QLA_ERROR) { ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Don't retry recover adapter\n", ha->host_no, __func__); qla4xxx_dead_adapter_cleanup(ha); clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags); clear_bit(DPC_RESET_HA, &ha->dpc_flags); clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags); goto exit_recover; } } } /* Retry failed adapter initialization, if necessary * Do not retry initialize_adapter for RESET_HA_INTR (ISP-4xxx specific) * case to prevent ping-pong resets between functions */ if (!test_bit(AF_ONLINE, &ha->flags) && !test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) { /* Adapter initialization failed, see if we can retry * resetting the ha. * Since we don't want to block the DPC for too long * with multiple resets in the same thread, * utilize DPC to retry */ if (is_qla80XX(ha)) { ha->isp_ops->idc_lock(ha); dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE); ha->isp_ops->idc_unlock(ha); if (dev_state == QLA8XXX_DEV_FAILED) { ql4_printk(KERN_INFO, ha, "%s: don't retry " "recover adapter. H/W is in Failed " "state\n", __func__); qla4xxx_dead_adapter_cleanup(ha); clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags); clear_bit(DPC_RESET_HA, &ha->dpc_flags); clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags); status = QLA_ERROR; goto exit_recover; } } if (!test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags)) { ha->retry_reset_ha_cnt = MAX_RESET_HA_RETRIES; DEBUG2(printk("scsi%ld: recover adapter - retrying " "(%d) more times\n", ha->host_no, ha->retry_reset_ha_cnt)); set_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags); status = QLA_ERROR; } else { if (ha->retry_reset_ha_cnt > 0) { /* Schedule another Reset HA--DPC will retry */ ha->retry_reset_ha_cnt--; DEBUG2(printk("scsi%ld: recover adapter - " "retry remaining %d\n", ha->host_no, ha->retry_reset_ha_cnt)); status = QLA_ERROR; } if (ha->retry_reset_ha_cnt == 0) { /* Recover adapter retries have been exhausted. 
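				 * (retry_reset_ha_cnt reached zero without the
				 * adapter coming back online.)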
* Adapter DEAD */ DEBUG2(printk("scsi%ld: recover adapter " "failed - board disabled\n", ha->host_no)); qla4xxx_dead_adapter_cleanup(ha); clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags); clear_bit(DPC_RESET_HA, &ha->dpc_flags); clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags); status = QLA_ERROR; } } } else { clear_bit(DPC_RESET_HA, &ha->dpc_flags); clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags); clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags); } exit_recover: ha->adapter_error_count++; if (test_bit(AF_ONLINE, &ha->flags)) ha->isp_ops->enable_intrs(ha); scsi_unblock_requests(ha->host); clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags); DEBUG2(printk("scsi%ld: recover adapter: %s\n", ha->host_no, status == QLA_ERROR ? "FAILED" : "SUCCEEDED")); return status; } static void qla4xxx_relogin_devices(struct iscsi_cls_session *cls_session) { struct iscsi_session *sess; struct ddb_entry *ddb_entry; struct scsi_qla_host *ha; sess = cls_session->dd_data; ddb_entry = sess->dd_data; ha = ddb_entry->ha; if (!iscsi_is_session_online(cls_session)) { if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) { ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]" " unblock session\n", ha->host_no, __func__, ddb_entry->fw_ddb_index); iscsi_unblock_session(ddb_entry->sess); } else { /* Trigger relogin */ if (ddb_entry->ddb_type == FLASH_DDB) { if (!(test_bit(DF_RELOGIN, &ddb_entry->flags) || test_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags))) qla4xxx_arm_relogin_timer(ddb_entry); } else iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED); } } } int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session) { struct iscsi_session *sess; struct ddb_entry *ddb_entry; struct scsi_qla_host *ha; sess = cls_session->dd_data; ddb_entry = sess->dd_data; ha = ddb_entry->ha; ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]" " unblock session\n", ha->host_no, __func__, ddb_entry->fw_ddb_index); iscsi_unblock_session(ddb_entry->sess); /* Start scan target */ if (test_bit(AF_ONLINE, &ha->flags)) { ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]" " start scan\n", ha->host_no, __func__, ddb_entry->fw_ddb_index); queue_work(ddb_entry->sess->workq, &ddb_entry->sess->scan_work); } return QLA_SUCCESS; } int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session) { struct iscsi_session *sess; struct ddb_entry *ddb_entry; struct scsi_qla_host *ha; int status = QLA_SUCCESS; sess = cls_session->dd_data; ddb_entry = sess->dd_data; ha = ddb_entry->ha; ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]" " unblock user space session\n", ha->host_no, __func__, ddb_entry->fw_ddb_index); if (!iscsi_is_session_online(cls_session)) { iscsi_conn_start(ddb_entry->conn); iscsi_conn_login_event(ddb_entry->conn, ISCSI_CONN_STATE_LOGGED_IN); } else { ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d] session [%d] already logged in\n", ha->host_no, __func__, ddb_entry->fw_ddb_index, cls_session->sid); status = QLA_ERROR; } return status; } static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha) { iscsi_host_for_each_session(ha->host, qla4xxx_relogin_devices); } static void qla4xxx_relogin_flash_ddb(struct iscsi_cls_session *cls_sess) { uint16_t relogin_timer; struct iscsi_session *sess; struct ddb_entry *ddb_entry; struct scsi_qla_host *ha; sess = cls_sess->dd_data; ddb_entry = sess->dd_data; ha = ddb_entry->ha; relogin_timer = max(ddb_entry->default_relogin_timeout, (uint16_t)RELOGIN_TOV); atomic_set(&ddb_entry->relogin_timer, relogin_timer); DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld: Relogin index [%d]. 
TOV=%d\n", ha->host_no, ddb_entry->fw_ddb_index, relogin_timer)); qla4xxx_login_flash_ddb(cls_sess); } static void qla4xxx_dpc_relogin(struct iscsi_cls_session *cls_sess) { struct iscsi_session *sess; struct ddb_entry *ddb_entry; struct scsi_qla_host *ha; sess = cls_sess->dd_data; ddb_entry = sess->dd_data; ha = ddb_entry->ha; if (!(ddb_entry->ddb_type == FLASH_DDB)) return; if (test_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags)) return; if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags) && !iscsi_is_session_online(cls_sess)) { DEBUG2(ql4_printk(KERN_INFO, ha, "relogin issued\n")); qla4xxx_relogin_flash_ddb(cls_sess); } } void qla4xxx_wake_dpc(struct scsi_qla_host *ha) { if (ha->dpc_thread) queue_work(ha->dpc_thread, &ha->dpc_work); } static struct qla4_work_evt * qla4xxx_alloc_work(struct scsi_qla_host *ha, uint32_t data_size, enum qla4_work_type type) { struct qla4_work_evt *e; uint32_t size = sizeof(struct qla4_work_evt) + data_size; e = kzalloc(size, GFP_ATOMIC); if (!e) return NULL; INIT_LIST_HEAD(&e->list); e->type = type; return e; } static void qla4xxx_post_work(struct scsi_qla_host *ha, struct qla4_work_evt *e) { unsigned long flags; spin_lock_irqsave(&ha->work_lock, flags); list_add_tail(&e->list, &ha->work_list); spin_unlock_irqrestore(&ha->work_lock, flags); qla4xxx_wake_dpc(ha); } int qla4xxx_post_aen_work(struct scsi_qla_host *ha, enum iscsi_host_event_code aen_code, uint32_t data_size, uint8_t *data) { struct qla4_work_evt *e; e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_AEN); if (!e) return QLA_ERROR; e->u.aen.code = aen_code; e->u.aen.data_size = data_size; memcpy(e->u.aen.data, data, data_size); qla4xxx_post_work(ha, e); return QLA_SUCCESS; } int qla4xxx_post_ping_evt_work(struct scsi_qla_host *ha, uint32_t status, uint32_t pid, uint32_t data_size, uint8_t *data) { struct qla4_work_evt *e; e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_PING_STATUS); if (!e) return QLA_ERROR; e->u.ping.status = status; e->u.ping.pid = pid; e->u.ping.data_size = data_size; memcpy(e->u.ping.data, data, data_size); qla4xxx_post_work(ha, e); return QLA_SUCCESS; } static void qla4xxx_do_work(struct scsi_qla_host *ha) { struct qla4_work_evt *e, *tmp; unsigned long flags; LIST_HEAD(work); spin_lock_irqsave(&ha->work_lock, flags); list_splice_init(&ha->work_list, &work); spin_unlock_irqrestore(&ha->work_lock, flags); list_for_each_entry_safe(e, tmp, &work, list) { list_del_init(&e->list); switch (e->type) { case QLA4_EVENT_AEN: iscsi_post_host_event(ha->host_no, &qla4xxx_iscsi_transport, e->u.aen.code, e->u.aen.data_size, e->u.aen.data); break; case QLA4_EVENT_PING_STATUS: iscsi_ping_comp_event(ha->host_no, &qla4xxx_iscsi_transport, e->u.ping.status, e->u.ping.pid, e->u.ping.data_size, e->u.ping.data); break; default: ql4_printk(KERN_WARNING, ha, "event type: 0x%x not " "supported", e->type); } kfree(e); } } /** * qla4xxx_do_dpc - dpc routine * @work: Context to obtain pointer to host adapter structure. * * This routine is a task that is schedule by the interrupt handler * to perform the background processing for interrupts. We put it * on a task queue that is consumed whenever the scheduler runs; that's * so you can do anything (i.e. put the process to sleep etc). In fact, * the mid-level tries to sleep when it reaches the driver threshold * "host->can_queue". This can cause a panic if we were in our interrupt code. 
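 *
 * Other driver paths hand work to this context by setting a flag in
 * ha->dpc_flags (or by queueing a qla4_work_evt, which wakes the DPC
 * itself) and then calling qla4xxx_wake_dpc(); a minimal sketch of that
 * pattern:
 *
 *	set_bit(DPC_AEN, &ha->dpc_flags);
 *	qla4xxx_wake_dpc(ha);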
**/ static void qla4xxx_do_dpc(struct work_struct *work) { struct scsi_qla_host *ha = container_of(work, struct scsi_qla_host, dpc_work); int status = QLA_ERROR; DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld: %s: DPC handler waking up. flags = 0x%08lx, dpc_flags = 0x%08lx\n", ha->host_no, __func__, ha->flags, ha->dpc_flags)); /* Initialization not yet finished. Don't do anything yet. */ if (!test_bit(AF_INIT_DONE, &ha->flags)) return; if (test_bit(AF_EEH_BUSY, &ha->flags)) { DEBUG2(printk(KERN_INFO "scsi%ld: %s: flags = %lx\n", ha->host_no, __func__, ha->flags)); return; } /* post events to application */ qla4xxx_do_work(ha); if (is_qla80XX(ha)) { if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) { if (is_qla8032(ha) || is_qla8042(ha)) { ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n", __func__); /* disable pause frame for ISP83xx */ qla4_83xx_disable_pause(ha); } ha->isp_ops->idc_lock(ha); qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, QLA8XXX_DEV_FAILED); ha->isp_ops->idc_unlock(ha); ql4_printk(KERN_INFO, ha, "HW State: FAILED\n"); qla4_8xxx_device_state_handler(ha); } if (test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags)) { if (is_qla8042(ha)) { if (ha->idc_info.info2 & ENABLE_INTERNAL_LOOPBACK) { ql4_printk(KERN_INFO, ha, "%s: Disabling ACB\n", __func__); status = qla4_84xx_config_acb(ha, ACB_CONFIG_DISABLE); if (status != QLA_SUCCESS) { ql4_printk(KERN_INFO, ha, "%s: ACB config failed\n", __func__); } } } qla4_83xx_post_idc_ack(ha); clear_bit(DPC_POST_IDC_ACK, &ha->dpc_flags); } if (is_qla8042(ha) && test_bit(DPC_RESTORE_ACB, &ha->dpc_flags)) { ql4_printk(KERN_INFO, ha, "%s: Restoring ACB\n", __func__); if (qla4_84xx_config_acb(ha, ACB_CONFIG_SET) != QLA_SUCCESS) { ql4_printk(KERN_INFO, ha, "%s: ACB config failed ", __func__); } clear_bit(DPC_RESTORE_ACB, &ha->dpc_flags); } if (test_and_clear_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) { qla4_8xxx_need_qsnt_handler(ha); } } if (!test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) && (test_bit(DPC_RESET_HA, &ha->dpc_flags) || test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) || test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))) { if ((is_qla8022(ha) && ql4xdontresethba) || ((is_qla8032(ha) || is_qla8042(ha)) && qla4_83xx_idc_dontreset(ha))) { DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n", ha->host_no, __func__)); clear_bit(DPC_RESET_HA, &ha->dpc_flags); clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags); clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags); goto dpc_post_reset_ha; } if (test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) || test_bit(DPC_RESET_HA, &ha->dpc_flags)) qla4xxx_recover_adapter(ha); if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) { uint8_t wait_time = RESET_INTR_TOV; while ((readw(&ha->reg->ctrl_status) & (CSR_SOFT_RESET | CSR_FORCE_SOFT_RESET)) != 0) { if (--wait_time == 0) break; msleep(1000); } if (wait_time == 0) DEBUG2(printk("scsi%ld: %s: SR|FSR " "bit not cleared-- resetting\n", ha->host_no, __func__)); qla4xxx_abort_active_cmds(ha, DID_RESET << 16); if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS) { qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); status = qla4xxx_recover_adapter(ha); } clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags); if (status == QLA_SUCCESS) ha->isp_ops->enable_intrs(ha); } } dpc_post_reset_ha: /* ---- process AEN? --- */ if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags)) qla4xxx_process_aen(ha, PROCESS_ALL_AENS); /* ---- Get DHCP IP Address? --- */ if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags)) qla4xxx_get_dhcp_ip_address(ha); /* ---- relogin device? 
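	 * (serviced only when the adapter is up and DPC_RELOGIN_DEVICE is set)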
--- */ if (adapter_up(ha) && test_and_clear_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags)) { iscsi_host_for_each_session(ha->host, qla4xxx_dpc_relogin); } /* ---- link change? --- */ if (!test_bit(AF_LOOPBACK, &ha->flags) && test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) { if (!test_bit(AF_LINK_UP, &ha->flags)) { /* ---- link down? --- */ qla4xxx_mark_all_devices_missing(ha); } else { /* ---- link up? --- * * F/W will auto login to all devices ONLY ONCE after * link up during driver initialization and runtime * fatal error recovery. Therefore, the driver must * manually relogin to devices when recovering from * connection failures, logouts, expired KATO, etc. */ if (test_and_clear_bit(AF_BUILD_DDB_LIST, &ha->flags)) { qla4xxx_build_ddb_list(ha, ha->is_reset); iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb); } else qla4xxx_relogin_all_devices(ha); } } if (test_and_clear_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags)) { if (qla4xxx_sysfs_ddb_export(ha)) ql4_printk(KERN_ERR, ha, "%s: Error exporting ddb to sysfs\n", __func__); } } /** * qla4xxx_free_adapter - release the adapter * @ha: pointer to adapter structure **/ static void qla4xxx_free_adapter(struct scsi_qla_host *ha) { qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16); /* Turn-off interrupts on the card. */ ha->isp_ops->disable_intrs(ha); if (is_qla40XX(ha)) { writel(set_rmask(CSR_SCSI_PROCESSOR_INTR), &ha->reg->ctrl_status); readl(&ha->reg->ctrl_status); } else if (is_qla8022(ha)) { writel(0, &ha->qla4_82xx_reg->host_int); readl(&ha->qla4_82xx_reg->host_int); } else if (is_qla8032(ha) || is_qla8042(ha)) { writel(0, &ha->qla4_83xx_reg->risc_intr); readl(&ha->qla4_83xx_reg->risc_intr); } /* Remove timer thread, if present */ if (ha->timer_active) qla4xxx_stop_timer(ha); /* Kill the kernel thread for this host */ if (ha->dpc_thread) destroy_workqueue(ha->dpc_thread); /* Kill the kernel thread for this host */ if (ha->task_wq) destroy_workqueue(ha->task_wq); /* Put firmware in known state */ ha->isp_ops->reset_firmware(ha); if (is_qla80XX(ha)) { ha->isp_ops->idc_lock(ha); qla4_8xxx_clear_drv_active(ha); ha->isp_ops->idc_unlock(ha); } /* Detach interrupts */ qla4xxx_free_irqs(ha); /* free extra memory */ qla4xxx_mem_free(ha); } int qla4_8xxx_iospace_config(struct scsi_qla_host *ha) { int status = 0; unsigned long mem_base, mem_len; struct pci_dev *pdev = ha->pdev; status = pci_request_regions(pdev, DRIVER_NAME); if (status) { printk(KERN_WARNING "scsi(%ld) Failed to reserve PIO regions (%s) " "status=%d\n", ha->host_no, pci_name(pdev), status); goto iospace_error_exit; } DEBUG2(printk(KERN_INFO "%s: revision-id=%d\n", __func__, pdev->revision)); ha->revision_id = pdev->revision; /* remap phys address */ mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */ mem_len = pci_resource_len(pdev, 0); DEBUG2(printk(KERN_INFO "%s: ioremap from %lx a size of %lx\n", __func__, mem_base, mem_len)); /* mapping of pcibase pointer */ ha->nx_pcibase = (unsigned long)ioremap(mem_base, mem_len); if (!ha->nx_pcibase) { printk(KERN_ERR "cannot remap MMIO (%s), aborting\n", pci_name(pdev)); pci_release_regions(ha->pdev); goto iospace_error_exit; } /* Mapping of IO base pointer, door bell read and write pointer */ /* mapping of IO base pointer */ if (is_qla8022(ha)) { ha->qla4_82xx_reg = (struct device_reg_82xx __iomem *) ((uint8_t *)ha->nx_pcibase + 0xbc000 + (ha->pdev->devfn << 11)); ha->nx_db_wr_ptr = (ha->pdev->devfn == 4 ? 
QLA82XX_CAM_RAM_DB1 : QLA82XX_CAM_RAM_DB2); } else if (is_qla8032(ha) || is_qla8042(ha)) { ha->qla4_83xx_reg = (struct device_reg_83xx __iomem *) ((uint8_t *)ha->nx_pcibase); } return 0; iospace_error_exit: return -ENOMEM; } /*** * qla4xxx_iospace_config - maps registers * @ha: pointer to adapter structure * * This routines maps HBA's registers from the pci address space * into the kernel virtual address space for memory mapped i/o. **/ int qla4xxx_iospace_config(struct scsi_qla_host *ha) { unsigned long pio, pio_len, pio_flags; unsigned long mmio, mmio_len, mmio_flags; pio = pci_resource_start(ha->pdev, 0); pio_len = pci_resource_len(ha->pdev, 0); pio_flags = pci_resource_flags(ha->pdev, 0); if (pio_flags & IORESOURCE_IO) { if (pio_len < MIN_IOBASE_LEN) { ql4_printk(KERN_WARNING, ha, "Invalid PCI I/O region size\n"); pio = 0; } } else { ql4_printk(KERN_WARNING, ha, "region #0 not a PIO resource\n"); pio = 0; } /* Use MMIO operations for all accesses. */ mmio = pci_resource_start(ha->pdev, 1); mmio_len = pci_resource_len(ha->pdev, 1); mmio_flags = pci_resource_flags(ha->pdev, 1); if (!(mmio_flags & IORESOURCE_MEM)) { ql4_printk(KERN_ERR, ha, "region #0 not an MMIO resource, aborting\n"); goto iospace_error_exit; } if (mmio_len < MIN_IOBASE_LEN) { ql4_printk(KERN_ERR, ha, "Invalid PCI mem region size, aborting\n"); goto iospace_error_exit; } if (pci_request_regions(ha->pdev, DRIVER_NAME)) { ql4_printk(KERN_WARNING, ha, "Failed to reserve PIO/MMIO regions\n"); goto iospace_error_exit; } ha->pio_address = pio; ha->pio_length = pio_len; ha->reg = ioremap(mmio, MIN_IOBASE_LEN); if (!ha->reg) { ql4_printk(KERN_ERR, ha, "cannot remap MMIO, aborting\n"); goto iospace_error_exit; } return 0; iospace_error_exit: return -ENOMEM; } static struct isp_operations qla4xxx_isp_ops = { .iospace_config = qla4xxx_iospace_config, .pci_config = qla4xxx_pci_config, .disable_intrs = qla4xxx_disable_intrs, .enable_intrs = qla4xxx_enable_intrs, .start_firmware = qla4xxx_start_firmware, .intr_handler = qla4xxx_intr_handler, .interrupt_service_routine = qla4xxx_interrupt_service_routine, .reset_chip = qla4xxx_soft_reset, .reset_firmware = qla4xxx_hw_reset, .queue_iocb = qla4xxx_queue_iocb, .complete_iocb = qla4xxx_complete_iocb, .rd_shdw_req_q_out = qla4xxx_rd_shdw_req_q_out, .rd_shdw_rsp_q_in = qla4xxx_rd_shdw_rsp_q_in, .get_sys_info = qla4xxx_get_sys_info, .queue_mailbox_command = qla4xxx_queue_mbox_cmd, .process_mailbox_interrupt = qla4xxx_process_mbox_intr, }; static struct isp_operations qla4_82xx_isp_ops = { .iospace_config = qla4_8xxx_iospace_config, .pci_config = qla4_8xxx_pci_config, .disable_intrs = qla4_82xx_disable_intrs, .enable_intrs = qla4_82xx_enable_intrs, .start_firmware = qla4_8xxx_load_risc, .restart_firmware = qla4_82xx_try_start_fw, .intr_handler = qla4_82xx_intr_handler, .interrupt_service_routine = qla4_82xx_interrupt_service_routine, .need_reset = qla4_8xxx_need_reset, .reset_chip = qla4_82xx_isp_reset, .reset_firmware = qla4_8xxx_stop_firmware, .queue_iocb = qla4_82xx_queue_iocb, .complete_iocb = qla4_82xx_complete_iocb, .rd_shdw_req_q_out = qla4_82xx_rd_shdw_req_q_out, .rd_shdw_rsp_q_in = qla4_82xx_rd_shdw_rsp_q_in, .get_sys_info = qla4_8xxx_get_sys_info, .rd_reg_direct = qla4_82xx_rd_32, .wr_reg_direct = qla4_82xx_wr_32, .rd_reg_indirect = qla4_82xx_md_rd_32, .wr_reg_indirect = qla4_82xx_md_wr_32, .idc_lock = qla4_82xx_idc_lock, .idc_unlock = qla4_82xx_idc_unlock, .rom_lock_recovery = qla4_82xx_rom_lock_recovery, .queue_mailbox_command = qla4_82xx_queue_mbox_cmd, .process_mailbox_interrupt 
= qla4_82xx_process_mbox_intr, }; static struct isp_operations qla4_83xx_isp_ops = { .iospace_config = qla4_8xxx_iospace_config, .pci_config = qla4_8xxx_pci_config, .disable_intrs = qla4_83xx_disable_intrs, .enable_intrs = qla4_83xx_enable_intrs, .start_firmware = qla4_8xxx_load_risc, .restart_firmware = qla4_83xx_start_firmware, .intr_handler = qla4_83xx_intr_handler, .interrupt_service_routine = qla4_83xx_interrupt_service_routine, .need_reset = qla4_8xxx_need_reset, .reset_chip = qla4_83xx_isp_reset, .reset_firmware = qla4_8xxx_stop_firmware, .queue_iocb = qla4_83xx_queue_iocb, .complete_iocb = qla4_83xx_complete_iocb, .rd_shdw_req_q_out = qla4xxx_rd_shdw_req_q_out, .rd_shdw_rsp_q_in = qla4xxx_rd_shdw_rsp_q_in, .get_sys_info = qla4_8xxx_get_sys_info, .rd_reg_direct = qla4_83xx_rd_reg, .wr_reg_direct = qla4_83xx_wr_reg, .rd_reg_indirect = qla4_83xx_rd_reg_indirect, .wr_reg_indirect = qla4_83xx_wr_reg_indirect, .idc_lock = qla4_83xx_drv_lock, .idc_unlock = qla4_83xx_drv_unlock, .rom_lock_recovery = qla4_83xx_rom_lock_recovery, .queue_mailbox_command = qla4_83xx_queue_mbox_cmd, .process_mailbox_interrupt = qla4_83xx_process_mbox_intr, }; uint16_t qla4xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha) { return (uint16_t)le32_to_cpu(ha->shadow_regs->req_q_out); } uint16_t qla4_82xx_rd_shdw_req_q_out(struct scsi_qla_host *ha) { return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->req_q_out)); } uint16_t qla4xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha) { return (uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in); } uint16_t qla4_82xx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha) { return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->rsp_q_in)); } static ssize_t qla4xxx_show_boot_eth_info(void *data, int type, char *buf) { struct scsi_qla_host *ha = data; char *str = buf; int rc; switch (type) { case ISCSI_BOOT_ETH_FLAGS: rc = sprintf(str, "%d\n", (char)SYSFS_FLAG_FW_SEL_BOOT); break; case ISCSI_BOOT_ETH_INDEX: rc = sprintf(str, "0\n"); break; case ISCSI_BOOT_ETH_MAC: rc = sysfs_format_mac(str, ha->my_mac, MAC_ADDR_LEN); break; default: rc = -ENOSYS; break; } return rc; } static umode_t qla4xxx_eth_get_attr_visibility(void *data, int type) { int rc; switch (type) { case ISCSI_BOOT_ETH_FLAGS: case ISCSI_BOOT_ETH_MAC: case ISCSI_BOOT_ETH_INDEX: rc = S_IRUGO; break; default: rc = 0; break; } return rc; } static ssize_t qla4xxx_show_boot_ini_info(void *data, int type, char *buf) { struct scsi_qla_host *ha = data; char *str = buf; int rc; switch (type) { case ISCSI_BOOT_INI_INITIATOR_NAME: rc = sprintf(str, "%s\n", ha->name_string); break; default: rc = -ENOSYS; break; } return rc; } static umode_t qla4xxx_ini_get_attr_visibility(void *data, int type) { int rc; switch (type) { case ISCSI_BOOT_INI_INITIATOR_NAME: rc = S_IRUGO; break; default: rc = 0; break; } return rc; } static ssize_t qla4xxx_show_boot_tgt_info(struct ql4_boot_session_info *boot_sess, int type, char *buf) { struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0]; char *str = buf; int rc; switch (type) { case ISCSI_BOOT_TGT_NAME: rc = sprintf(buf, "%s\n", (char *)&boot_sess->target_name); break; case ISCSI_BOOT_TGT_IP_ADDR: if (boot_sess->conn_list[0].dest_ipaddr.ip_type == 0x1) rc = sprintf(buf, "%pI4\n", &boot_conn->dest_ipaddr.ip_address); else rc = sprintf(str, "%pI6\n", &boot_conn->dest_ipaddr.ip_address); break; case ISCSI_BOOT_TGT_PORT: rc = sprintf(str, "%d\n", boot_conn->dest_port); break; case ISCSI_BOOT_TGT_CHAP_NAME: rc = sprintf(str, "%.*s\n", boot_conn->chap.target_chap_name_length, (char 
*)&boot_conn->chap.target_chap_name); break; case ISCSI_BOOT_TGT_CHAP_SECRET: rc = sprintf(str, "%.*s\n", boot_conn->chap.target_secret_length, (char *)&boot_conn->chap.target_secret); break; case ISCSI_BOOT_TGT_REV_CHAP_NAME: rc = sprintf(str, "%.*s\n", boot_conn->chap.intr_chap_name_length, (char *)&boot_conn->chap.intr_chap_name); break; case ISCSI_BOOT_TGT_REV_CHAP_SECRET: rc = sprintf(str, "%.*s\n", boot_conn->chap.intr_secret_length, (char *)&boot_conn->chap.intr_secret); break; case ISCSI_BOOT_TGT_FLAGS: rc = sprintf(str, "%d\n", (char)SYSFS_FLAG_FW_SEL_BOOT); break; case ISCSI_BOOT_TGT_NIC_ASSOC: rc = sprintf(str, "0\n"); break; default: rc = -ENOSYS; break; } return rc; } static ssize_t qla4xxx_show_boot_tgt_pri_info(void *data, int type, char *buf) { struct scsi_qla_host *ha = data; struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_pri_sess); return qla4xxx_show_boot_tgt_info(boot_sess, type, buf); } static ssize_t qla4xxx_show_boot_tgt_sec_info(void *data, int type, char *buf) { struct scsi_qla_host *ha = data; struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_sec_sess); return qla4xxx_show_boot_tgt_info(boot_sess, type, buf); } static umode_t qla4xxx_tgt_get_attr_visibility(void *data, int type) { int rc; switch (type) { case ISCSI_BOOT_TGT_NAME: case ISCSI_BOOT_TGT_IP_ADDR: case ISCSI_BOOT_TGT_PORT: case ISCSI_BOOT_TGT_CHAP_NAME: case ISCSI_BOOT_TGT_CHAP_SECRET: case ISCSI_BOOT_TGT_REV_CHAP_NAME: case ISCSI_BOOT_TGT_REV_CHAP_SECRET: case ISCSI_BOOT_TGT_NIC_ASSOC: case ISCSI_BOOT_TGT_FLAGS: rc = S_IRUGO; break; default: rc = 0; break; } return rc; } static void qla4xxx_boot_release(void *data) { struct scsi_qla_host *ha = data; scsi_host_put(ha->host); } static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[]) { dma_addr_t buf_dma; uint32_t addr, pri_addr, sec_addr; uint32_t offset; uint16_t func_num; uint8_t val; uint8_t *buf = NULL; size_t size = 13 * sizeof(uint8_t); int ret = QLA_SUCCESS; func_num = PCI_FUNC(ha->pdev->devfn); ql4_printk(KERN_INFO, ha, "%s: Get FW boot info for 0x%x func %d\n", __func__, ha->pdev->device, func_num); if (is_qla40XX(ha)) { if (func_num == 1) { addr = NVRAM_PORT0_BOOT_MODE; pri_addr = NVRAM_PORT0_BOOT_PRI_TGT; sec_addr = NVRAM_PORT0_BOOT_SEC_TGT; } else if (func_num == 3) { addr = NVRAM_PORT1_BOOT_MODE; pri_addr = NVRAM_PORT1_BOOT_PRI_TGT; sec_addr = NVRAM_PORT1_BOOT_SEC_TGT; } else { ret = QLA_ERROR; goto exit_boot_info; } /* Check Boot Mode */ val = rd_nvram_byte(ha, addr); if (!(val & 0x07)) { DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Adapter boot " "options : 0x%x\n", __func__, val)); ret = QLA_ERROR; goto exit_boot_info; } /* get primary valid target index */ val = rd_nvram_byte(ha, pri_addr); if (val & BIT_7) ddb_index[0] = (val & 0x7f); /* get secondary valid target index */ val = rd_nvram_byte(ha, sec_addr); if (val & BIT_7) ddb_index[1] = (val & 0x7f); goto exit_boot_info; } else if (is_qla80XX(ha)) { buf = dma_alloc_coherent(&ha->pdev->dev, size, &buf_dma, GFP_KERNEL); if (!buf) { DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Unable to allocate dma buffer\n", __func__)); ret = QLA_ERROR; goto exit_boot_info; } if (ha->port_num == 0) offset = BOOT_PARAM_OFFSET_PORT0; else if (ha->port_num == 1) offset = BOOT_PARAM_OFFSET_PORT1; else { ret = QLA_ERROR; goto exit_boot_info_free; } addr = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_iscsi_param * 4) + offset; if (qla4xxx_get_flash(ha, buf_dma, addr, 13 * sizeof(uint8_t)) != QLA_SUCCESS) { DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash" " failed\n", 
ha->host_no, __func__)); ret = QLA_ERROR; goto exit_boot_info_free; } /* Check Boot Mode */ if (!(buf[1] & 0x07)) { DEBUG2(ql4_printk(KERN_INFO, ha, "Firmware boot options" " : 0x%x\n", buf[1])); ret = QLA_ERROR; goto exit_boot_info_free; } /* get primary valid target index */ if (buf[2] & BIT_7) ddb_index[0] = buf[2] & 0x7f; /* get secondary valid target index */ if (buf[11] & BIT_7) ddb_index[1] = buf[11] & 0x7f; } else { ret = QLA_ERROR; goto exit_boot_info; } DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary target ID %d, Secondary" " target ID %d\n", __func__, ddb_index[0], ddb_index[1])); exit_boot_info_free: dma_free_coherent(&ha->pdev->dev, size, buf, buf_dma); exit_boot_info: ha->pri_ddb_idx = ddb_index[0]; ha->sec_ddb_idx = ddb_index[1]; return ret; } /** * qla4xxx_get_bidi_chap - Get a BIDI CHAP user and password * @ha: pointer to adapter structure * @username: CHAP username to be returned * @password: CHAP password to be returned * * If a boot entry has BIDI CHAP enabled then we need to set the BIDI CHAP * user and password in the sysfs entry in /sys/firmware/iscsi_boot#/. * So from the CHAP cache find the first BIDI CHAP entry and set it * to the boot record in sysfs. **/ static int qla4xxx_get_bidi_chap(struct scsi_qla_host *ha, char *username, char *password) { int i, ret = -EINVAL; int max_chap_entries = 0; struct ql4_chap_table *chap_table; if (is_qla80XX(ha)) max_chap_entries = (ha->hw.flt_chap_size / 2) / sizeof(struct ql4_chap_table); else max_chap_entries = MAX_CHAP_ENTRIES_40XX; if (!ha->chap_list) { ql4_printk(KERN_ERR, ha, "Do not have CHAP table cache\n"); return ret; } mutex_lock(&ha->chap_sem); for (i = 0; i < max_chap_entries; i++) { chap_table = (struct ql4_chap_table *)ha->chap_list + i; if (chap_table->cookie != cpu_to_le16(CHAP_VALID_COOKIE)) { continue; } if (chap_table->flags & BIT_7) /* local */ continue; if (!(chap_table->flags & BIT_6)) /* Not BIDI */ continue; strscpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN); strscpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN); ret = 0; break; } mutex_unlock(&ha->chap_sem); return ret; } static int qla4xxx_get_boot_target(struct scsi_qla_host *ha, struct ql4_boot_session_info *boot_sess, uint16_t ddb_index) { struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0]; struct dev_db_entry *fw_ddb_entry; dma_addr_t fw_ddb_entry_dma; uint16_t idx; uint16_t options; int ret = QLA_SUCCESS; fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), &fw_ddb_entry_dma, GFP_KERNEL); if (!fw_ddb_entry) { DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Unable to allocate dma buffer.\n", __func__)); ret = QLA_ERROR; return ret; } if (qla4xxx_bootdb_by_index(ha, fw_ddb_entry, fw_ddb_entry_dma, ddb_index)) { DEBUG2(ql4_printk(KERN_INFO, ha, "%s: No Flash DDB found at " "index [%d]\n", __func__, ddb_index)); ret = QLA_ERROR; goto exit_boot_target; } /* Update target name and IP from DDB */ memcpy(boot_sess->target_name, fw_ddb_entry->iscsi_name, min(sizeof(boot_sess->target_name), sizeof(fw_ddb_entry->iscsi_name))); options = le16_to_cpu(fw_ddb_entry->options); if (options & DDB_OPT_IPV6_DEVICE) { memcpy(&boot_conn->dest_ipaddr.ip_address, &fw_ddb_entry->ip_addr[0], IPv6_ADDR_LEN); } else { boot_conn->dest_ipaddr.ip_type = 0x1; memcpy(&boot_conn->dest_ipaddr.ip_address, &fw_ddb_entry->ip_addr[0], IP_ADDR_LEN); } boot_conn->dest_port = le16_to_cpu(fw_ddb_entry->port); /* update chap information */ idx = __le16_to_cpu(fw_ddb_entry->chap_tbl_idx); if (BIT_7 & le16_to_cpu(fw_ddb_entry->iscsi_options)) { 
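		/* BIT_7 in iscsi_options appears to flag CHAP authentication
		 * for this boot target; the target CHAP name/secret are then
		 * read from the CHAP table entry at chap_tbl_idx (idx) below.
		 */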
DEBUG2(ql4_printk(KERN_INFO, ha, "Setting chap\n")); ret = qla4xxx_get_chap(ha, (char *)&boot_conn->chap. target_chap_name, (char *)&boot_conn->chap.target_secret, idx); if (ret) { ql4_printk(KERN_ERR, ha, "Failed to set chap\n"); ret = QLA_ERROR; goto exit_boot_target; } boot_conn->chap.target_chap_name_length = QL4_CHAP_MAX_NAME_LEN; boot_conn->chap.target_secret_length = QL4_CHAP_MAX_SECRET_LEN; } if (BIT_4 & le16_to_cpu(fw_ddb_entry->iscsi_options)) { DEBUG2(ql4_printk(KERN_INFO, ha, "Setting BIDI chap\n")); ret = qla4xxx_get_bidi_chap(ha, (char *)&boot_conn->chap.intr_chap_name, (char *)&boot_conn->chap.intr_secret); if (ret) { ql4_printk(KERN_ERR, ha, "Failed to set BIDI chap\n"); ret = QLA_ERROR; goto exit_boot_target; } boot_conn->chap.intr_chap_name_length = QL4_CHAP_MAX_NAME_LEN; boot_conn->chap.intr_secret_length = QL4_CHAP_MAX_SECRET_LEN; } exit_boot_target: dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), fw_ddb_entry, fw_ddb_entry_dma); return ret; } static int qla4xxx_get_boot_info(struct scsi_qla_host *ha) { uint16_t ddb_index[2]; int ret = QLA_ERROR; int rval; memset(ddb_index, 0, sizeof(ddb_index)); ddb_index[0] = 0xffff; ddb_index[1] = 0xffff; ret = get_fw_boot_info(ha, ddb_index); if (ret != QLA_SUCCESS) { DEBUG2(ql4_printk(KERN_INFO, ha, "%s: No boot target configured.\n", __func__)); return ret; } if (ql4xdisablesysfsboot) return QLA_SUCCESS; if (ddb_index[0] == 0xffff) goto sec_target; rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_pri_sess), ddb_index[0]); if (rval != QLA_SUCCESS) { DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary boot target not " "configured\n", __func__)); } else ret = QLA_SUCCESS; sec_target: if (ddb_index[1] == 0xffff) goto exit_get_boot_info; rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_sec_sess), ddb_index[1]); if (rval != QLA_SUCCESS) { DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Secondary boot target not" " configured\n", __func__)); } else ret = QLA_SUCCESS; exit_get_boot_info: return ret; } static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha) { struct iscsi_boot_kobj *boot_kobj; if (qla4xxx_get_boot_info(ha) != QLA_SUCCESS) return QLA_ERROR; if (ql4xdisablesysfsboot) { ql4_printk(KERN_INFO, ha, "%s: syfsboot disabled - driver will trigger login " "and publish session for discovery .\n", __func__); return QLA_SUCCESS; } ha->boot_kset = iscsi_boot_create_host_kset(ha->host->host_no); if (!ha->boot_kset) goto kset_free; if (!scsi_host_get(ha->host)) goto kset_free; boot_kobj = iscsi_boot_create_target(ha->boot_kset, 0, ha, qla4xxx_show_boot_tgt_pri_info, qla4xxx_tgt_get_attr_visibility, qla4xxx_boot_release); if (!boot_kobj) goto put_host; if (!scsi_host_get(ha->host)) goto kset_free; boot_kobj = iscsi_boot_create_target(ha->boot_kset, 1, ha, qla4xxx_show_boot_tgt_sec_info, qla4xxx_tgt_get_attr_visibility, qla4xxx_boot_release); if (!boot_kobj) goto put_host; if (!scsi_host_get(ha->host)) goto kset_free; boot_kobj = iscsi_boot_create_initiator(ha->boot_kset, 0, ha, qla4xxx_show_boot_ini_info, qla4xxx_ini_get_attr_visibility, qla4xxx_boot_release); if (!boot_kobj) goto put_host; if (!scsi_host_get(ha->host)) goto kset_free; boot_kobj = iscsi_boot_create_ethernet(ha->boot_kset, 0, ha, qla4xxx_show_boot_eth_info, qla4xxx_eth_get_attr_visibility, qla4xxx_boot_release); if (!boot_kobj) goto put_host; return QLA_SUCCESS; put_host: scsi_host_put(ha->host); kset_free: iscsi_boot_destroy_kset(ha->boot_kset); return -ENOMEM; } static void qla4xxx_get_param_ddb(struct ddb_entry *ddb_entry, struct ql4_tuple_ddb *tddb) { struct 
iscsi_cls_session *cls_sess; struct iscsi_cls_conn *cls_conn; struct iscsi_session *sess; struct iscsi_conn *conn; DEBUG2(printk(KERN_INFO "Func: %s\n", __func__)); cls_sess = ddb_entry->sess; sess = cls_sess->dd_data; cls_conn = ddb_entry->conn; conn = cls_conn->dd_data; tddb->tpgt = sess->tpgt; tddb->port = conn->persistent_port; strscpy(tddb->iscsi_name, sess->targetname, ISCSI_NAME_SIZE); strscpy(tddb->ip_addr, conn->persistent_address, DDB_IPADDR_LEN); } static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry, struct ql4_tuple_ddb *tddb, uint8_t *flash_isid) { uint16_t options = 0; tddb->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp); memcpy(&tddb->iscsi_name[0], &fw_ddb_entry->iscsi_name[0], min(sizeof(tddb->iscsi_name), sizeof(fw_ddb_entry->iscsi_name))); options = le16_to_cpu(fw_ddb_entry->options); if (options & DDB_OPT_IPV6_DEVICE) sprintf(tddb->ip_addr, "%pI6", fw_ddb_entry->ip_addr); else sprintf(tddb->ip_addr, "%pI4", fw_ddb_entry->ip_addr); tddb->port = le16_to_cpu(fw_ddb_entry->port); if (flash_isid == NULL) memcpy(&tddb->isid[0], &fw_ddb_entry->isid[0], sizeof(tddb->isid)); else memcpy(&tddb->isid[0], &flash_isid[0], sizeof(tddb->isid)); } static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha, struct ql4_tuple_ddb *old_tddb, struct ql4_tuple_ddb *new_tddb, uint8_t is_isid_compare) { if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name)) return QLA_ERROR; if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr)) return QLA_ERROR; if (old_tddb->port != new_tddb->port) return QLA_ERROR; /* For multi sessions, driver generates the ISID, so do not compare * ISID in reset path since it would be a comparison between the * driver generated ISID and firmware generated ISID. This could * lead to adding duplicated DDBs in the list as driver generated * ISID would not match firmware generated ISID. 
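	 * (Here is_isid_compare is passed as false from
	 * qla4xxx_is_session_exists() and as true from
	 * qla4xxx_is_flash_ddb_exists() below.)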
	 */
	if (is_isid_compare) {
		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "%s: old ISID [%pmR] New ISID [%pmR]\n",
				  __func__, old_tddb->isid, new_tddb->isid));
		if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0],
			   sizeof(old_tddb->isid)))
			return QLA_ERROR;
	}

	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "Match Found, fw[%d,%d,%s,%s], [%d,%d,%s,%s]",
			  old_tddb->port, old_tddb->tpgt, old_tddb->ip_addr,
			  old_tddb->iscsi_name, new_tddb->port, new_tddb->tpgt,
			  new_tddb->ip_addr, new_tddb->iscsi_name));

	return QLA_SUCCESS;
}

static int qla4xxx_is_session_exists(struct scsi_qla_host *ha,
				     struct dev_db_entry *fw_ddb_entry,
				     uint32_t *index)
{
	struct ddb_entry *ddb_entry;
	struct ql4_tuple_ddb *fw_tddb = NULL;
	struct ql4_tuple_ddb *tmp_tddb = NULL;
	int idx;
	int ret = QLA_ERROR;

	fw_tddb = vzalloc(sizeof(*fw_tddb));
	if (!fw_tddb) {
		DEBUG2(ql4_printk(KERN_WARNING, ha,
				  "Memory Allocation failed.\n"));
		ret = QLA_SUCCESS;
		goto exit_check;
	}

	tmp_tddb = vzalloc(sizeof(*tmp_tddb));
	if (!tmp_tddb) {
		DEBUG2(ql4_printk(KERN_WARNING, ha,
				  "Memory Allocation failed.\n"));
		ret = QLA_SUCCESS;
		goto exit_check;
	}

	qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb, NULL);

	for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
		ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
		if (ddb_entry == NULL)
			continue;

		qla4xxx_get_param_ddb(ddb_entry, tmp_tddb);
		if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, false)) {
			ret = QLA_SUCCESS; /* found */
			if (index != NULL)
				*index = idx;
			goto exit_check;
		}
	}

exit_check:
	vfree(fw_tddb);
	vfree(tmp_tddb);
	return ret;
}

/**
 * qla4xxx_check_existing_isid - check if a target with the same isid exists
 *				 in the target list
 * @list_nt: list of targets
 * @isid: isid to check
 *
 * This routine returns QLA_SUCCESS if a target with the same isid exists.
 **/
static int qla4xxx_check_existing_isid(struct list_head *list_nt, uint8_t *isid)
{
	struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp;
	struct dev_db_entry *fw_ddb_entry;

	list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
		fw_ddb_entry = &nt_ddb_idx->fw_ddb;
		if (memcmp(&fw_ddb_entry->isid[0], &isid[0],
			   sizeof(nt_ddb_idx->fw_ddb.isid)) == 0) {
			return QLA_SUCCESS;
		}
	}
	return QLA_ERROR;
}

/**
 * qla4xxx_update_isid - compare ddbs and update the isid
 * @ha: Pointer to host adapter structure.
 * @list_nt: list of nt targets
 * @fw_ddb_entry: firmware ddb entry
 *
 * This routine updates the isid if ddbs have the same iqn, the same isid and
 * different IP addresses.
 * Return QLA_SUCCESS if the isid is updated.
 **/
static int qla4xxx_update_isid(struct scsi_qla_host *ha,
			       struct list_head *list_nt,
			       struct dev_db_entry *fw_ddb_entry)
{
	uint8_t base_value, i;

	base_value = fw_ddb_entry->isid[1] & 0x1f;
	for (i = 0; i < 8; i++) {
		fw_ddb_entry->isid[1] = (base_value | (i << 5));
		if (qla4xxx_check_existing_isid(list_nt, fw_ddb_entry->isid))
			break;
	}

	if (!qla4xxx_check_existing_isid(list_nt, fw_ddb_entry->isid))
		return QLA_ERROR;

	return QLA_SUCCESS;
}

/**
 * qla4xxx_should_update_isid - check if the isid needs to be updated
 * @ha: Pointer to host adapter structure.
* @old_tddb: ddb tuple * @new_tddb: ddb tuple * * Return QLA_SUCCESS if different IP, different PORT, same iqn, * same isid **/ static int qla4xxx_should_update_isid(struct scsi_qla_host *ha, struct ql4_tuple_ddb *old_tddb, struct ql4_tuple_ddb *new_tddb) { if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr) == 0) { /* Same ip */ if (old_tddb->port == new_tddb->port) return QLA_ERROR; } if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name)) /* different iqn */ return QLA_ERROR; if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0], sizeof(old_tddb->isid))) /* different isid */ return QLA_ERROR; return QLA_SUCCESS; } /** * qla4xxx_is_flash_ddb_exists - check if fw_ddb_entry already exists in list_nt * @ha: Pointer to host adapter structure. * @list_nt: list of nt target. * @fw_ddb_entry: firmware ddb entry. * * This routine check if fw_ddb_entry already exists in list_nt to avoid * duplicate ddb in list_nt. * Return QLA_SUCCESS if duplicate ddb exit in list_nl. * Note: This function also update isid of DDB if required. **/ static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha, struct list_head *list_nt, struct dev_db_entry *fw_ddb_entry) { struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp; struct ql4_tuple_ddb *fw_tddb = NULL; struct ql4_tuple_ddb *tmp_tddb = NULL; int rval, ret = QLA_ERROR; fw_tddb = vzalloc(sizeof(*fw_tddb)); if (!fw_tddb) { DEBUG2(ql4_printk(KERN_WARNING, ha, "Memory Allocation failed.\n")); ret = QLA_SUCCESS; goto exit_check; } tmp_tddb = vzalloc(sizeof(*tmp_tddb)); if (!tmp_tddb) { DEBUG2(ql4_printk(KERN_WARNING, ha, "Memory Allocation failed.\n")); ret = QLA_SUCCESS; goto exit_check; } qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb, NULL); list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) { qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb, nt_ddb_idx->flash_isid); ret = qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, true); /* found duplicate ddb */ if (ret == QLA_SUCCESS) goto exit_check; } list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) { qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb, NULL); ret = qla4xxx_should_update_isid(ha, tmp_tddb, fw_tddb); if (ret == QLA_SUCCESS) { rval = qla4xxx_update_isid(ha, list_nt, fw_ddb_entry); if (rval == QLA_SUCCESS) ret = QLA_ERROR; else ret = QLA_SUCCESS; goto exit_check; } } exit_check: vfree(fw_tddb); vfree(tmp_tddb); return ret; } static void qla4xxx_free_ddb_list(struct list_head *list_ddb) { struct qla_ddb_index *ddb_idx, *ddb_idx_tmp; list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) { list_del_init(&ddb_idx->list); vfree(ddb_idx); } } static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha, struct dev_db_entry *fw_ddb_entry) { struct iscsi_endpoint *ep; struct sockaddr_in *addr; struct sockaddr_in6 *addr6; struct sockaddr *t_addr; struct sockaddr_storage *dst_addr; char *ip; /* TODO: need to destroy on unload iscsi_endpoint*/ dst_addr = vmalloc(sizeof(*dst_addr)); if (!dst_addr) return NULL; if (fw_ddb_entry->options & DDB_OPT_IPV6_DEVICE) { t_addr = (struct sockaddr *)dst_addr; t_addr->sa_family = AF_INET6; addr6 = (struct sockaddr_in6 *)dst_addr; ip = (char *)&addr6->sin6_addr; memcpy(ip, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN); addr6->sin6_port = htons(le16_to_cpu(fw_ddb_entry->port)); } else { t_addr = (struct sockaddr *)dst_addr; t_addr->sa_family = AF_INET; addr = (struct sockaddr_in *)dst_addr; ip = (char *)&addr->sin_addr; memcpy(ip, fw_ddb_entry->ip_addr, IP_ADDR_LEN); addr->sin_port = htons(le16_to_cpu(fw_ddb_entry->port)); 
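		/* dst_addr now holds the portal sockaddr (IPv4 here, IPv6 in
		 * the branch above); it is only needed for the
		 * qla4xxx_ep_connect() call below and is freed right after. */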
} ep = qla4xxx_ep_connect(ha->host, (struct sockaddr *)dst_addr, 0); vfree(dst_addr); return ep; } static int qla4xxx_verify_boot_idx(struct scsi_qla_host *ha, uint16_t idx) { if (ql4xdisablesysfsboot) return QLA_SUCCESS; if (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx) return QLA_ERROR; return QLA_SUCCESS; } static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha, struct ddb_entry *ddb_entry, uint16_t idx) { uint16_t def_timeout; ddb_entry->ddb_type = FLASH_DDB; ddb_entry->fw_ddb_index = INVALID_ENTRY; ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE; ddb_entry->ha = ha; ddb_entry->unblock_sess = qla4xxx_unblock_flash_ddb; ddb_entry->ddb_change = qla4xxx_flash_ddb_change; ddb_entry->chap_tbl_idx = INVALID_ENTRY; atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY); atomic_set(&ddb_entry->relogin_timer, 0); atomic_set(&ddb_entry->relogin_retry_count, 0); def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout); ddb_entry->default_relogin_timeout = (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ? def_timeout : LOGIN_TOV; ddb_entry->default_time2wait = le16_to_cpu(ddb_entry->fw_ddb_entry.iscsi_def_time2wait); if (ql4xdisablesysfsboot && (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx)) set_bit(DF_BOOT_TGT, &ddb_entry->flags); } static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha) { uint32_t idx = 0; uint32_t ip_idx[IP_ADDR_COUNT] = {0, 1, 2, 3}; /* 4 IP interfaces */ uint32_t sts[MBOX_REG_COUNT]; uint32_t ip_state; unsigned long wtime; int ret; wtime = jiffies + (HZ * IP_CONFIG_TOV); do { for (idx = 0; idx < IP_ADDR_COUNT; idx++) { if (ip_idx[idx] == -1) continue; ret = qla4xxx_get_ip_state(ha, 0, ip_idx[idx], sts); if (ret == QLA_ERROR) { ip_idx[idx] = -1; continue; } ip_state = (sts[1] & IP_STATE_MASK) >> IP_STATE_SHIFT; DEBUG2(ql4_printk(KERN_INFO, ha, "Waiting for IP state for idx = %d, state = 0x%x\n", ip_idx[idx], ip_state)); if (ip_state == IP_ADDRSTATE_UNCONFIGURED || ip_state == IP_ADDRSTATE_INVALID || ip_state == IP_ADDRSTATE_PREFERRED || ip_state == IP_ADDRSTATE_DEPRICATED || ip_state == IP_ADDRSTATE_DISABLING) ip_idx[idx] = -1; } /* Break if all IP states checked */ if ((ip_idx[0] == -1) && (ip_idx[1] == -1) && (ip_idx[2] == -1) && (ip_idx[3] == -1)) break; schedule_timeout_uninterruptible(HZ); } while (time_after(wtime, jiffies)); } static int qla4xxx_cmp_fw_stentry(struct dev_db_entry *fw_ddb_entry, struct dev_db_entry *flash_ddb_entry) { uint16_t options = 0; size_t ip_len = IP_ADDR_LEN; options = le16_to_cpu(fw_ddb_entry->options); if (options & DDB_OPT_IPV6_DEVICE) ip_len = IPv6_ADDR_LEN; if (memcmp(fw_ddb_entry->ip_addr, flash_ddb_entry->ip_addr, ip_len)) return QLA_ERROR; if (memcmp(&fw_ddb_entry->isid[0], &flash_ddb_entry->isid[0], sizeof(fw_ddb_entry->isid))) return QLA_ERROR; if (memcmp(&fw_ddb_entry->port, &flash_ddb_entry->port, sizeof(fw_ddb_entry->port))) return QLA_ERROR; return QLA_SUCCESS; } static int qla4xxx_find_flash_st_idx(struct scsi_qla_host *ha, struct dev_db_entry *fw_ddb_entry, uint32_t fw_idx, uint32_t *flash_index) { struct dev_db_entry *flash_ddb_entry; dma_addr_t flash_ddb_entry_dma; uint32_t idx = 0; int max_ddbs; int ret = QLA_ERROR, status; max_ddbs = is_qla40XX(ha) ? 
MAX_DEV_DB_ENTRIES_40XX : MAX_DEV_DB_ENTRIES; flash_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, &flash_ddb_entry_dma); if (flash_ddb_entry == NULL || fw_ddb_entry == NULL) { ql4_printk(KERN_ERR, ha, "Out of memory\n"); goto exit_find_st_idx; } status = qla4xxx_flashdb_by_index(ha, flash_ddb_entry, flash_ddb_entry_dma, fw_idx); if (status == QLA_SUCCESS) { status = qla4xxx_cmp_fw_stentry(fw_ddb_entry, flash_ddb_entry); if (status == QLA_SUCCESS) { *flash_index = fw_idx; ret = QLA_SUCCESS; goto exit_find_st_idx; } } for (idx = 0; idx < max_ddbs; idx++) { status = qla4xxx_flashdb_by_index(ha, flash_ddb_entry, flash_ddb_entry_dma, idx); if (status == QLA_ERROR) continue; status = qla4xxx_cmp_fw_stentry(fw_ddb_entry, flash_ddb_entry); if (status == QLA_SUCCESS) { *flash_index = idx; ret = QLA_SUCCESS; goto exit_find_st_idx; } } if (idx == max_ddbs) ql4_printk(KERN_ERR, ha, "Failed to find ST [%d] in flash\n", fw_idx); exit_find_st_idx: if (flash_ddb_entry) dma_pool_free(ha->fw_ddb_dma_pool, flash_ddb_entry, flash_ddb_entry_dma); return ret; } static void qla4xxx_build_st_list(struct scsi_qla_host *ha, struct list_head *list_st) { struct qla_ddb_index *st_ddb_idx; int max_ddbs; int fw_idx_size; struct dev_db_entry *fw_ddb_entry; dma_addr_t fw_ddb_dma; int ret; uint32_t idx = 0, next_idx = 0; uint32_t state = 0, conn_err = 0; uint32_t flash_index = -1; uint16_t conn_id = 0; fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, &fw_ddb_dma); if (fw_ddb_entry == NULL) { DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n")); goto exit_st_list; } max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX : MAX_DEV_DB_ENTRIES; fw_idx_size = sizeof(struct qla_ddb_index); for (idx = 0; idx < max_ddbs; idx = next_idx) { ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma, NULL, &next_idx, &state, &conn_err, NULL, &conn_id); if (ret == QLA_ERROR) break; /* Ignore DDB if invalid state (unassigned) */ if (state == DDB_DS_UNASSIGNED) goto continue_next_st; /* Check if ST, add to the list_st */ if (strlen((char *) fw_ddb_entry->iscsi_name) != 0) goto continue_next_st; st_ddb_idx = vzalloc(fw_idx_size); if (!st_ddb_idx) break; ret = qla4xxx_find_flash_st_idx(ha, fw_ddb_entry, idx, &flash_index); if (ret == QLA_ERROR) { ql4_printk(KERN_ERR, ha, "No flash entry for ST at idx [%d]\n", idx); st_ddb_idx->flash_ddb_idx = idx; } else { ql4_printk(KERN_INFO, ha, "ST at idx [%d] is stored at flash [%d]\n", idx, flash_index); st_ddb_idx->flash_ddb_idx = flash_index; } st_ddb_idx->fw_ddb_idx = idx; list_add_tail(&st_ddb_idx->list, list_st); continue_next_st: if (next_idx == 0) break; } exit_st_list: if (fw_ddb_entry) dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma); } /** * qla4xxx_remove_failed_ddb - Remove inactive or failed ddb from list * @ha: pointer to adapter structure * @list_ddb: List from which failed ddb to be removed * * Iterate over the list of DDBs and find and remove DDBs that are either in * no connection active state or failed state **/ static void qla4xxx_remove_failed_ddb(struct scsi_qla_host *ha, struct list_head *list_ddb) { struct qla_ddb_index *ddb_idx, *ddb_idx_tmp; uint32_t next_idx = 0; uint32_t state = 0, conn_err = 0; int ret; list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) { ret = qla4xxx_get_fwddb_entry(ha, ddb_idx->fw_ddb_idx, NULL, 0, NULL, &next_idx, &state, &conn_err, NULL, NULL); if (ret == QLA_ERROR) continue; if (state == DDB_DS_NO_CONNECTION_ACTIVE || state == DDB_DS_SESSION_FAILED) { list_del_init(&ddb_idx->list); 
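			/* Only the build-list bookkeeping is dropped and freed
			 * here; the firmware DDB itself is left untouched. */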
vfree(ddb_idx); } } } static void qla4xxx_update_sess_disc_idx(struct scsi_qla_host *ha, struct ddb_entry *ddb_entry, struct dev_db_entry *fw_ddb_entry) { struct iscsi_cls_session *cls_sess; struct iscsi_session *sess; uint32_t max_ddbs = 0; uint16_t ddb_link = -1; max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX : MAX_DEV_DB_ENTRIES; cls_sess = ddb_entry->sess; sess = cls_sess->dd_data; ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link); if (ddb_link < max_ddbs) sess->discovery_parent_idx = ddb_link; else sess->discovery_parent_idx = DDB_NO_LINK; } static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha, struct dev_db_entry *fw_ddb_entry, int is_reset, uint16_t idx) { struct iscsi_cls_session *cls_sess; struct iscsi_session *sess; struct iscsi_cls_conn *cls_conn; struct iscsi_endpoint *ep; uint16_t cmds_max = 32; uint16_t conn_id = 0; uint32_t initial_cmdsn = 0; int ret = QLA_SUCCESS; struct ddb_entry *ddb_entry = NULL; /* Create session object, with INVALID_ENTRY, * the targer_id would get set when we issue the login */ cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, ha->host, cmds_max, sizeof(struct ddb_entry), sizeof(struct ql4_task_data), initial_cmdsn, INVALID_ENTRY); if (!cls_sess) { ret = QLA_ERROR; goto exit_setup; } /* * so calling module_put function to decrement the * reference count. **/ module_put(qla4xxx_iscsi_transport.owner); sess = cls_sess->dd_data; ddb_entry = sess->dd_data; ddb_entry->sess = cls_sess; cls_sess->recovery_tmo = ql4xsess_recovery_tmo; memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry, sizeof(struct dev_db_entry)); qla4xxx_setup_flash_ddb_entry(ha, ddb_entry, idx); cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn), conn_id); if (!cls_conn) { ret = QLA_ERROR; goto exit_setup; } ddb_entry->conn = cls_conn; /* Setup ep, for displaying attributes in sysfs */ ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry); if (ep) { ep->conn = cls_conn; cls_conn->ep = ep; } else { DEBUG2(ql4_printk(KERN_ERR, ha, "Unable to get ep\n")); ret = QLA_ERROR; goto exit_setup; } /* Update sess/conn params */ qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn); qla4xxx_update_sess_disc_idx(ha, ddb_entry, fw_ddb_entry); if (is_reset == RESET_ADAPTER) { iscsi_block_session(cls_sess); /* Use the relogin path to discover new devices * by short-circuiting the logic of setting * timer to relogin - instead set the flags * to initiate login right away. 
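		 * (DPC_RELOGIN_DEVICE and DF_RELOGIN set below are picked up
		 * by qla4xxx_dpc_relogin() on the next DPC pass.)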
*/ set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags); set_bit(DF_RELOGIN, &ddb_entry->flags); } exit_setup: return ret; } static void qla4xxx_update_fw_ddb_link(struct scsi_qla_host *ha, struct list_head *list_ddb, struct dev_db_entry *fw_ddb_entry) { struct qla_ddb_index *ddb_idx, *ddb_idx_tmp; uint16_t ddb_link; ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link); list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) { if (ddb_idx->fw_ddb_idx == ddb_link) { DEBUG2(ql4_printk(KERN_INFO, ha, "Updating NT parent idx from [%d] to [%d]\n", ddb_link, ddb_idx->flash_ddb_idx)); fw_ddb_entry->ddb_link = cpu_to_le16(ddb_idx->flash_ddb_idx); return; } } } static void qla4xxx_build_nt_list(struct scsi_qla_host *ha, struct list_head *list_nt, struct list_head *list_st, int is_reset) { struct dev_db_entry *fw_ddb_entry; struct ddb_entry *ddb_entry = NULL; dma_addr_t fw_ddb_dma; int max_ddbs; int fw_idx_size; int ret; uint32_t idx = 0, next_idx = 0; uint32_t state = 0, conn_err = 0; uint32_t ddb_idx = -1; uint16_t conn_id = 0; uint16_t ddb_link = -1; struct qla_ddb_index *nt_ddb_idx; fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, &fw_ddb_dma); if (fw_ddb_entry == NULL) { DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n")); goto exit_nt_list; } max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX : MAX_DEV_DB_ENTRIES; fw_idx_size = sizeof(struct qla_ddb_index); for (idx = 0; idx < max_ddbs; idx = next_idx) { ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma, NULL, &next_idx, &state, &conn_err, NULL, &conn_id); if (ret == QLA_ERROR) break; if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS) goto continue_next_nt; /* Check if NT, then add to list it */ if (strlen((char *) fw_ddb_entry->iscsi_name) == 0) goto continue_next_nt; ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link); if (ddb_link < max_ddbs) qla4xxx_update_fw_ddb_link(ha, list_st, fw_ddb_entry); if (!(state == DDB_DS_NO_CONNECTION_ACTIVE || state == DDB_DS_SESSION_FAILED) && (is_reset == INIT_ADAPTER)) goto continue_next_nt; DEBUG2(ql4_printk(KERN_INFO, ha, "Adding DDB to session = 0x%x\n", idx)); if (is_reset == INIT_ADAPTER) { nt_ddb_idx = vmalloc(fw_idx_size); if (!nt_ddb_idx) break; nt_ddb_idx->fw_ddb_idx = idx; /* Copy original isid as it may get updated in function * qla4xxx_update_isid(). 
We need original isid in * function qla4xxx_compare_tuple_ddb to find duplicate * target */ memcpy(&nt_ddb_idx->flash_isid[0], &fw_ddb_entry->isid[0], sizeof(nt_ddb_idx->flash_isid)); ret = qla4xxx_is_flash_ddb_exists(ha, list_nt, fw_ddb_entry); if (ret == QLA_SUCCESS) { /* free nt_ddb_idx and do not add to list_nt */ vfree(nt_ddb_idx); goto continue_next_nt; } /* Copy updated isid */ memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry, sizeof(struct dev_db_entry)); list_add_tail(&nt_ddb_idx->list, list_nt); } else if (is_reset == RESET_ADAPTER) { ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, &ddb_idx); if (ret == QLA_SUCCESS) { ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, ddb_idx); if (ddb_entry != NULL) qla4xxx_update_sess_disc_idx(ha, ddb_entry, fw_ddb_entry); goto continue_next_nt; } } ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset, idx); if (ret == QLA_ERROR) goto exit_nt_list; continue_next_nt: if (next_idx == 0) break; } exit_nt_list: if (fw_ddb_entry) dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma); } static void qla4xxx_build_new_nt_list(struct scsi_qla_host *ha, struct list_head *list_nt, uint16_t target_id) { struct dev_db_entry *fw_ddb_entry; dma_addr_t fw_ddb_dma; int max_ddbs; int fw_idx_size; int ret; uint32_t idx = 0, next_idx = 0; uint32_t state = 0, conn_err = 0; uint16_t conn_id = 0; struct qla_ddb_index *nt_ddb_idx; fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, &fw_ddb_dma); if (fw_ddb_entry == NULL) { DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n")); goto exit_new_nt_list; } max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX : MAX_DEV_DB_ENTRIES; fw_idx_size = sizeof(struct qla_ddb_index); for (idx = 0; idx < max_ddbs; idx = next_idx) { ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma, NULL, &next_idx, &state, &conn_err, NULL, &conn_id); if (ret == QLA_ERROR) break; /* Check if NT, then add it to list */ if (strlen((char *)fw_ddb_entry->iscsi_name) == 0) goto continue_next_new_nt; if (!(state == DDB_DS_NO_CONNECTION_ACTIVE)) goto continue_next_new_nt; DEBUG2(ql4_printk(KERN_INFO, ha, "Adding DDB to session = 0x%x\n", idx)); nt_ddb_idx = vmalloc(fw_idx_size); if (!nt_ddb_idx) break; nt_ddb_idx->fw_ddb_idx = idx; ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, NULL); if (ret == QLA_SUCCESS) { /* free nt_ddb_idx and do not add to list_nt */ vfree(nt_ddb_idx); goto continue_next_new_nt; } if (target_id < max_ddbs) fw_ddb_entry->ddb_link = cpu_to_le16(target_id); list_add_tail(&nt_ddb_idx->list, list_nt); ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER, idx); if (ret == QLA_ERROR) goto exit_new_nt_list; continue_next_new_nt: if (next_idx == 0) break; } exit_new_nt_list: if (fw_ddb_entry) dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma); } /** * qla4xxx_sysfs_ddb_is_non_persistent - check for non-persistence of ddb entry * @dev: dev associated with the sysfs entry * @data: pointer to flashnode session object * * Returns: * 1: if flashnode entry is non-persistent * 0: if flashnode entry is persistent **/ static int qla4xxx_sysfs_ddb_is_non_persistent(struct device *dev, void *data) { struct iscsi_bus_flash_session *fnode_sess; if (!iscsi_flashnode_bus_match(dev, NULL)) return 0; fnode_sess = iscsi_dev_to_flash_session(dev); return (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT); } /** * qla4xxx_sysfs_ddb_tgt_create - Create sysfs entry for target * @ha: pointer to host * @fw_ddb_entry: flash ddb data * @idx: target index * @user: if set then this call is made from userland else from kernel * 
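 * When @user is set, the new flash node is created with flash_state
 * DEV_DB_NON_PERSISTENT and only becomes persistent once
 * qla4xxx_sysfs_ddb_apply() writes it to flash.
 *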
* Returns: * On sucess: QLA_SUCCESS * On failure: QLA_ERROR * * This create separate sysfs entries for session and connection attributes of * the given fw ddb entry. * If this is invoked as a result of a userspace call then the entry is marked * as nonpersistent using flash_state field. **/ static int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha, struct dev_db_entry *fw_ddb_entry, uint16_t *idx, int user) { struct iscsi_bus_flash_session *fnode_sess = NULL; struct iscsi_bus_flash_conn *fnode_conn = NULL; int rc = QLA_ERROR; fnode_sess = iscsi_create_flashnode_sess(ha->host, *idx, &qla4xxx_iscsi_transport, 0); if (!fnode_sess) { ql4_printk(KERN_ERR, ha, "%s: Unable to create session sysfs entry for flashnode %d of host%lu\n", __func__, *idx, ha->host_no); goto exit_tgt_create; } fnode_conn = iscsi_create_flashnode_conn(ha->host, fnode_sess, &qla4xxx_iscsi_transport, 0); if (!fnode_conn) { ql4_printk(KERN_ERR, ha, "%s: Unable to create conn sysfs entry for flashnode %d of host%lu\n", __func__, *idx, ha->host_no); goto free_sess; } if (user) { fnode_sess->flash_state = DEV_DB_NON_PERSISTENT; } else { fnode_sess->flash_state = DEV_DB_PERSISTENT; if (*idx == ha->pri_ddb_idx || *idx == ha->sec_ddb_idx) fnode_sess->is_boot_target = 1; else fnode_sess->is_boot_target = 0; } rc = qla4xxx_copy_from_fwddb_param(fnode_sess, fnode_conn, fw_ddb_entry); if (rc) goto free_sess; ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n", __func__, fnode_sess->dev.kobj.name); ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n", __func__, fnode_conn->dev.kobj.name); return QLA_SUCCESS; free_sess: iscsi_destroy_flashnode_sess(fnode_sess); exit_tgt_create: return QLA_ERROR; } /** * qla4xxx_sysfs_ddb_add - Add new ddb entry in flash * @shost: pointer to host * @buf: type of ddb entry (ipv4/ipv6) * @len: length of buf * * This creates new ddb entry in the flash by finding first free index and * storing default ddb there. And then create sysfs entry for the new ddb entry. **/ static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf, int len) { struct scsi_qla_host *ha = to_qla_host(shost); struct dev_db_entry *fw_ddb_entry = NULL; dma_addr_t fw_ddb_entry_dma; struct device *dev; uint16_t idx = 0; uint16_t max_ddbs = 0; uint32_t options = 0; uint32_t rval = QLA_ERROR; if (strncasecmp(PORTAL_TYPE_IPV4, buf, 4) && strncasecmp(PORTAL_TYPE_IPV6, buf, 4)) { DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Invalid portal type\n", __func__)); goto exit_ddb_add; } max_ddbs = is_qla40XX(ha) ? 
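/* ISP40xx only exposes the persistent flash DDB range here
 * (MAX_PRST_DEV_DB_ENTRIES); ISP8xxx parts use the full device
 * database size (MAX_DEV_DB_ENTRIES). */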
MAX_PRST_DEV_DB_ENTRIES : MAX_DEV_DB_ENTRIES; fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), &fw_ddb_entry_dma, GFP_KERNEL); if (!fw_ddb_entry) { DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Unable to allocate dma buffer\n", __func__)); goto exit_ddb_add; } dev = iscsi_find_flashnode_sess(ha->host, NULL, qla4xxx_sysfs_ddb_is_non_persistent); if (dev) { ql4_printk(KERN_ERR, ha, "%s: A non-persistent entry %s found\n", __func__, dev->kobj.name); put_device(dev); goto exit_ddb_add; } /* Index 0 and 1 are reserved for boot target entries */ for (idx = 2; idx < max_ddbs; idx++) { if (qla4xxx_flashdb_by_index(ha, fw_ddb_entry, fw_ddb_entry_dma, idx)) break; } if (idx == max_ddbs) goto exit_ddb_add; if (!strncasecmp("ipv6", buf, 4)) options |= IPV6_DEFAULT_DDB_ENTRY; rval = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma); if (rval == QLA_ERROR) goto exit_ddb_add; rval = qla4xxx_sysfs_ddb_tgt_create(ha, fw_ddb_entry, &idx, 1); exit_ddb_add: if (fw_ddb_entry) dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), fw_ddb_entry, fw_ddb_entry_dma); if (rval == QLA_SUCCESS) return idx; else return -EIO; } /** * qla4xxx_sysfs_ddb_apply - write the target ddb contents to Flash * @fnode_sess: pointer to session attrs of flash ddb entry * @fnode_conn: pointer to connection attrs of flash ddb entry * * This writes the contents of target ddb buffer to Flash with a valid cookie * value in order to make the ddb entry persistent. **/ static int qla4xxx_sysfs_ddb_apply(struct iscsi_bus_flash_session *fnode_sess, struct iscsi_bus_flash_conn *fnode_conn) { struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); struct scsi_qla_host *ha = to_qla_host(shost); uint32_t dev_db_start_offset = FLASH_OFFSET_DB_INFO; struct dev_db_entry *fw_ddb_entry = NULL; dma_addr_t fw_ddb_entry_dma; uint32_t options = 0; int rval = 0; fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), &fw_ddb_entry_dma, GFP_KERNEL); if (!fw_ddb_entry) { DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Unable to allocate dma buffer\n", __func__)); rval = -ENOMEM; goto exit_ddb_apply; } if (!strncasecmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) options |= IPV6_DEFAULT_DDB_ENTRY; rval = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma); if (rval == QLA_ERROR) goto exit_ddb_apply; dev_db_start_offset += (fnode_sess->target_id * sizeof(*fw_ddb_entry)); qla4xxx_copy_to_fwddb_param(fnode_sess, fnode_conn, fw_ddb_entry); fw_ddb_entry->cookie = DDB_VALID_COOKIE; rval = qla4xxx_set_flash(ha, fw_ddb_entry_dma, dev_db_start_offset, sizeof(*fw_ddb_entry), FLASH_OPT_RMW_COMMIT); if (rval == QLA_SUCCESS) { fnode_sess->flash_state = DEV_DB_PERSISTENT; ql4_printk(KERN_INFO, ha, "%s: flash node %u of host %lu written to flash\n", __func__, fnode_sess->target_id, ha->host_no); } else { rval = -EIO; ql4_printk(KERN_ERR, ha, "%s: Error while writing flash node %u of host %lu to flash\n", __func__, fnode_sess->target_id, ha->host_no); } exit_ddb_apply: if (fw_ddb_entry) dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), fw_ddb_entry, fw_ddb_entry_dma); return rval; } static ssize_t qla4xxx_sysfs_ddb_conn_open(struct scsi_qla_host *ha, struct dev_db_entry *fw_ddb_entry, uint16_t idx) { struct dev_db_entry *ddb_entry = NULL; dma_addr_t ddb_entry_dma; unsigned long wtime; uint32_t mbx_sts = 0; uint32_t state = 0, conn_err = 0; uint16_t tmo = 0; int ret = 0; ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*ddb_entry), &ddb_entry_dma, GFP_KERNEL); if (!ddb_entry) { DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Unable 
to allocate dma buffer\n", __func__)); return QLA_ERROR; } memcpy(ddb_entry, fw_ddb_entry, sizeof(*ddb_entry)); ret = qla4xxx_set_ddb_entry(ha, idx, ddb_entry_dma, &mbx_sts); if (ret != QLA_SUCCESS) { DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Unable to set ddb entry for index %d\n", __func__, idx)); goto exit_ddb_conn_open; } qla4xxx_conn_open(ha, idx); /* To ensure that sendtargets is done, wait for at least 12 secs */ tmo = ((ha->def_timeout > LOGIN_TOV) && (ha->def_timeout < LOGIN_TOV * 10) ? ha->def_timeout : LOGIN_TOV); DEBUG2(ql4_printk(KERN_INFO, ha, "Default time to wait for login to ddb %d\n", tmo)); wtime = jiffies + (HZ * tmo); do { ret = qla4xxx_get_fwddb_entry(ha, idx, NULL, 0, NULL, NULL, &state, &conn_err, NULL, NULL); if (ret == QLA_ERROR) continue; if (state == DDB_DS_NO_CONNECTION_ACTIVE || state == DDB_DS_SESSION_FAILED) break; schedule_timeout_uninterruptible(HZ / 10); } while (time_after(wtime, jiffies)); exit_ddb_conn_open: if (ddb_entry) dma_free_coherent(&ha->pdev->dev, sizeof(*ddb_entry), ddb_entry, ddb_entry_dma); return ret; } static int qla4xxx_ddb_login_st(struct scsi_qla_host *ha, struct dev_db_entry *fw_ddb_entry, uint16_t target_id) { struct qla_ddb_index *ddb_idx, *ddb_idx_tmp; struct list_head list_nt; uint16_t ddb_index; int ret = 0; if (test_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags)) { ql4_printk(KERN_WARNING, ha, "%s: A discovery already in progress!\n", __func__); return QLA_ERROR; } INIT_LIST_HEAD(&list_nt); set_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags); ret = qla4xxx_get_ddb_index(ha, &ddb_index); if (ret == QLA_ERROR) goto exit_login_st_clr_bit; ret = qla4xxx_sysfs_ddb_conn_open(ha, fw_ddb_entry, ddb_index); if (ret == QLA_ERROR) goto exit_login_st; qla4xxx_build_new_nt_list(ha, &list_nt, target_id); list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, &list_nt, list) { list_del_init(&ddb_idx->list); qla4xxx_clear_ddb_entry(ha, ddb_idx->fw_ddb_idx); vfree(ddb_idx); } exit_login_st: if (qla4xxx_clear_ddb_entry(ha, ddb_index) == QLA_ERROR) { ql4_printk(KERN_ERR, ha, "Unable to clear DDB index = 0x%x\n", ddb_index); } clear_bit(ddb_index, ha->ddb_idx_map); exit_login_st_clr_bit: clear_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags); return ret; } static int qla4xxx_ddb_login_nt(struct scsi_qla_host *ha, struct dev_db_entry *fw_ddb_entry, uint16_t idx) { int ret = QLA_ERROR; ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, NULL); if (ret != QLA_SUCCESS) ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER, idx); else ret = -EPERM; return ret; } /** * qla4xxx_sysfs_ddb_login - Login to the specified target * @fnode_sess: pointer to session attrs of flash ddb entry * @fnode_conn: pointer to connection attrs of flash ddb entry * * This logs in to the specified target **/ static int qla4xxx_sysfs_ddb_login(struct iscsi_bus_flash_session *fnode_sess, struct iscsi_bus_flash_conn *fnode_conn) { struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); struct scsi_qla_host *ha = to_qla_host(shost); struct dev_db_entry *fw_ddb_entry = NULL; dma_addr_t fw_ddb_entry_dma; uint32_t options = 0; int ret = 0; if (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT) { ql4_printk(KERN_ERR, ha, "%s: Target info is not persistent\n", __func__); ret = -EIO; goto exit_ddb_login; } fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), &fw_ddb_entry_dma, GFP_KERNEL); if (!fw_ddb_entry) { DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Unable to allocate dma buffer\n", __func__)); ret = -ENOMEM; goto exit_ddb_login; } if (!strncasecmp(fnode_sess->portal_type, 
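/* an IPv6 portal type selects the firmware's IPv6 default DDB template */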
PORTAL_TYPE_IPV6, 4)) options |= IPV6_DEFAULT_DDB_ENTRY; ret = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma); if (ret == QLA_ERROR) goto exit_ddb_login; qla4xxx_copy_to_fwddb_param(fnode_sess, fnode_conn, fw_ddb_entry); fw_ddb_entry->cookie = DDB_VALID_COOKIE; if (strlen((char *)fw_ddb_entry->iscsi_name) == 0) ret = qla4xxx_ddb_login_st(ha, fw_ddb_entry, fnode_sess->target_id); else ret = qla4xxx_ddb_login_nt(ha, fw_ddb_entry, fnode_sess->target_id); if (ret > 0) ret = -EIO; exit_ddb_login: if (fw_ddb_entry) dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), fw_ddb_entry, fw_ddb_entry_dma); return ret; } /** * qla4xxx_sysfs_ddb_logout_sid - Logout session for the specified target * @cls_sess: pointer to session to be logged out * * This performs session log out from the specified target **/ static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess) { struct iscsi_session *sess; struct ddb_entry *ddb_entry = NULL; struct scsi_qla_host *ha; struct dev_db_entry *fw_ddb_entry = NULL; dma_addr_t fw_ddb_entry_dma; unsigned long flags; unsigned long wtime; uint32_t ddb_state; int options; int ret = 0; sess = cls_sess->dd_data; ddb_entry = sess->dd_data; ha = ddb_entry->ha; if (ddb_entry->ddb_type != FLASH_DDB) { ql4_printk(KERN_ERR, ha, "%s: Not a flash node session\n", __func__); ret = -ENXIO; goto exit_ddb_logout; } if (test_bit(DF_BOOT_TGT, &ddb_entry->flags)) { ql4_printk(KERN_ERR, ha, "%s: Logout from boot target entry is not permitted.\n", __func__); ret = -EPERM; goto exit_ddb_logout; } fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), &fw_ddb_entry_dma, GFP_KERNEL); if (!fw_ddb_entry) { ql4_printk(KERN_ERR, ha, "%s: Unable to allocate dma buffer\n", __func__); ret = -ENOMEM; goto exit_ddb_logout; } if (test_and_set_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags)) goto ddb_logout_init; ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry, fw_ddb_entry_dma, NULL, NULL, &ddb_state, NULL, NULL, NULL); if (ret == QLA_ERROR) goto ddb_logout_init; if (ddb_state == DDB_DS_SESSION_ACTIVE) goto ddb_logout_init; /* wait until next relogin is triggered using DF_RELOGIN and * clear DF_RELOGIN to avoid invocation of further relogin */ wtime = jiffies + (HZ * RELOGIN_TOV); do { if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags)) goto ddb_logout_init; schedule_timeout_uninterruptible(HZ); } while ((time_after(wtime, jiffies))); ddb_logout_init: atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY); atomic_set(&ddb_entry->relogin_timer, 0); options = LOGOUT_OPTION_CLOSE_SESSION; qla4xxx_session_logout_ddb(ha, ddb_entry, options); memset(fw_ddb_entry, 0, sizeof(*fw_ddb_entry)); wtime = jiffies + (HZ * LOGOUT_TOV); do { ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry, fw_ddb_entry_dma, NULL, NULL, &ddb_state, NULL, NULL, NULL); if (ret == QLA_ERROR) goto ddb_logout_clr_sess; if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) || (ddb_state == DDB_DS_SESSION_FAILED)) goto ddb_logout_clr_sess; schedule_timeout_uninterruptible(HZ); } while ((time_after(wtime, jiffies))); ddb_logout_clr_sess: qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index); /* * we have decremented the reference count of the driver * when we setup the session to have the driver unload * to be seamless without actually destroying the * session **/ try_module_get(qla4xxx_iscsi_transport.owner); iscsi_destroy_endpoint(ddb_entry->conn->ep); spin_lock_irqsave(&ha->hardware_lock, flags); qla4xxx_free_ddb(ha, ddb_entry); 
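/* Hand the firmware DDB index back to the free map while still under
 * the hardware lock, then tear down the iSCSI session object outside it. */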
clear_bit(ddb_entry->fw_ddb_index, ha->ddb_idx_map); spin_unlock_irqrestore(&ha->hardware_lock, flags); iscsi_session_teardown(ddb_entry->sess); clear_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags); ret = QLA_SUCCESS; exit_ddb_logout: if (fw_ddb_entry) dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), fw_ddb_entry, fw_ddb_entry_dma); return ret; } /** * qla4xxx_sysfs_ddb_logout - Logout from the specified target * @fnode_sess: pointer to session attrs of flash ddb entry * @fnode_conn: pointer to connection attrs of flash ddb entry * * This performs log out from the specified target **/ static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess, struct iscsi_bus_flash_conn *fnode_conn) { struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); struct scsi_qla_host *ha = to_qla_host(shost); struct ql4_tuple_ddb *flash_tddb = NULL; struct ql4_tuple_ddb *tmp_tddb = NULL; struct dev_db_entry *fw_ddb_entry = NULL; struct ddb_entry *ddb_entry = NULL; dma_addr_t fw_ddb_dma; uint32_t next_idx = 0; uint32_t state = 0, conn_err = 0; uint16_t conn_id = 0; int idx, index; int status, ret = 0; fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, &fw_ddb_dma); if (fw_ddb_entry == NULL) { ql4_printk(KERN_ERR, ha, "%s:Out of memory\n", __func__); ret = -ENOMEM; goto exit_ddb_logout; } flash_tddb = vzalloc(sizeof(*flash_tddb)); if (!flash_tddb) { ql4_printk(KERN_WARNING, ha, "%s:Memory Allocation failed.\n", __func__); ret = -ENOMEM; goto exit_ddb_logout; } tmp_tddb = vzalloc(sizeof(*tmp_tddb)); if (!tmp_tddb) { ql4_printk(KERN_WARNING, ha, "%s:Memory Allocation failed.\n", __func__); ret = -ENOMEM; goto exit_ddb_logout; } if (!fnode_sess->targetname) { ql4_printk(KERN_ERR, ha, "%s:Cannot logout from SendTarget entry\n", __func__); ret = -EPERM; goto exit_ddb_logout; } if (fnode_sess->is_boot_target) { ql4_printk(KERN_ERR, ha, "%s: Logout from boot target entry is not permitted.\n", __func__); ret = -EPERM; goto exit_ddb_logout; } strscpy(flash_tddb->iscsi_name, fnode_sess->targetname, ISCSI_NAME_SIZE); if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) sprintf(flash_tddb->ip_addr, "%pI6", fnode_conn->ipaddress); else sprintf(flash_tddb->ip_addr, "%pI4", fnode_conn->ipaddress); flash_tddb->tpgt = fnode_sess->tpgt; flash_tddb->port = fnode_conn->port; COPY_ISID(flash_tddb->isid, fnode_sess->isid); for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) { ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx); if (ddb_entry == NULL) continue; if (ddb_entry->ddb_type != FLASH_DDB) continue; index = ddb_entry->sess->target_id; status = qla4xxx_get_fwddb_entry(ha, index, fw_ddb_entry, fw_ddb_dma, NULL, &next_idx, &state, &conn_err, NULL, &conn_id); if (status == QLA_ERROR) { ret = -ENOMEM; break; } qla4xxx_convert_param_ddb(fw_ddb_entry, tmp_tddb, NULL); status = qla4xxx_compare_tuple_ddb(ha, flash_tddb, tmp_tddb, true); if (status == QLA_SUCCESS) { ret = qla4xxx_sysfs_ddb_logout_sid(ddb_entry->sess); break; } } if (idx == MAX_DDB_ENTRIES) ret = -ESRCH; exit_ddb_logout: vfree(flash_tddb); vfree(tmp_tddb); if (fw_ddb_entry) dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma); return ret; } static int qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess, int param, char *buf) { struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); struct scsi_qla_host *ha = to_qla_host(shost); struct iscsi_bus_flash_conn *fnode_conn; struct ql4_chap_table chap_tbl; struct device *dev; int parent_type; int rc = 0; dev = 
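/* iscsi_find_flashnode_conn() returns the flashnode conn device with a
 * reference held; it is dropped with put_device() once the requested
 * parameter has been formatted into @buf. */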
iscsi_find_flashnode_conn(fnode_sess); if (!dev) return -EIO; fnode_conn = iscsi_dev_to_flash_conn(dev); switch (param) { case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6: rc = sprintf(buf, "%u\n", fnode_conn->is_fw_assigned_ipv6); break; case ISCSI_FLASHNODE_PORTAL_TYPE: rc = sprintf(buf, "%s\n", fnode_sess->portal_type); break; case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE: rc = sprintf(buf, "%u\n", fnode_sess->auto_snd_tgt_disable); break; case ISCSI_FLASHNODE_DISCOVERY_SESS: rc = sprintf(buf, "%u\n", fnode_sess->discovery_sess); break; case ISCSI_FLASHNODE_ENTRY_EN: rc = sprintf(buf, "%u\n", fnode_sess->entry_state); break; case ISCSI_FLASHNODE_HDR_DGST_EN: rc = sprintf(buf, "%u\n", fnode_conn->hdrdgst_en); break; case ISCSI_FLASHNODE_DATA_DGST_EN: rc = sprintf(buf, "%u\n", fnode_conn->datadgst_en); break; case ISCSI_FLASHNODE_IMM_DATA_EN: rc = sprintf(buf, "%u\n", fnode_sess->imm_data_en); break; case ISCSI_FLASHNODE_INITIAL_R2T_EN: rc = sprintf(buf, "%u\n", fnode_sess->initial_r2t_en); break; case ISCSI_FLASHNODE_DATASEQ_INORDER: rc = sprintf(buf, "%u\n", fnode_sess->dataseq_inorder_en); break; case ISCSI_FLASHNODE_PDU_INORDER: rc = sprintf(buf, "%u\n", fnode_sess->pdu_inorder_en); break; case ISCSI_FLASHNODE_CHAP_AUTH_EN: rc = sprintf(buf, "%u\n", fnode_sess->chap_auth_en); break; case ISCSI_FLASHNODE_SNACK_REQ_EN: rc = sprintf(buf, "%u\n", fnode_conn->snack_req_en); break; case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN: rc = sprintf(buf, "%u\n", fnode_sess->discovery_logout_en); break; case ISCSI_FLASHNODE_BIDI_CHAP_EN: rc = sprintf(buf, "%u\n", fnode_sess->bidi_chap_en); break; case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL: rc = sprintf(buf, "%u\n", fnode_sess->discovery_auth_optional); break; case ISCSI_FLASHNODE_ERL: rc = sprintf(buf, "%u\n", fnode_sess->erl); break; case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT: rc = sprintf(buf, "%u\n", fnode_conn->tcp_timestamp_stat); break; case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE: rc = sprintf(buf, "%u\n", fnode_conn->tcp_nagle_disable); break; case ISCSI_FLASHNODE_TCP_WSF_DISABLE: rc = sprintf(buf, "%u\n", fnode_conn->tcp_wsf_disable); break; case ISCSI_FLASHNODE_TCP_TIMER_SCALE: rc = sprintf(buf, "%u\n", fnode_conn->tcp_timer_scale); break; case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN: rc = sprintf(buf, "%u\n", fnode_conn->tcp_timestamp_en); break; case ISCSI_FLASHNODE_IP_FRAG_DISABLE: rc = sprintf(buf, "%u\n", fnode_conn->fragment_disable); break; case ISCSI_FLASHNODE_MAX_RECV_DLENGTH: rc = sprintf(buf, "%u\n", fnode_conn->max_recv_dlength); break; case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH: rc = sprintf(buf, "%u\n", fnode_conn->max_xmit_dlength); break; case ISCSI_FLASHNODE_FIRST_BURST: rc = sprintf(buf, "%u\n", fnode_sess->first_burst); break; case ISCSI_FLASHNODE_DEF_TIME2WAIT: rc = sprintf(buf, "%u\n", fnode_sess->time2wait); break; case ISCSI_FLASHNODE_DEF_TIME2RETAIN: rc = sprintf(buf, "%u\n", fnode_sess->time2retain); break; case ISCSI_FLASHNODE_MAX_R2T: rc = sprintf(buf, "%u\n", fnode_sess->max_r2t); break; case ISCSI_FLASHNODE_KEEPALIVE_TMO: rc = sprintf(buf, "%u\n", fnode_conn->keepalive_timeout); break; case ISCSI_FLASHNODE_ISID: rc = sprintf(buf, "%pm\n", fnode_sess->isid); break; case ISCSI_FLASHNODE_TSID: rc = sprintf(buf, "%u\n", fnode_sess->tsid); break; case ISCSI_FLASHNODE_PORT: rc = sprintf(buf, "%d\n", fnode_conn->port); break; case ISCSI_FLASHNODE_MAX_BURST: rc = sprintf(buf, "%u\n", fnode_sess->max_burst); break; case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO: rc = sprintf(buf, "%u\n", fnode_sess->default_taskmgmt_timeout); break; case ISCSI_FLASHNODE_IPADDR: if 
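/* Address attributes are printed according to the node's portal type:
 * %pI6 for IPv6 portals, %pI4 otherwise. */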
(!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) rc = sprintf(buf, "%pI6\n", fnode_conn->ipaddress); else rc = sprintf(buf, "%pI4\n", fnode_conn->ipaddress); break; case ISCSI_FLASHNODE_ALIAS: if (fnode_sess->targetalias) rc = sprintf(buf, "%s\n", fnode_sess->targetalias); else rc = sprintf(buf, "\n"); break; case ISCSI_FLASHNODE_REDIRECT_IPADDR: if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) rc = sprintf(buf, "%pI6\n", fnode_conn->redirect_ipaddr); else rc = sprintf(buf, "%pI4\n", fnode_conn->redirect_ipaddr); break; case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE: rc = sprintf(buf, "%u\n", fnode_conn->max_segment_size); break; case ISCSI_FLASHNODE_LOCAL_PORT: rc = sprintf(buf, "%u\n", fnode_conn->local_port); break; case ISCSI_FLASHNODE_IPV4_TOS: rc = sprintf(buf, "%u\n", fnode_conn->ipv4_tos); break; case ISCSI_FLASHNODE_IPV6_TC: if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) rc = sprintf(buf, "%u\n", fnode_conn->ipv6_traffic_class); else rc = sprintf(buf, "\n"); break; case ISCSI_FLASHNODE_IPV6_FLOW_LABEL: rc = sprintf(buf, "%u\n", fnode_conn->ipv6_flow_label); break; case ISCSI_FLASHNODE_LINK_LOCAL_IPV6: if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) rc = sprintf(buf, "%pI6\n", fnode_conn->link_local_ipv6_addr); else rc = sprintf(buf, "\n"); break; case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX: rc = sprintf(buf, "%u\n", fnode_sess->discovery_parent_idx); break; case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE: if (fnode_sess->discovery_parent_type == DDB_ISNS) parent_type = ISCSI_DISC_PARENT_ISNS; else if (fnode_sess->discovery_parent_type == DDB_NO_LINK) parent_type = ISCSI_DISC_PARENT_UNKNOWN; else if (fnode_sess->discovery_parent_type < MAX_DDB_ENTRIES) parent_type = ISCSI_DISC_PARENT_SENDTGT; else parent_type = ISCSI_DISC_PARENT_UNKNOWN; rc = sprintf(buf, "%s\n", iscsi_get_discovery_parent_name(parent_type)); break; case ISCSI_FLASHNODE_NAME: if (fnode_sess->targetname) rc = sprintf(buf, "%s\n", fnode_sess->targetname); else rc = sprintf(buf, "\n"); break; case ISCSI_FLASHNODE_TPGT: rc = sprintf(buf, "%u\n", fnode_sess->tpgt); break; case ISCSI_FLASHNODE_TCP_XMIT_WSF: rc = sprintf(buf, "%u\n", fnode_conn->tcp_xmit_wsf); break; case ISCSI_FLASHNODE_TCP_RECV_WSF: rc = sprintf(buf, "%u\n", fnode_conn->tcp_recv_wsf); break; case ISCSI_FLASHNODE_CHAP_OUT_IDX: rc = sprintf(buf, "%u\n", fnode_sess->chap_out_idx); break; case ISCSI_FLASHNODE_USERNAME: if (fnode_sess->chap_auth_en) { qla4xxx_get_uni_chap_at_index(ha, chap_tbl.name, chap_tbl.secret, fnode_sess->chap_out_idx); rc = sprintf(buf, "%s\n", chap_tbl.name); } else { rc = sprintf(buf, "\n"); } break; case ISCSI_FLASHNODE_PASSWORD: if (fnode_sess->chap_auth_en) { qla4xxx_get_uni_chap_at_index(ha, chap_tbl.name, chap_tbl.secret, fnode_sess->chap_out_idx); rc = sprintf(buf, "%s\n", chap_tbl.secret); } else { rc = sprintf(buf, "\n"); } break; case ISCSI_FLASHNODE_STATSN: rc = sprintf(buf, "%u\n", fnode_conn->statsn); break; case ISCSI_FLASHNODE_EXP_STATSN: rc = sprintf(buf, "%u\n", fnode_conn->exp_statsn); break; case ISCSI_FLASHNODE_IS_BOOT_TGT: rc = sprintf(buf, "%u\n", fnode_sess->is_boot_target); break; default: rc = -ENOSYS; break; } put_device(dev); return rc; } /** * qla4xxx_sysfs_ddb_set_param - Set parameter for firmware DDB entry * @fnode_sess: pointer to session attrs of flash ddb entry * @fnode_conn: pointer to connection attrs of flash ddb entry * @data: Parameters and their values to update * @len: len of data * * This sets the parameter of flash ddb entry and writes them to flash **/ static int 
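/* Reached through the iSCSI transport's flashnode set-param path
 * (e.g. iscsiadm's flashnode update mode); each nlattr carries one
 * iscsi_flashnode_param_info, and the updated entry is flushed to
 * flash via qla4xxx_sysfs_ddb_apply() at the end. */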
qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess, struct iscsi_bus_flash_conn *fnode_conn, void *data, int len) { struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); struct scsi_qla_host *ha = to_qla_host(shost); struct iscsi_flashnode_param_info *fnode_param; struct ql4_chap_table chap_tbl; struct nlattr *attr; uint16_t chap_out_idx = INVALID_ENTRY; int rc = QLA_ERROR; uint32_t rem = len; memset((void *)&chap_tbl, 0, sizeof(chap_tbl)); nla_for_each_attr(attr, data, len, rem) { if (nla_len(attr) < sizeof(*fnode_param)) { rc = -EINVAL; goto exit_set_param; } fnode_param = nla_data(attr); switch (fnode_param->param) { case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6: fnode_conn->is_fw_assigned_ipv6 = fnode_param->value[0]; break; case ISCSI_FLASHNODE_PORTAL_TYPE: memcpy(fnode_sess->portal_type, fnode_param->value, strlen(fnode_sess->portal_type)); break; case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE: fnode_sess->auto_snd_tgt_disable = fnode_param->value[0]; break; case ISCSI_FLASHNODE_DISCOVERY_SESS: fnode_sess->discovery_sess = fnode_param->value[0]; break; case ISCSI_FLASHNODE_ENTRY_EN: fnode_sess->entry_state = fnode_param->value[0]; break; case ISCSI_FLASHNODE_HDR_DGST_EN: fnode_conn->hdrdgst_en = fnode_param->value[0]; break; case ISCSI_FLASHNODE_DATA_DGST_EN: fnode_conn->datadgst_en = fnode_param->value[0]; break; case ISCSI_FLASHNODE_IMM_DATA_EN: fnode_sess->imm_data_en = fnode_param->value[0]; break; case ISCSI_FLASHNODE_INITIAL_R2T_EN: fnode_sess->initial_r2t_en = fnode_param->value[0]; break; case ISCSI_FLASHNODE_DATASEQ_INORDER: fnode_sess->dataseq_inorder_en = fnode_param->value[0]; break; case ISCSI_FLASHNODE_PDU_INORDER: fnode_sess->pdu_inorder_en = fnode_param->value[0]; break; case ISCSI_FLASHNODE_CHAP_AUTH_EN: fnode_sess->chap_auth_en = fnode_param->value[0]; /* Invalidate chap index if chap auth is disabled */ if (!fnode_sess->chap_auth_en) fnode_sess->chap_out_idx = INVALID_ENTRY; break; case ISCSI_FLASHNODE_SNACK_REQ_EN: fnode_conn->snack_req_en = fnode_param->value[0]; break; case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN: fnode_sess->discovery_logout_en = fnode_param->value[0]; break; case ISCSI_FLASHNODE_BIDI_CHAP_EN: fnode_sess->bidi_chap_en = fnode_param->value[0]; break; case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL: fnode_sess->discovery_auth_optional = fnode_param->value[0]; break; case ISCSI_FLASHNODE_ERL: fnode_sess->erl = fnode_param->value[0]; break; case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT: fnode_conn->tcp_timestamp_stat = fnode_param->value[0]; break; case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE: fnode_conn->tcp_nagle_disable = fnode_param->value[0]; break; case ISCSI_FLASHNODE_TCP_WSF_DISABLE: fnode_conn->tcp_wsf_disable = fnode_param->value[0]; break; case ISCSI_FLASHNODE_TCP_TIMER_SCALE: fnode_conn->tcp_timer_scale = fnode_param->value[0]; break; case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN: fnode_conn->tcp_timestamp_en = fnode_param->value[0]; break; case ISCSI_FLASHNODE_IP_FRAG_DISABLE: fnode_conn->fragment_disable = fnode_param->value[0]; break; case ISCSI_FLASHNODE_MAX_RECV_DLENGTH: fnode_conn->max_recv_dlength = *(unsigned *)fnode_param->value; break; case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH: fnode_conn->max_xmit_dlength = *(unsigned *)fnode_param->value; break; case ISCSI_FLASHNODE_FIRST_BURST: fnode_sess->first_burst = *(unsigned *)fnode_param->value; break; case ISCSI_FLASHNODE_DEF_TIME2WAIT: fnode_sess->time2wait = *(uint16_t *)fnode_param->value; break; case ISCSI_FLASHNODE_DEF_TIME2RETAIN: fnode_sess->time2retain = *(uint16_t 
*)fnode_param->value; break; case ISCSI_FLASHNODE_MAX_R2T: fnode_sess->max_r2t = *(uint16_t *)fnode_param->value; break; case ISCSI_FLASHNODE_KEEPALIVE_TMO: fnode_conn->keepalive_timeout = *(uint16_t *)fnode_param->value; break; case ISCSI_FLASHNODE_ISID: memcpy(fnode_sess->isid, fnode_param->value, sizeof(fnode_sess->isid)); break; case ISCSI_FLASHNODE_TSID: fnode_sess->tsid = *(uint16_t *)fnode_param->value; break; case ISCSI_FLASHNODE_PORT: fnode_conn->port = *(uint16_t *)fnode_param->value; break; case ISCSI_FLASHNODE_MAX_BURST: fnode_sess->max_burst = *(unsigned *)fnode_param->value; break; case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO: fnode_sess->default_taskmgmt_timeout = *(uint16_t *)fnode_param->value; break; case ISCSI_FLASHNODE_IPADDR: memcpy(fnode_conn->ipaddress, fnode_param->value, IPv6_ADDR_LEN); break; case ISCSI_FLASHNODE_ALIAS: rc = iscsi_switch_str_param(&fnode_sess->targetalias, (char *)fnode_param->value); break; case ISCSI_FLASHNODE_REDIRECT_IPADDR: memcpy(fnode_conn->redirect_ipaddr, fnode_param->value, IPv6_ADDR_LEN); break; case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE: fnode_conn->max_segment_size = *(unsigned *)fnode_param->value; break; case ISCSI_FLASHNODE_LOCAL_PORT: fnode_conn->local_port = *(uint16_t *)fnode_param->value; break; case ISCSI_FLASHNODE_IPV4_TOS: fnode_conn->ipv4_tos = fnode_param->value[0]; break; case ISCSI_FLASHNODE_IPV6_TC: fnode_conn->ipv6_traffic_class = fnode_param->value[0]; break; case ISCSI_FLASHNODE_IPV6_FLOW_LABEL: fnode_conn->ipv6_flow_label = fnode_param->value[0]; break; case ISCSI_FLASHNODE_NAME: rc = iscsi_switch_str_param(&fnode_sess->targetname, (char *)fnode_param->value); break; case ISCSI_FLASHNODE_TPGT: fnode_sess->tpgt = *(uint16_t *)fnode_param->value; break; case ISCSI_FLASHNODE_LINK_LOCAL_IPV6: memcpy(fnode_conn->link_local_ipv6_addr, fnode_param->value, IPv6_ADDR_LEN); break; case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX: fnode_sess->discovery_parent_idx = *(uint16_t *)fnode_param->value; break; case ISCSI_FLASHNODE_TCP_XMIT_WSF: fnode_conn->tcp_xmit_wsf = *(uint8_t *)fnode_param->value; break; case ISCSI_FLASHNODE_TCP_RECV_WSF: fnode_conn->tcp_recv_wsf = *(uint8_t *)fnode_param->value; break; case ISCSI_FLASHNODE_STATSN: fnode_conn->statsn = *(uint32_t *)fnode_param->value; break; case ISCSI_FLASHNODE_EXP_STATSN: fnode_conn->exp_statsn = *(uint32_t *)fnode_param->value; break; case ISCSI_FLASHNODE_CHAP_OUT_IDX: chap_out_idx = *(uint16_t *)fnode_param->value; if (!qla4xxx_get_uni_chap_at_index(ha, chap_tbl.name, chap_tbl.secret, chap_out_idx)) { fnode_sess->chap_out_idx = chap_out_idx; /* Enable chap auth if chap index is valid */ fnode_sess->chap_auth_en = QL4_PARAM_ENABLE; } break; default: ql4_printk(KERN_ERR, ha, "%s: No such sysfs attribute\n", __func__); rc = -ENOSYS; goto exit_set_param; } } rc = qla4xxx_sysfs_ddb_apply(fnode_sess, fnode_conn); exit_set_param: return rc; } /** * qla4xxx_sysfs_ddb_delete - Delete firmware DDB entry * @fnode_sess: pointer to session attrs of flash ddb entry * * This invalidates the flash ddb entry at the given index **/ static int qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess) { struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); struct scsi_qla_host *ha = to_qla_host(shost); uint32_t dev_db_start_offset; uint32_t dev_db_end_offset; struct dev_db_entry *fw_ddb_entry = NULL; dma_addr_t fw_ddb_entry_dma; uint16_t *ddb_cookie = NULL; size_t ddb_size = 0; void *pddb = NULL; int target_id; int rc = 0; if (fnode_sess->is_boot_target) { rc = -EPERM; 
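/* Boot target entries are never deleted via sysfs; refuse here rather
 * than invalidating their flash DDB cookie below. */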
DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Deletion of boot target entry is not permitted.\n", __func__)); goto exit_ddb_del; } if (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT) goto sysfs_ddb_del; if (is_qla40XX(ha)) { dev_db_start_offset = FLASH_OFFSET_DB_INFO; dev_db_end_offset = FLASH_OFFSET_DB_END; dev_db_start_offset += (fnode_sess->target_id * sizeof(*fw_ddb_entry)); ddb_size = sizeof(*fw_ddb_entry); } else { dev_db_start_offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_ddb << 2); /* flt_ddb_size is DDB table size for both ports * so divide it by 2 to calculate the offset for second port */ if (ha->port_num == 1) dev_db_start_offset += (ha->hw.flt_ddb_size / 2); dev_db_end_offset = dev_db_start_offset + (ha->hw.flt_ddb_size / 2); dev_db_start_offset += (fnode_sess->target_id * sizeof(*fw_ddb_entry)); dev_db_start_offset += offsetof(struct dev_db_entry, cookie); ddb_size = sizeof(*ddb_cookie); } DEBUG2(ql4_printk(KERN_ERR, ha, "%s: start offset=%u, end offset=%u\n", __func__, dev_db_start_offset, dev_db_end_offset)); if (dev_db_start_offset > dev_db_end_offset) { rc = -EIO; DEBUG2(ql4_printk(KERN_ERR, ha, "%s:Invalid DDB index %u\n", __func__, fnode_sess->target_id)); goto exit_ddb_del; } pddb = dma_alloc_coherent(&ha->pdev->dev, ddb_size, &fw_ddb_entry_dma, GFP_KERNEL); if (!pddb) { rc = -ENOMEM; DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Unable to allocate dma buffer\n", __func__)); goto exit_ddb_del; } if (is_qla40XX(ha)) { fw_ddb_entry = pddb; memset(fw_ddb_entry, 0, ddb_size); ddb_cookie = &fw_ddb_entry->cookie; } else { ddb_cookie = pddb; } /* invalidate the cookie */ *ddb_cookie = 0xFFEE; qla4xxx_set_flash(ha, fw_ddb_entry_dma, dev_db_start_offset, ddb_size, FLASH_OPT_RMW_COMMIT); sysfs_ddb_del: target_id = fnode_sess->target_id; iscsi_destroy_flashnode_sess(fnode_sess); ql4_printk(KERN_INFO, ha, "%s: session and conn entries for flashnode %u of host %lu deleted\n", __func__, target_id, ha->host_no); exit_ddb_del: if (pddb) dma_free_coherent(&ha->pdev->dev, ddb_size, pddb, fw_ddb_entry_dma); return rc; } /** * qla4xxx_sysfs_ddb_export - Create sysfs entries for firmware DDBs * @ha: pointer to adapter structure * * Export the firmware DDB for all send targets and normal targets to sysfs. **/ int qla4xxx_sysfs_ddb_export(struct scsi_qla_host *ha) { struct dev_db_entry *fw_ddb_entry = NULL; dma_addr_t fw_ddb_entry_dma; uint16_t max_ddbs; uint16_t idx = 0; int ret = QLA_SUCCESS; fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), &fw_ddb_entry_dma, GFP_KERNEL); if (!fw_ddb_entry) { DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Unable to allocate dma buffer\n", __func__)); return -ENOMEM; } max_ddbs = is_qla40XX(ha) ? MAX_PRST_DEV_DB_ENTRIES : MAX_DEV_DB_ENTRIES; for (idx = 0; idx < max_ddbs; idx++) { if (qla4xxx_flashdb_by_index(ha, fw_ddb_entry, fw_ddb_entry_dma, idx)) continue; ret = qla4xxx_sysfs_ddb_tgt_create(ha, fw_ddb_entry, &idx, 0); if (ret) { ret = -EIO; break; } } dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), fw_ddb_entry, fw_ddb_entry_dma); return ret; } static void qla4xxx_sysfs_ddb_remove(struct scsi_qla_host *ha) { iscsi_destroy_all_flashnode(ha->host); } /** * qla4xxx_build_ddb_list - Build ddb list and setup sessions * @ha: pointer to adapter structure * @is_reset: Is this init path or reset path * * Create a list of sendtargets (st) from firmware DDBs, issue send targets * using connection open, then create the list of normal targets (nt) * from firmware DDBs. Based on the list of nt setup session and connection * objects. 
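 * The temporary send-target (st) and normal-target (nt) index lists built
 * here are freed again before this function returns.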
**/ void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset) { uint16_t tmo = 0; struct list_head list_st, list_nt; struct qla_ddb_index *st_ddb_idx, *st_ddb_idx_tmp; unsigned long wtime; if (!test_bit(AF_LINK_UP, &ha->flags)) { set_bit(AF_BUILD_DDB_LIST, &ha->flags); ha->is_reset = is_reset; return; } INIT_LIST_HEAD(&list_st); INIT_LIST_HEAD(&list_nt); qla4xxx_build_st_list(ha, &list_st); /* Before issuing conn open mbox, ensure all IPs states are configured * Note, conn open fails if IPs are not configured */ qla4xxx_wait_for_ip_configuration(ha); /* Go thru the STs and fire the sendtargets by issuing conn open mbx */ list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) { qla4xxx_conn_open(ha, st_ddb_idx->fw_ddb_idx); } /* Wait to ensure all sendtargets are done for min 12 sec wait */ tmo = ((ha->def_timeout > LOGIN_TOV) && (ha->def_timeout < LOGIN_TOV * 10) ? ha->def_timeout : LOGIN_TOV); DEBUG2(ql4_printk(KERN_INFO, ha, "Default time to wait for build ddb %d\n", tmo)); wtime = jiffies + (HZ * tmo); do { if (list_empty(&list_st)) break; qla4xxx_remove_failed_ddb(ha, &list_st); schedule_timeout_uninterruptible(HZ / 10); } while (time_after(wtime, jiffies)); qla4xxx_build_nt_list(ha, &list_nt, &list_st, is_reset); qla4xxx_free_ddb_list(&list_st); qla4xxx_free_ddb_list(&list_nt); qla4xxx_free_ddb_index(ha); } /** * qla4xxx_wait_login_resp_boot_tgt - Wait for iSCSI boot target login * response. * @ha: pointer to adapter structure * * When the boot entry is normal iSCSI target then DF_BOOT_TGT flag will be * set in DDB and we will wait for login response of boot targets during * probe. **/ static void qla4xxx_wait_login_resp_boot_tgt(struct scsi_qla_host *ha) { struct ddb_entry *ddb_entry; struct dev_db_entry *fw_ddb_entry = NULL; dma_addr_t fw_ddb_entry_dma; unsigned long wtime; uint32_t ddb_state; int max_ddbs, idx, ret; max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX : MAX_DEV_DB_ENTRIES; fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), &fw_ddb_entry_dma, GFP_KERNEL); if (!fw_ddb_entry) { ql4_printk(KERN_ERR, ha, "%s: Unable to allocate dma buffer\n", __func__); goto exit_login_resp; } wtime = jiffies + (HZ * BOOT_LOGIN_RESP_TOV); for (idx = 0; idx < max_ddbs; idx++) { ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx); if (ddb_entry == NULL) continue; if (test_bit(DF_BOOT_TGT, &ddb_entry->flags)) { DEBUG2(ql4_printk(KERN_INFO, ha, "%s: DDB index [%d]\n", __func__, ddb_entry->fw_ddb_index)); do { ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry, fw_ddb_entry_dma, NULL, NULL, &ddb_state, NULL, NULL, NULL); if (ret == QLA_ERROR) goto exit_login_resp; if ((ddb_state == DDB_DS_SESSION_ACTIVE) || (ddb_state == DDB_DS_SESSION_FAILED)) break; schedule_timeout_uninterruptible(HZ); } while ((time_after(wtime, jiffies))); if (!time_after(wtime, jiffies)) { DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Login response wait timer expired\n", __func__)); goto exit_login_resp; } } } exit_login_resp: if (fw_ddb_entry) dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), fw_ddb_entry, fw_ddb_entry_dma); } /** * qla4xxx_probe_adapter - callback function to probe HBA * @pdev: pointer to pci_dev structure * @ent: pointer to pci_device entry * * This routine will probe for Qlogic 4xxx iSCSI host adapters. * It returns zero if successful. It also initializes all data necessary for * the driver. 
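 * On any failure the probe path unwinds through the remove_host,
 * probe_failed, probe_failed_ioconfig and probe_disable_device labels,
 * releasing whatever had been set up to that point.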
**/ static int qla4xxx_probe_adapter(struct pci_dev *pdev, const struct pci_device_id *ent) { int ret = -ENODEV, status; struct Scsi_Host *host; struct scsi_qla_host *ha; uint8_t init_retry_count = 0; char buf[34]; struct qla4_8xxx_legacy_intr_set *nx_legacy_intr; uint32_t dev_state; if (pci_enable_device(pdev)) return -1; host = iscsi_host_alloc(&qla4xxx_driver_template, sizeof(*ha), 0); if (host == NULL) { printk(KERN_WARNING "qla4xxx: Couldn't allocate host from scsi layer!\n"); goto probe_disable_device; } /* Clear our data area */ ha = to_qla_host(host); memset(ha, 0, sizeof(*ha)); /* Save the information from PCI BIOS. */ ha->pdev = pdev; ha->host = host; ha->host_no = host->host_no; ha->func_num = PCI_FUNC(ha->pdev->devfn); /* Setup Runtime configurable options */ if (is_qla8022(ha)) { ha->isp_ops = &qla4_82xx_isp_ops; ha->reg_tbl = (uint32_t *) qla4_82xx_reg_tbl; ha->qdr_sn_window = -1; ha->ddr_mn_window = -1; ha->curr_window = 255; nx_legacy_intr = &legacy_intr[ha->func_num]; ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit; ha->nx_legacy_intr.tgt_status_reg = nx_legacy_intr->tgt_status_reg; ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg; ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg; } else if (is_qla8032(ha) || is_qla8042(ha)) { ha->isp_ops = &qla4_83xx_isp_ops; ha->reg_tbl = (uint32_t *)qla4_83xx_reg_tbl; } else { ha->isp_ops = &qla4xxx_isp_ops; } if (is_qla80XX(ha)) { rwlock_init(&ha->hw_lock); ha->pf_bit = ha->func_num << 16; /* Set EEH reset type to fundamental if required by hba */ pdev->needs_freset = 1; } /* Configure PCI I/O space. */ ret = ha->isp_ops->iospace_config(ha); if (ret) goto probe_failed_ioconfig; ql4_printk(KERN_INFO, ha, "Found an ISP%04x, irq %d, iobase 0x%p\n", pdev->device, pdev->irq, ha->reg); qla4xxx_config_dma_addressing(ha); /* Initialize lists and spinlocks. */ INIT_LIST_HEAD(&ha->free_srb_q); mutex_init(&ha->mbox_sem); mutex_init(&ha->chap_sem); init_completion(&ha->mbx_intr_comp); init_completion(&ha->disable_acb_comp); init_completion(&ha->idc_comp); init_completion(&ha->link_up_comp); spin_lock_init(&ha->hardware_lock); spin_lock_init(&ha->work_lock); /* Initialize work list */ INIT_LIST_HEAD(&ha->work_list); /* Allocate dma buffers */ if (qla4xxx_mem_alloc(ha)) { ql4_printk(KERN_WARNING, ha, "[ERROR] Failed to allocate memory for adapter\n"); ret = -ENOMEM; goto probe_failed; } host->cmd_per_lun = 3; host->max_channel = 0; host->max_lun = MAX_LUNS - 1; host->max_id = MAX_TARGETS; host->max_cmd_len = IOCB_MAX_CDB_LEN; host->can_queue = MAX_SRBS ; host->transportt = qla4xxx_scsi_transport; pci_set_drvdata(pdev, ha); ret = scsi_add_host(host, &pdev->dev); if (ret) goto probe_failed; if (is_qla80XX(ha)) qla4_8xxx_get_flash_info(ha); if (is_qla8032(ha) || is_qla8042(ha)) { qla4_83xx_read_reset_template(ha); /* * NOTE: If ql4dontresethba==1, set IDC_CTRL DONTRESET_BIT0. * If DONRESET_BIT0 is set, drivers should not set dev_state * to NEED_RESET. But if NEED_RESET is set, drivers should * should honor the reset. 
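 * In other words, DONTRESET_BIT0 only keeps this driver from initiating
 * a reset; it does not exempt it from honoring a reset that another
 * function has already requested.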
*/ if (ql4xdontresethba == 1) qla4_83xx_set_idc_dontreset(ha); } /* * Initialize the Host adapter request/response queues and * firmware * NOTE: interrupts enabled upon successful completion */ status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER); /* Dont retry adapter initialization if IRQ allocation failed */ if (is_qla80XX(ha) && (status == QLA_ERROR)) goto skip_retry_init; while ((!test_bit(AF_ONLINE, &ha->flags)) && init_retry_count++ < MAX_INIT_RETRIES) { if (is_qla80XX(ha)) { ha->isp_ops->idc_lock(ha); dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE); ha->isp_ops->idc_unlock(ha); if (dev_state == QLA8XXX_DEV_FAILED) { ql4_printk(KERN_WARNING, ha, "%s: don't retry " "initialize adapter. H/W is in failed state\n", __func__); break; } } DEBUG2(printk("scsi: %s: retrying adapter initialization " "(%d)\n", __func__, init_retry_count)); if (ha->isp_ops->reset_chip(ha) == QLA_ERROR) continue; status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER); if (is_qla80XX(ha) && (status == QLA_ERROR)) { if (qla4_8xxx_check_init_adapter_retry(ha) == QLA_ERROR) goto skip_retry_init; } } skip_retry_init: if (!test_bit(AF_ONLINE, &ha->flags)) { ql4_printk(KERN_WARNING, ha, "Failed to initialize adapter\n"); if ((is_qla8022(ha) && ql4xdontresethba) || ((is_qla8032(ha) || is_qla8042(ha)) && qla4_83xx_idc_dontreset(ha))) { /* Put the device in failed state. */ DEBUG2(printk(KERN_ERR "HW STATE: FAILED\n")); ha->isp_ops->idc_lock(ha); qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, QLA8XXX_DEV_FAILED); ha->isp_ops->idc_unlock(ha); } ret = -ENODEV; goto remove_host; } /* Startup the kernel thread for this host adapter. */ DEBUG2(printk("scsi: %s: Starting kernel thread for " "qla4xxx_dpc\n", __func__)); sprintf(buf, "qla4xxx_%lu_dpc", ha->host_no); ha->dpc_thread = create_singlethread_workqueue(buf); if (!ha->dpc_thread) { ql4_printk(KERN_WARNING, ha, "Unable to start DPC thread!\n"); ret = -ENODEV; goto remove_host; } INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc); ha->task_wq = alloc_workqueue("qla4xxx_%lu_task", WQ_MEM_RECLAIM, 1, ha->host_no); if (!ha->task_wq) { ql4_printk(KERN_WARNING, ha, "Unable to start task thread!\n"); ret = -ENODEV; goto remove_host; } /* * For ISP-8XXX, request_irqs is called in qla4_8xxx_load_risc * (which is called indirectly by qla4xxx_initialize_adapter), * so that irqs will be registered after crbinit but before * mbx_intr_enable. */ if (is_qla40XX(ha)) { ret = qla4xxx_request_irqs(ha); if (ret) { ql4_printk(KERN_WARNING, ha, "Failed to reserve " "interrupt %d already in use.\n", pdev->irq); goto remove_host; } } pci_save_state(ha->pdev); ha->isp_ops->enable_intrs(ha); /* Start timer thread. 
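 * (periodic housekeeping timer used to drive deferred DPC work)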
*/ qla4xxx_start_timer(ha, 1); set_bit(AF_INIT_DONE, &ha->flags); qla4_8xxx_alloc_sysfs_attr(ha); printk(KERN_INFO " QLogic iSCSI HBA Driver version: %s\n" " QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n", qla4xxx_version_str, ha->pdev->device, pci_name(ha->pdev), ha->host_no, ha->fw_info.fw_major, ha->fw_info.fw_minor, ha->fw_info.fw_patch, ha->fw_info.fw_build); /* Set the driver version */ if (is_qla80XX(ha)) qla4_8xxx_set_param(ha, SET_DRVR_VERSION); if (qla4xxx_setup_boot_info(ha)) ql4_printk(KERN_ERR, ha, "%s: No iSCSI boot target configured\n", __func__); set_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags); /* Perform the build ddb list and login to each */ qla4xxx_build_ddb_list(ha, INIT_ADAPTER); iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb); qla4xxx_wait_login_resp_boot_tgt(ha); qla4xxx_create_chap_list(ha); qla4xxx_create_ifaces(ha); return 0; remove_host: scsi_remove_host(ha->host); probe_failed: qla4xxx_free_adapter(ha); probe_failed_ioconfig: scsi_host_put(ha->host); probe_disable_device: pci_disable_device(pdev); return ret; } /** * qla4xxx_prevent_other_port_reinit - prevent other port from re-initialize * @ha: pointer to adapter structure * * Mark the other ISP-4xxx port to indicate that the driver is being removed, * so that the other port will not re-initialize while in the process of * removing the ha due to driver unload or hba hotplug. **/ static void qla4xxx_prevent_other_port_reinit(struct scsi_qla_host *ha) { struct scsi_qla_host *other_ha = NULL; struct pci_dev *other_pdev = NULL; int fn = ISP4XXX_PCI_FN_2; /*iscsi function numbers for ISP4xxx is 1 and 3*/ if (PCI_FUNC(ha->pdev->devfn) & BIT_1) fn = ISP4XXX_PCI_FN_1; other_pdev = pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus), ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn), fn)); /* Get other_ha if other_pdev is valid and state is enable*/ if (other_pdev) { if (atomic_read(&other_pdev->enable_cnt)) { other_ha = pci_get_drvdata(other_pdev); if (other_ha) { set_bit(AF_HA_REMOVAL, &other_ha->flags); DEBUG2(ql4_printk(KERN_INFO, ha, "%s: " "Prevent %s reinit\n", __func__, dev_name(&other_ha->pdev->dev))); } } pci_dev_put(other_pdev); } } static void qla4xxx_destroy_ddb(struct scsi_qla_host *ha, struct ddb_entry *ddb_entry) { struct dev_db_entry *fw_ddb_entry = NULL; dma_addr_t fw_ddb_entry_dma; unsigned long wtime; uint32_t ddb_state; int options; int status; options = LOGOUT_OPTION_CLOSE_SESSION; if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR) { ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__); goto clear_ddb; } fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), &fw_ddb_entry_dma, GFP_KERNEL); if (!fw_ddb_entry) { ql4_printk(KERN_ERR, ha, "%s: Unable to allocate dma buffer\n", __func__); goto clear_ddb; } wtime = jiffies + (HZ * LOGOUT_TOV); do { status = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry, fw_ddb_entry_dma, NULL, NULL, &ddb_state, NULL, NULL, NULL); if (status == QLA_ERROR) goto free_ddb; if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) || (ddb_state == DDB_DS_SESSION_FAILED)) goto free_ddb; schedule_timeout_uninterruptible(HZ); } while ((time_after(wtime, jiffies))); free_ddb: dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), fw_ddb_entry, fw_ddb_entry_dma); clear_ddb: qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index); } static void qla4xxx_destroy_fw_ddb_session(struct scsi_qla_host *ha) { struct ddb_entry *ddb_entry; int idx; for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) { ddb_entry 
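/* Walk every firmware DDB index; only flash (FLASH_DDB) sessions are
 * logged out and torn down when the adapter is removed. */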
= qla4xxx_lookup_ddb_by_fw_index(ha, idx); if ((ddb_entry != NULL) && (ddb_entry->ddb_type == FLASH_DDB)) { qla4xxx_destroy_ddb(ha, ddb_entry); /* * we have decremented the reference count of the driver * when we setup the session to have the driver unload * to be seamless without actually destroying the * session **/ try_module_get(qla4xxx_iscsi_transport.owner); iscsi_destroy_endpoint(ddb_entry->conn->ep); qla4xxx_free_ddb(ha, ddb_entry); iscsi_session_teardown(ddb_entry->sess); } } } /** * qla4xxx_remove_adapter - callback function to remove adapter. * @pdev: PCI device pointer **/ static void qla4xxx_remove_adapter(struct pci_dev *pdev) { struct scsi_qla_host *ha; /* * If the PCI device is disabled then it means probe_adapter had * failed and resources already cleaned up on probe_adapter exit. */ if (!pci_is_enabled(pdev)) return; ha = pci_get_drvdata(pdev); if (is_qla40XX(ha)) qla4xxx_prevent_other_port_reinit(ha); /* destroy iface from sysfs */ qla4xxx_destroy_ifaces(ha); if ((!ql4xdisablesysfsboot) && ha->boot_kset) iscsi_boot_destroy_kset(ha->boot_kset); qla4xxx_destroy_fw_ddb_session(ha); qla4_8xxx_free_sysfs_attr(ha); qla4xxx_sysfs_ddb_remove(ha); scsi_remove_host(ha->host); qla4xxx_free_adapter(ha); scsi_host_put(ha->host); pci_disable_device(pdev); } /** * qla4xxx_config_dma_addressing() - Configure OS DMA addressing method. * @ha: HA context */ static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha) { /* Update our PCI device dma_mask for full 64 bit mask */ if (dma_set_mask_and_coherent(&ha->pdev->dev, DMA_BIT_MASK(64))) { dev_dbg(&ha->pdev->dev, "Failed to set 64 bit PCI consistent mask; " "using 32 bit.\n"); dma_set_mask_and_coherent(&ha->pdev->dev, DMA_BIT_MASK(32)); } } static int qla4xxx_slave_alloc(struct scsi_device *sdev) { struct iscsi_cls_session *cls_sess; struct iscsi_session *sess; struct ddb_entry *ddb; int queue_depth = QL4_DEF_QDEPTH; cls_sess = starget_to_session(sdev->sdev_target); sess = cls_sess->dd_data; ddb = sess->dd_data; sdev->hostdata = ddb; if (ql4xmaxqdepth != 0 && ql4xmaxqdepth <= 0xffffU) queue_depth = ql4xmaxqdepth; scsi_change_queue_depth(sdev, queue_depth); return 0; } /** * qla4xxx_del_from_active_array - returns an active srb * @ha: Pointer to host adapter structure. * @index: index into the active_array * * This routine removes and returns the srb at the specified index **/ struct srb *qla4xxx_del_from_active_array(struct scsi_qla_host *ha, uint32_t index) { struct srb *srb = NULL; struct scsi_cmnd *cmd = NULL; cmd = scsi_host_find_tag(ha->host, index); if (!cmd) return srb; srb = qla4xxx_cmd_priv(cmd)->srb; if (!srb) return srb; /* update counters */ if (srb->flags & SRB_DMA_VALID) { ha->iocb_cnt -= srb->iocb_cnt; if (srb->cmd) srb->cmd->host_scribble = (unsigned char *)(unsigned long) MAX_SRBS; } return srb; } /** * qla4xxx_eh_wait_on_command - waits for command to be returned by firmware * @ha: Pointer to host adapter structure. * @cmd: Scsi Command to wait on. * * This routine waits for the command to be returned by the Firmware * for some max time. 
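 * The srb is polled roughly every two seconds for up to EH_WAIT_CMD_TOV
 * iterations; a non-zero value is returned once the command has been
 * handed back to the OS.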
**/ static int qla4xxx_eh_wait_on_command(struct scsi_qla_host *ha, struct scsi_cmnd *cmd) { int done = 0; struct srb *rp; uint32_t max_wait_time = EH_WAIT_CMD_TOV; int ret = SUCCESS; /* Dont wait on command if PCI error is being handled * by PCI AER driver */ if (unlikely(pci_channel_offline(ha->pdev)) || (test_bit(AF_EEH_BUSY, &ha->flags))) { ql4_printk(KERN_WARNING, ha, "scsi%ld: Return from %s\n", ha->host_no, __func__); return ret; } do { /* Checking to see if its returned to OS */ rp = qla4xxx_cmd_priv(cmd)->srb; if (rp == NULL) { done++; break; } msleep(2000); } while (max_wait_time--); return done; } /** * qla4xxx_wait_for_hba_online - waits for HBA to come online * @ha: Pointer to host adapter structure **/ static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha) { unsigned long wait_online; wait_online = jiffies + (HBA_ONLINE_TOV * HZ); while (time_before(jiffies, wait_online)) { if (adapter_up(ha)) return QLA_SUCCESS; msleep(2000); } return QLA_ERROR; } /** * qla4xxx_eh_wait_for_commands - wait for active cmds to finish. * @ha: pointer to HBA * @stgt: pointer to SCSI target * @sdev: pointer to SCSI device * * This function waits for all outstanding commands to a lun to complete. It * returns 0 if all pending commands are returned and 1 otherwise. **/ static int qla4xxx_eh_wait_for_commands(struct scsi_qla_host *ha, struct scsi_target *stgt, struct scsi_device *sdev) { int cnt; int status = 0; struct scsi_cmnd *cmd; /* * Waiting for all commands for the designated target or dev * in the active array */ for (cnt = 0; cnt < ha->host->can_queue; cnt++) { cmd = scsi_host_find_tag(ha->host, cnt); if (cmd && stgt == scsi_target(cmd->device) && (!sdev || sdev == cmd->device)) { if (!qla4xxx_eh_wait_on_command(ha, cmd)) { status++; break; } } } return status; } /** * qla4xxx_eh_abort - callback for abort task. * @cmd: Pointer to Linux's SCSI command structure * * This routine is called by the Linux OS to abort the specified * command. **/ static int qla4xxx_eh_abort(struct scsi_cmnd *cmd) { struct scsi_qla_host *ha = to_qla_host(cmd->device->host); unsigned int id = cmd->device->id; uint64_t lun = cmd->device->lun; unsigned long flags; struct srb *srb = NULL; int ret = SUCCESS; int wait = 0; int rval; ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%llu: Abort command issued cmd=%p, cdb=0x%x\n", ha->host_no, id, lun, cmd, cmd->cmnd[0]); rval = qla4xxx_isp_check_reg(ha); if (rval != QLA_SUCCESS) { ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n"); return FAILED; } spin_lock_irqsave(&ha->hardware_lock, flags); srb = qla4xxx_cmd_priv(cmd)->srb; if (!srb) { spin_unlock_irqrestore(&ha->hardware_lock, flags); ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%llu: Specified command has already completed.\n", ha->host_no, id, lun); return SUCCESS; } kref_get(&srb->srb_ref); spin_unlock_irqrestore(&ha->hardware_lock, flags); if (qla4xxx_abort_task(ha, srb) != QLA_SUCCESS) { DEBUG3(printk("scsi%ld:%d:%llu: Abort_task mbx failed.\n", ha->host_no, id, lun)); ret = FAILED; } else { DEBUG3(printk("scsi%ld:%d:%llu: Abort_task mbx success.\n", ha->host_no, id, lun)); wait = 1; } kref_put(&srb->srb_ref, qla4xxx_srb_compl); /* Wait for command to complete */ if (wait) { if (!qla4xxx_eh_wait_on_command(ha, cmd)) { DEBUG2(printk("scsi%ld:%d:%llu: Abort handler timed out\n", ha->host_no, id, lun)); ret = FAILED; } } ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%llu: Abort command - %s\n", ha->host_no, id, lun, (ret == SUCCESS) ? 
"succeeded" : "failed"); return ret; } /** * qla4xxx_eh_device_reset - callback for target reset. * @cmd: Pointer to Linux's SCSI command structure * * This routine is called by the Linux OS to reset all luns on the * specified target. **/ static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd) { struct scsi_qla_host *ha = to_qla_host(cmd->device->host); struct ddb_entry *ddb_entry = cmd->device->hostdata; int ret = FAILED, stat; int rval; if (!ddb_entry) return ret; ret = iscsi_block_scsi_eh(cmd); if (ret) return ret; ret = FAILED; ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%d:%llu: DEVICE RESET ISSUED.\n", ha->host_no, cmd->device->channel, cmd->device->id, cmd->device->lun); DEBUG2(printk(KERN_INFO "scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x," "dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no, cmd, jiffies, scsi_cmd_to_rq(cmd)->timeout / HZ, ha->dpc_flags, cmd->result, cmd->allowed)); rval = qla4xxx_isp_check_reg(ha); if (rval != QLA_SUCCESS) { ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n"); return FAILED; } /* FIXME: wait for hba to go online */ stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun); if (stat != QLA_SUCCESS) { ql4_printk(KERN_INFO, ha, "DEVICE RESET FAILED. %d\n", stat); goto eh_dev_reset_done; } if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device), cmd->device)) { ql4_printk(KERN_INFO, ha, "DEVICE RESET FAILED - waiting for " "commands.\n"); goto eh_dev_reset_done; } /* Send marker. */ if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun, MM_LUN_RESET) != QLA_SUCCESS) goto eh_dev_reset_done; ql4_printk(KERN_INFO, ha, "scsi(%ld:%d:%d:%llu): DEVICE RESET SUCCEEDED.\n", ha->host_no, cmd->device->channel, cmd->device->id, cmd->device->lun); ret = SUCCESS; eh_dev_reset_done: return ret; } /** * qla4xxx_eh_target_reset - callback for target reset. * @cmd: Pointer to Linux's SCSI command structure * * This routine is called by the Linux OS to reset the target. **/ static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd) { struct scsi_qla_host *ha = to_qla_host(cmd->device->host); struct ddb_entry *ddb_entry = cmd->device->hostdata; int stat, ret; int rval; if (!ddb_entry) return FAILED; ret = iscsi_block_scsi_eh(cmd); if (ret) return ret; starget_printk(KERN_INFO, scsi_target(cmd->device), "WARM TARGET RESET ISSUED.\n"); DEBUG2(printk(KERN_INFO "scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, " "to=%x,dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no, cmd, jiffies, scsi_cmd_to_rq(cmd)->timeout / HZ, ha->dpc_flags, cmd->result, cmd->allowed)); rval = qla4xxx_isp_check_reg(ha); if (rval != QLA_SUCCESS) { ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n"); return FAILED; } stat = qla4xxx_reset_target(ha, ddb_entry); if (stat != QLA_SUCCESS) { starget_printk(KERN_INFO, scsi_target(cmd->device), "WARM TARGET RESET FAILED.\n"); return FAILED; } if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device), NULL)) { starget_printk(KERN_INFO, scsi_target(cmd->device), "WARM TARGET DEVICE RESET FAILED - " "waiting for commands.\n"); return FAILED; } /* Send marker. 
*/ if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun, MM_TGT_WARM_RESET) != QLA_SUCCESS) { starget_printk(KERN_INFO, scsi_target(cmd->device), "WARM TARGET DEVICE RESET FAILED - " "marker iocb failed.\n"); return FAILED; } starget_printk(KERN_INFO, scsi_target(cmd->device), "WARM TARGET RESET SUCCEEDED.\n"); return SUCCESS; } /** * qla4xxx_is_eh_active - check if error handler is running * @shost: Pointer to SCSI Host struct * * This routine finds that if reset host is called in EH * scenario or from some application like sg_reset **/ static int qla4xxx_is_eh_active(struct Scsi_Host *shost) { if (shost->shost_state == SHOST_RECOVERY) return 1; return 0; } /** * qla4xxx_eh_host_reset - kernel callback * @cmd: Pointer to Linux's SCSI command structure * * This routine is invoked by the Linux kernel to perform fatal error * recovery on the specified adapter. **/ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd) { int return_status = FAILED; struct scsi_qla_host *ha; int rval; ha = to_qla_host(cmd->device->host); rval = qla4xxx_isp_check_reg(ha); if (rval != QLA_SUCCESS) { ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n"); return FAILED; } if ((is_qla8032(ha) || is_qla8042(ha)) && ql4xdontresethba) qla4_83xx_set_idc_dontreset(ha); /* * For ISP8324 and ISP8042, if IDC_CTRL DONTRESET_BIT0 is set by other * protocol drivers, we should not set device_state to NEED_RESET */ if (ql4xdontresethba || ((is_qla8032(ha) || is_qla8042(ha)) && qla4_83xx_idc_dontreset(ha))) { DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n", ha->host_no, __func__)); /* Clear outstanding srb in queues */ if (qla4xxx_is_eh_active(cmd->device->host)) qla4xxx_abort_active_cmds(ha, DID_ABORT << 16); return FAILED; } ql4_printk(KERN_INFO, ha, "scsi(%ld:%d:%d:%llu): HOST RESET ISSUED.\n", ha->host_no, cmd->device->channel, cmd->device->id, cmd->device->lun); if (qla4xxx_wait_for_hba_online(ha) != QLA_SUCCESS) { DEBUG2(printk("scsi%ld:%d: %s: Unable to reset host. Adapter " "DEAD.\n", ha->host_no, cmd->device->channel, __func__)); return FAILED; } if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) { if (is_qla80XX(ha)) set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags); else set_bit(DPC_RESET_HA, &ha->dpc_flags); } if (qla4xxx_recover_adapter(ha) == QLA_SUCCESS) return_status = SUCCESS; ql4_printk(KERN_INFO, ha, "HOST RESET %s.\n", return_status == FAILED ? "FAILED" : "SUCCEEDED"); return return_status; } static int qla4xxx_context_reset(struct scsi_qla_host *ha) { uint32_t mbox_cmd[MBOX_REG_COUNT]; uint32_t mbox_sts[MBOX_REG_COUNT]; struct addr_ctrl_blk_def *acb = NULL; uint32_t acb_len = sizeof(struct addr_ctrl_blk_def); int rval = QLA_SUCCESS; dma_addr_t acb_dma; acb = dma_alloc_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk_def), &acb_dma, GFP_KERNEL); if (!acb) { ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n", __func__); rval = -ENOMEM; goto exit_port_reset; } memset(acb, 0, acb_len); rval = qla4xxx_get_acb(ha, acb_dma, PRIMARI_ACB, acb_len); if (rval != QLA_SUCCESS) { rval = -EIO; goto exit_free_acb; } rval = qla4xxx_disable_acb(ha); if (rval != QLA_SUCCESS) { rval = -EIO; goto exit_free_acb; } wait_for_completion_timeout(&ha->disable_acb_comp, DISABLE_ACB_TOV * HZ); rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], acb_dma); if (rval != QLA_SUCCESS) { rval = -EIO; goto exit_free_acb; } exit_free_acb: dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk_def), acb, acb_dma); exit_port_reset: DEBUG2(ql4_printk(KERN_INFO, ha, "%s %s\n", __func__, rval == QLA_SUCCESS ? 
"SUCCEEDED" : "FAILED")); return rval; } static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type) { struct scsi_qla_host *ha = to_qla_host(shost); int rval = QLA_SUCCESS; uint32_t idc_ctrl; if (ql4xdontresethba) { DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Don't Reset HBA\n", __func__)); rval = -EPERM; goto exit_host_reset; } if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) goto recover_adapter; switch (reset_type) { case SCSI_ADAPTER_RESET: set_bit(DPC_RESET_HA, &ha->dpc_flags); break; case SCSI_FIRMWARE_RESET: if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) { if (is_qla80XX(ha)) /* set firmware context reset */ set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags); else { rval = qla4xxx_context_reset(ha); goto exit_host_reset; } } break; } recover_adapter: /* For ISP8324 and ISP8042 set graceful reset bit in IDC_DRV_CTRL if * reset is issued by application */ if ((is_qla8032(ha) || is_qla8042(ha)) && test_bit(DPC_RESET_HA, &ha->dpc_flags)) { idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL); qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL, (idc_ctrl | GRACEFUL_RESET_BIT1)); } rval = qla4xxx_recover_adapter(ha); if (rval != QLA_SUCCESS) { DEBUG2(ql4_printk(KERN_INFO, ha, "%s: recover adapter fail\n", __func__)); rval = -EIO; } exit_host_reset: return rval; } /* PCI AER driver recovers from all correctable errors w/o * driver intervention. For uncorrectable errors PCI AER * driver calls the following device driver's callbacks * * - Fatal Errors - link_reset * - Non-Fatal Errors - driver's error_detected() which * returns CAN_RECOVER, NEED_RESET or DISCONNECT. * * PCI AER driver calls * CAN_RECOVER - driver's mmio_enabled(), mmio_enabled() * returns RECOVERED or NEED_RESET if fw_hung * NEED_RESET - driver's slot_reset() * DISCONNECT - device is dead & cannot recover * RECOVERED - driver's resume() */ static pci_ers_result_t qla4xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) { struct scsi_qla_host *ha = pci_get_drvdata(pdev); ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: error detected:state %x\n", ha->host_no, __func__, state); if (!is_aer_supported(ha)) return PCI_ERS_RESULT_NONE; switch (state) { case pci_channel_io_normal: clear_bit(AF_EEH_BUSY, &ha->flags); return PCI_ERS_RESULT_CAN_RECOVER; case pci_channel_io_frozen: set_bit(AF_EEH_BUSY, &ha->flags); qla4xxx_mailbox_premature_completion(ha); qla4xxx_free_irqs(ha); pci_disable_device(pdev); /* Return back all IOs */ qla4xxx_abort_active_cmds(ha, DID_RESET << 16); return PCI_ERS_RESULT_NEED_RESET; case pci_channel_io_perm_failure: set_bit(AF_EEH_BUSY, &ha->flags); set_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags); qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16); return PCI_ERS_RESULT_DISCONNECT; } return PCI_ERS_RESULT_NEED_RESET; } /** * qla4xxx_pci_mmio_enabled() - gets called if * qla4xxx_pci_error_detected() returns PCI_ERS_RESULT_CAN_RECOVER * and read/write to the device still works. 
* @pdev: PCI device pointer **/ static pci_ers_result_t qla4xxx_pci_mmio_enabled(struct pci_dev *pdev) { struct scsi_qla_host *ha = pci_get_drvdata(pdev); if (!is_aer_supported(ha)) return PCI_ERS_RESULT_NONE; return PCI_ERS_RESULT_RECOVERED; } static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha) { uint32_t rval = QLA_ERROR; int fn; struct pci_dev *other_pdev = NULL; ql4_printk(KERN_WARNING, ha, "scsi%ld: In %s\n", ha->host_no, __func__); set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags); if (test_bit(AF_ONLINE, &ha->flags)) { clear_bit(AF_ONLINE, &ha->flags); clear_bit(AF_LINK_UP, &ha->flags); iscsi_host_for_each_session(ha->host, qla4xxx_fail_session); qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); } fn = PCI_FUNC(ha->pdev->devfn); if (is_qla8022(ha)) { while (fn > 0) { fn--; ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Finding PCI device at func %x\n", ha->host_no, __func__, fn); /* Get the pci device given the domain, bus, * slot/function number */ other_pdev = pci_get_domain_bus_and_slot( pci_domain_nr(ha->pdev->bus), ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn), fn)); if (!other_pdev) continue; if (atomic_read(&other_pdev->enable_cnt)) { ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Found PCI func in enabled state%x\n", ha->host_no, __func__, fn); pci_dev_put(other_pdev); break; } pci_dev_put(other_pdev); } } else { /* this case is meant for ISP83xx/ISP84xx only */ if (qla4_83xx_can_perform_reset(ha)) { /* reset fn as iSCSI is going to perform the reset */ fn = 0; } } /* The first function on the card, the reset owner will * start & initialize the firmware. The other functions * on the card will reset the firmware context */ if (!fn) { ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn being reset " "0x%x is the owner\n", ha->host_no, __func__, ha->pdev->devfn); ha->isp_ops->idc_lock(ha); qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, QLA8XXX_DEV_COLD); ha->isp_ops->idc_unlock(ha); rval = qla4_8xxx_update_idc_reg(ha); if (rval == QLA_ERROR) { ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: FAILED\n", ha->host_no, __func__); ha->isp_ops->idc_lock(ha); qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, QLA8XXX_DEV_FAILED); ha->isp_ops->idc_unlock(ha); goto exit_error_recovery; } clear_bit(AF_FW_RECOVERY, &ha->flags); rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER); if (rval != QLA_SUCCESS) { ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: " "FAILED\n", ha->host_no, __func__); qla4xxx_free_irqs(ha); ha->isp_ops->idc_lock(ha); qla4_8xxx_clear_drv_active(ha); qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, QLA8XXX_DEV_FAILED); ha->isp_ops->idc_unlock(ha); } else { ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: " "READY\n", ha->host_no, __func__); ha->isp_ops->idc_lock(ha); qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, QLA8XXX_DEV_READY); /* Clear driver state register */ qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, 0); qla4_8xxx_set_drv_active(ha); ha->isp_ops->idc_unlock(ha); ha->isp_ops->enable_intrs(ha); } } else { ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn 0x%x is not " "the reset owner\n", ha->host_no, __func__, ha->pdev->devfn); if ((qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE) == QLA8XXX_DEV_READY)) { clear_bit(AF_FW_RECOVERY, &ha->flags); rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER); if (rval == QLA_SUCCESS) ha->isp_ops->enable_intrs(ha); else qla4xxx_free_irqs(ha); ha->isp_ops->idc_lock(ha); qla4_8xxx_set_drv_active(ha); ha->isp_ops->idc_unlock(ha); } } exit_error_recovery: clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags); return rval; } static 
pci_ers_result_t qla4xxx_pci_slot_reset(struct pci_dev *pdev) { pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT; struct scsi_qla_host *ha = pci_get_drvdata(pdev); int rc; ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: slot_reset\n", ha->host_no, __func__); if (!is_aer_supported(ha)) return PCI_ERS_RESULT_NONE; /* Restore the saved state of PCIe device - * BAR registers, PCI Config space, PCIX, MSI, * IOV states */ pci_restore_state(pdev); /* pci_restore_state() clears the saved_state flag of the device * save restored state which resets saved_state flag */ pci_save_state(pdev); /* Initialize device or resume if in suspended state */ rc = pci_enable_device(pdev); if (rc) { ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Can't re-enable " "device after reset\n", ha->host_no, __func__); goto exit_slot_reset; } ha->isp_ops->disable_intrs(ha); if (is_qla80XX(ha)) { if (qla4_8xxx_error_recovery(ha) == QLA_SUCCESS) { ret = PCI_ERS_RESULT_RECOVERED; goto exit_slot_reset; } else goto exit_slot_reset; } exit_slot_reset: ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Return=%x\n" "device after reset\n", ha->host_no, __func__, ret); return ret; } static void qla4xxx_pci_resume(struct pci_dev *pdev) { struct scsi_qla_host *ha = pci_get_drvdata(pdev); int ret; ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: pci_resume\n", ha->host_no, __func__); ret = qla4xxx_wait_for_hba_online(ha); if (ret != QLA_SUCCESS) { ql4_printk(KERN_ERR, ha, "scsi%ld: %s: the device failed to " "resume I/O from slot/link_reset\n", ha->host_no, __func__); } clear_bit(AF_EEH_BUSY, &ha->flags); } static const struct pci_error_handlers qla4xxx_err_handler = { .error_detected = qla4xxx_pci_error_detected, .mmio_enabled = qla4xxx_pci_mmio_enabled, .slot_reset = qla4xxx_pci_slot_reset, .resume = qla4xxx_pci_resume, }; static struct pci_device_id qla4xxx_pci_tbl[] = { { .vendor = PCI_VENDOR_ID_QLOGIC, .device = PCI_DEVICE_ID_QLOGIC_ISP4010, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { .vendor = PCI_VENDOR_ID_QLOGIC, .device = PCI_DEVICE_ID_QLOGIC_ISP4022, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { .vendor = PCI_VENDOR_ID_QLOGIC, .device = PCI_DEVICE_ID_QLOGIC_ISP4032, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { .vendor = PCI_VENDOR_ID_QLOGIC, .device = PCI_DEVICE_ID_QLOGIC_ISP8022, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { .vendor = PCI_VENDOR_ID_QLOGIC, .device = PCI_DEVICE_ID_QLOGIC_ISP8324, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { .vendor = PCI_VENDOR_ID_QLOGIC, .device = PCI_DEVICE_ID_QLOGIC_ISP8042, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, {0, 0}, }; MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl); static struct pci_driver qla4xxx_pci_driver = { .name = DRIVER_NAME, .id_table = qla4xxx_pci_tbl, .probe = qla4xxx_probe_adapter, .remove = qla4xxx_remove_adapter, .err_handler = &qla4xxx_err_handler, }; static int __init qla4xxx_module_init(void) { int ret; if (ql4xqfulltracking) qla4xxx_driver_template.track_queue_depth = 1; /* Allocate cache for SRBs. */ srb_cachep = kmem_cache_create("qla4xxx_srbs", sizeof(struct srb), 0, SLAB_HWCACHE_ALIGN, NULL); if (srb_cachep == NULL) { printk(KERN_ERR "%s: Unable to allocate SRB cache..." "Failing load!\n", DRIVER_NAME); ret = -ENOMEM; goto no_srp_cache; } /* Derive version string. 
*/ strcpy(qla4xxx_version_str, QLA4XXX_DRIVER_VERSION); if (ql4xextended_error_logging) strcat(qla4xxx_version_str, "-debug"); qla4xxx_scsi_transport = iscsi_register_transport(&qla4xxx_iscsi_transport); if (!qla4xxx_scsi_transport){ ret = -ENODEV; goto release_srb_cache; } ret = pci_register_driver(&qla4xxx_pci_driver); if (ret) goto unregister_transport; printk(KERN_INFO "QLogic iSCSI HBA Driver\n"); return 0; unregister_transport: iscsi_unregister_transport(&qla4xxx_iscsi_transport); release_srb_cache: kmem_cache_destroy(srb_cachep); no_srp_cache: return ret; } static void __exit qla4xxx_module_exit(void) { pci_unregister_driver(&qla4xxx_pci_driver); iscsi_unregister_transport(&qla4xxx_iscsi_transport); kmem_cache_destroy(srb_cachep); } module_init(qla4xxx_module_init); module_exit(qla4xxx_module_exit); MODULE_AUTHOR("QLogic Corporation"); MODULE_DESCRIPTION("QLogic iSCSI HBA Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(QLA4XXX_DRIVER_VERSION);
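The module init path just above follows the kernel's usual goto-based unwind: the SRB cache, the iSCSI transport registration, and the PCI driver registration are acquired in order, and on failure everything acquired so far is released in reverse order through labeled exits. Below is a minimal userspace sketch of that same idiom, assuming hypothetical setup_a/setup_b/setup_c helpers and their teardown counterparts; none of these are driver symbols.

/* Goto-unwind sketch modeled on the shape of qla4xxx_module_init():
 * acquire resources in order, release in reverse order on failure.
 * All helpers here are illustrative stand-ins only. */
#include <stdio.h>

static int setup_a(void) { return 0; }   /* e.g. cache allocation        */
static int setup_b(void) { return 0; }   /* e.g. transport registration  */
static int setup_c(void) { return -1; }  /* e.g. driver registration: fails */
static void teardown_b(void) { puts("undo b"); }
static void teardown_a(void) { puts("undo a"); }

static int module_init_sketch(void)
{
	int ret;

	ret = setup_a();
	if (ret)
		goto out;		/* nothing to undo yet */

	ret = setup_b();
	if (ret)
		goto undo_a;

	ret = setup_c();
	if (ret)
		goto undo_b;		/* unwind b, then a, in reverse order */

	return 0;

undo_b:
	teardown_b();
undo_a:
	teardown_a();
out:
	return ret;
}

int main(void)
{
	return module_init_sketch() ? 1 : 0;
}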
linux-master
drivers/scsi/qla4xxx/ql4_os.c
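The pci_error_handlers comment block in the file above documents how the AER core drives recovery: error_detected() reports CAN_RECOVER, NEED_RESET or DISCONNECT; CAN_RECOVER leads to mmio_enabled(), NEED_RESET to slot_reset(), and a RECOVERED result to resume(). The following is a simplified userspace model of that dispatch order only; the enum names and toy callbacks are stand-ins for illustration, not the real PCI AER API or the driver's handlers.

/* Simplified model of the AER recovery flow described above. */
#include <stdio.h>

enum ers_result { ERS_CAN_RECOVER, ERS_NEED_RESET, ERS_RECOVERED, ERS_DISCONNECT };
enum channel_state { CH_NORMAL, CH_FROZEN, CH_PERM_FAILURE };

static enum ers_result error_detected(enum channel_state s)
{
	switch (s) {
	case CH_NORMAL:	return ERS_CAN_RECOVER;	/* MMIO still usable  */
	case CH_FROZEN:	return ERS_NEED_RESET;	/* ask core for reset */
	default:	return ERS_DISCONNECT;	/* device is dead     */
	}
}

static enum ers_result mmio_enabled(void) { return ERS_RECOVERED; }
static enum ers_result slot_reset(void)   { return ERS_RECOVERED; }
static void resume_io(void)               { puts("resume I/O"); }

/* What the core does with the callback results, per the driver comment:
 * CAN_RECOVER -> mmio_enabled(), NEED_RESET -> slot_reset(),
 * RECOVERED -> resume(), DISCONNECT -> give up. */
static void aer_recover(enum channel_state s)
{
	enum ers_result r = error_detected(s);

	if (r == ERS_CAN_RECOVER)
		r = mmio_enabled();
	if (r == ERS_NEED_RESET)
		r = slot_reset();
	if (r == ERS_RECOVERED)
		resume_io();
	else
		puts("disconnect");
}

int main(void)
{
	aer_recover(CH_FROZEN);
	return 0;
}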
// SPDX-License-Identifier: GPL-2.0-only /* * QLogic iSCSI HBA Driver * Copyright (c) 2003-2013 QLogic Corporation */ #include <linux/ratelimit.h> #include "ql4_def.h" #include "ql4_version.h" #include "ql4_glbl.h" #include "ql4_dbg.h" #include "ql4_inline.h" uint32_t qla4_83xx_rd_reg(struct scsi_qla_host *ha, ulong addr) { return readl((void __iomem *)(ha->nx_pcibase + addr)); } void qla4_83xx_wr_reg(struct scsi_qla_host *ha, ulong addr, uint32_t val) { writel(val, (void __iomem *)(ha->nx_pcibase + addr)); } static int qla4_83xx_set_win_base(struct scsi_qla_host *ha, uint32_t addr) { uint32_t val; int ret_val = QLA_SUCCESS; qla4_83xx_wr_reg(ha, QLA83XX_CRB_WIN_FUNC(ha->func_num), addr); val = qla4_83xx_rd_reg(ha, QLA83XX_CRB_WIN_FUNC(ha->func_num)); if (val != addr) { ql4_printk(KERN_ERR, ha, "%s: Failed to set register window : addr written 0x%x, read 0x%x!\n", __func__, addr, val); ret_val = QLA_ERROR; } return ret_val; } int qla4_83xx_rd_reg_indirect(struct scsi_qla_host *ha, uint32_t addr, uint32_t *data) { int ret_val; ret_val = qla4_83xx_set_win_base(ha, addr); if (ret_val == QLA_SUCCESS) { *data = qla4_83xx_rd_reg(ha, QLA83XX_WILDCARD); } else { *data = 0xffffffff; ql4_printk(KERN_ERR, ha, "%s: failed read of addr 0x%x!\n", __func__, addr); } return ret_val; } int qla4_83xx_wr_reg_indirect(struct scsi_qla_host *ha, uint32_t addr, uint32_t data) { int ret_val; ret_val = qla4_83xx_set_win_base(ha, addr); if (ret_val == QLA_SUCCESS) qla4_83xx_wr_reg(ha, QLA83XX_WILDCARD, data); else ql4_printk(KERN_ERR, ha, "%s: failed wrt to addr 0x%x, data 0x%x\n", __func__, addr, data); return ret_val; } static int qla4_83xx_flash_lock(struct scsi_qla_host *ha) { int lock_owner; int timeout = 0; uint32_t lock_status = 0; int ret_val = QLA_SUCCESS; while (lock_status == 0) { lock_status = qla4_83xx_rd_reg(ha, QLA83XX_FLASH_LOCK); if (lock_status) break; if (++timeout >= QLA83XX_FLASH_LOCK_TIMEOUT / 20) { lock_owner = qla4_83xx_rd_reg(ha, QLA83XX_FLASH_LOCK_ID); ql4_printk(KERN_ERR, ha, "%s: flash lock by func %d failed, held by func %d\n", __func__, ha->func_num, lock_owner); ret_val = QLA_ERROR; break; } msleep(20); } qla4_83xx_wr_reg(ha, QLA83XX_FLASH_LOCK_ID, ha->func_num); return ret_val; } static void qla4_83xx_flash_unlock(struct scsi_qla_host *ha) { /* Reading FLASH_UNLOCK register unlocks the Flash */ qla4_83xx_wr_reg(ha, QLA83XX_FLASH_LOCK_ID, 0xFF); qla4_83xx_rd_reg(ha, QLA83XX_FLASH_UNLOCK); } int qla4_83xx_flash_read_u32(struct scsi_qla_host *ha, uint32_t flash_addr, uint8_t *p_data, int u32_word_count) { int i; uint32_t u32_word; uint32_t addr = flash_addr; int ret_val = QLA_SUCCESS; ret_val = qla4_83xx_flash_lock(ha); if (ret_val == QLA_ERROR) goto exit_lock_error; if (addr & 0x03) { ql4_printk(KERN_ERR, ha, "%s: Illegal addr = 0x%x\n", __func__, addr); ret_val = QLA_ERROR; goto exit_flash_read; } for (i = 0; i < u32_word_count; i++) { ret_val = qla4_83xx_wr_reg_indirect(ha, QLA83XX_FLASH_DIRECT_WINDOW, (addr & 0xFFFF0000)); if (ret_val == QLA_ERROR) { ql4_printk(KERN_ERR, ha, "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW\n!", __func__, addr); goto exit_flash_read; } ret_val = qla4_83xx_rd_reg_indirect(ha, QLA83XX_FLASH_DIRECT_DATA(addr), &u32_word); if (ret_val == QLA_ERROR) { ql4_printk(KERN_ERR, ha, "%s: failed to read addr 0x%x!\n", __func__, addr); goto exit_flash_read; } *(__le32 *)p_data = le32_to_cpu(u32_word); p_data = p_data + 4; addr = addr + 4; } exit_flash_read: qla4_83xx_flash_unlock(ha); exit_lock_error: return ret_val; } int 
qla4_83xx_lockless_flash_read_u32(struct scsi_qla_host *ha, uint32_t flash_addr, uint8_t *p_data, int u32_word_count) { uint32_t i; uint32_t u32_word; uint32_t flash_offset; uint32_t addr = flash_addr; int ret_val = QLA_SUCCESS; flash_offset = addr & (QLA83XX_FLASH_SECTOR_SIZE - 1); if (addr & 0x3) { ql4_printk(KERN_ERR, ha, "%s: Illegal addr = 0x%x\n", __func__, addr); ret_val = QLA_ERROR; goto exit_lockless_read; } ret_val = qla4_83xx_wr_reg_indirect(ha, QLA83XX_FLASH_DIRECT_WINDOW, addr); if (ret_val == QLA_ERROR) { ql4_printk(KERN_ERR, ha, "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n", __func__, addr); goto exit_lockless_read; } /* Check if data is spread across multiple sectors */ if ((flash_offset + (u32_word_count * sizeof(uint32_t))) > (QLA83XX_FLASH_SECTOR_SIZE - 1)) { /* Multi sector read */ for (i = 0; i < u32_word_count; i++) { ret_val = qla4_83xx_rd_reg_indirect(ha, QLA83XX_FLASH_DIRECT_DATA(addr), &u32_word); if (ret_val == QLA_ERROR) { ql4_printk(KERN_ERR, ha, "%s: failed to read addr 0x%x!\n", __func__, addr); goto exit_lockless_read; } *(__le32 *)p_data = le32_to_cpu(u32_word); p_data = p_data + 4; addr = addr + 4; flash_offset = flash_offset + 4; if (flash_offset > (QLA83XX_FLASH_SECTOR_SIZE - 1)) { /* This write is needed once for each sector */ ret_val = qla4_83xx_wr_reg_indirect(ha, QLA83XX_FLASH_DIRECT_WINDOW, addr); if (ret_val == QLA_ERROR) { ql4_printk(KERN_ERR, ha, "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n", __func__, addr); goto exit_lockless_read; } flash_offset = 0; } } } else { /* Single sector read */ for (i = 0; i < u32_word_count; i++) { ret_val = qla4_83xx_rd_reg_indirect(ha, QLA83XX_FLASH_DIRECT_DATA(addr), &u32_word); if (ret_val == QLA_ERROR) { ql4_printk(KERN_ERR, ha, "%s: failed to read addr 0x%x!\n", __func__, addr); goto exit_lockless_read; } *(__le32 *)p_data = le32_to_cpu(u32_word); p_data = p_data + 4; addr = addr + 4; } } exit_lockless_read: return ret_val; } void qla4_83xx_rom_lock_recovery(struct scsi_qla_host *ha) { if (qla4_83xx_flash_lock(ha)) ql4_printk(KERN_INFO, ha, "%s: Resetting rom lock\n", __func__); /* * We got the lock, or someone else is holding the lock * since we are restting, forcefully unlock */ qla4_83xx_flash_unlock(ha); } #define INTENT_TO_RECOVER 0x01 #define PROCEED_TO_RECOVER 0x02 static int qla4_83xx_lock_recovery(struct scsi_qla_host *ha) { uint32_t lock = 0, lockid; int ret_val = QLA_ERROR; lockid = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY); /* Check for other Recovery in progress, go wait */ if ((lockid & 0x3) != 0) goto exit_lock_recovery; /* Intent to Recover */ ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY, (ha->func_num << 2) | INTENT_TO_RECOVER); msleep(200); /* Check Intent to Recover is advertised */ lockid = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY); if ((lockid & 0x3C) != (ha->func_num << 2)) goto exit_lock_recovery; ql4_printk(KERN_INFO, ha, "%s: IDC Lock recovery initiated for func %d\n", __func__, ha->func_num); /* Proceed to Recover */ ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY, (ha->func_num << 2) | PROCEED_TO_RECOVER); /* Force Unlock */ ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCK_ID, 0xFF); ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_UNLOCK); /* Clear bits 0-5 in IDC_RECOVERY register*/ ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY, 0); /* Get lock */ lock = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCK); if (lock) { lockid = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCK_ID); lockid = ((lockid + (1 << 8)) 
& ~0xFF) | ha->func_num; ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCK_ID, lockid); ret_val = QLA_SUCCESS; } exit_lock_recovery: return ret_val; } #define QLA83XX_DRV_LOCK_MSLEEP 200 int qla4_83xx_drv_lock(struct scsi_qla_host *ha) { int timeout = 0; uint32_t status = 0; int ret_val = QLA_SUCCESS; uint32_t first_owner = 0; uint32_t tmo_owner = 0; uint32_t lock_id; uint32_t func_num; uint32_t lock_cnt; while (status == 0) { status = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK); if (status) { /* Increment Counter (8-31) and update func_num (0-7) on * getting a successful lock */ lock_id = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK_ID); lock_id = ((lock_id + (1 << 8)) & ~0xFF) | ha->func_num; qla4_83xx_wr_reg(ha, QLA83XX_DRV_LOCK_ID, lock_id); break; } if (timeout == 0) /* Save counter + ID of function holding the lock for * first failure */ first_owner = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCK_ID); if (++timeout >= (QLA83XX_DRV_LOCK_TIMEOUT / QLA83XX_DRV_LOCK_MSLEEP)) { tmo_owner = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK_ID); func_num = tmo_owner & 0xFF; lock_cnt = tmo_owner >> 8; ql4_printk(KERN_INFO, ha, "%s: Lock by func %d failed after 2s, lock held by func %d, lock count %d, first_owner %d\n", __func__, ha->func_num, func_num, lock_cnt, (first_owner & 0xFF)); if (first_owner != tmo_owner) { /* Some other driver got lock, OR same driver * got lock again (counter value changed), when * we were waiting for lock. * Retry for another 2 sec */ ql4_printk(KERN_INFO, ha, "%s: IDC lock failed for func %d\n", __func__, ha->func_num); timeout = 0; } else { /* Same driver holding lock > 2sec. * Force Recovery */ ret_val = qla4_83xx_lock_recovery(ha); if (ret_val == QLA_SUCCESS) { /* Recovered and got lock */ ql4_printk(KERN_INFO, ha, "%s: IDC lock Recovery by %d successful\n", __func__, ha->func_num); break; } /* Recovery Failed, some other function * has the lock, wait for 2secs and retry */ ql4_printk(KERN_INFO, ha, "%s: IDC lock Recovery by %d failed, Retrying timeout\n", __func__, ha->func_num); timeout = 0; } } msleep(QLA83XX_DRV_LOCK_MSLEEP); } return ret_val; } void qla4_83xx_drv_unlock(struct scsi_qla_host *ha) { int id; id = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK_ID); if ((id & 0xFF) != ha->func_num) { ql4_printk(KERN_ERR, ha, "%s: IDC Unlock by %d failed, lock owner is %d\n", __func__, ha->func_num, (id & 0xFF)); return; } /* Keep lock counter value, update the ha->func_num to 0xFF */ qla4_83xx_wr_reg(ha, QLA83XX_DRV_LOCK_ID, (id | 0xFF)); qla4_83xx_rd_reg(ha, QLA83XX_DRV_UNLOCK); } void qla4_83xx_set_idc_dontreset(struct scsi_qla_host *ha) { uint32_t idc_ctrl; idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL); idc_ctrl |= DONTRESET_BIT0; qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL, idc_ctrl); DEBUG2(ql4_printk(KERN_INFO, ha, "%s: idc_ctrl = %d\n", __func__, idc_ctrl)); } void qla4_83xx_clear_idc_dontreset(struct scsi_qla_host *ha) { uint32_t idc_ctrl; idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL); idc_ctrl &= ~DONTRESET_BIT0; qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL, idc_ctrl); DEBUG2(ql4_printk(KERN_INFO, ha, "%s: idc_ctrl = %d\n", __func__, idc_ctrl)); } int qla4_83xx_idc_dontreset(struct scsi_qla_host *ha) { uint32_t idc_ctrl; idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL); return idc_ctrl & DONTRESET_BIT0; } /*-------------------------IDC State Machine ---------------------*/ enum { UNKNOWN_CLASS = 0, NIC_CLASS, FCOE_CLASS, ISCSI_CLASS }; struct device_info { int func_num; int device_type; int port_num; }; int qla4_83xx_can_perform_reset(struct scsi_qla_host *ha) { 
uint32_t drv_active; uint32_t dev_part, dev_part1, dev_part2; int i; struct device_info device_map[16]; int func_nibble; int nibble; int nic_present = 0; int iscsi_present = 0; int iscsi_func_low = 0; /* Use the dev_partition register to determine the PCI function number * and then check drv_active register to see which driver is loaded */ dev_part1 = qla4_83xx_rd_reg(ha, ha->reg_tbl[QLA8XXX_CRB_DEV_PART_INFO]); dev_part2 = qla4_83xx_rd_reg(ha, QLA83XX_CRB_DEV_PART_INFO2); drv_active = qla4_83xx_rd_reg(ha, ha->reg_tbl[QLA8XXX_CRB_DRV_ACTIVE]); /* Each function has 4 bits in dev_partition Info register, * Lower 2 bits - device type, Upper 2 bits - physical port number */ dev_part = dev_part1; for (i = nibble = 0; i <= 15; i++, nibble++) { func_nibble = dev_part & (0xF << (nibble * 4)); func_nibble >>= (nibble * 4); device_map[i].func_num = i; device_map[i].device_type = func_nibble & 0x3; device_map[i].port_num = func_nibble & 0xC; if (device_map[i].device_type == NIC_CLASS) { if (drv_active & (1 << device_map[i].func_num)) { nic_present++; break; } } else if (device_map[i].device_type == ISCSI_CLASS) { if (drv_active & (1 << device_map[i].func_num)) { if (!iscsi_present || iscsi_func_low > device_map[i].func_num) iscsi_func_low = device_map[i].func_num; iscsi_present++; } } /* For function_num[8..15] get info from dev_part2 register */ if (nibble == 7) { nibble = 0; dev_part = dev_part2; } } /* NIC, iSCSI and FCOE are the Reset owners based on order, NIC gets * precedence over iSCSI and FCOE and iSCSI over FCOE, based on drivers * present. */ if (!nic_present && (ha->func_num == iscsi_func_low)) { DEBUG2(ql4_printk(KERN_INFO, ha, "%s: can reset - NIC not present and lower iSCSI function is %d\n", __func__, ha->func_num)); return 1; } return 0; } /** * qla4_83xx_need_reset_handler - Code to start reset sequence * @ha: pointer to adapter structure * * Note: IDC lock must be held upon entry **/ void qla4_83xx_need_reset_handler(struct scsi_qla_host *ha) { uint32_t dev_state, drv_state, drv_active; unsigned long reset_timeout, dev_init_timeout; ql4_printk(KERN_INFO, ha, "%s: Performing ISP error recovery\n", __func__); if (!test_bit(AF_8XXX_RST_OWNER, &ha->flags)) { DEBUG2(ql4_printk(KERN_INFO, ha, "%s: reset acknowledged\n", __func__)); qla4_8xxx_set_rst_ready(ha); /* Non-reset owners ACK Reset and wait for device INIT state * as part of Reset Recovery by Reset Owner */ dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ); do { if (time_after_eq(jiffies, dev_init_timeout)) { ql4_printk(KERN_INFO, ha, "%s: Non Reset owner dev init timeout\n", __func__); break; } ha->isp_ops->idc_unlock(ha); msleep(1000); ha->isp_ops->idc_lock(ha); dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE); } while (dev_state == QLA8XXX_DEV_NEED_RESET); } else { qla4_8xxx_set_rst_ready(ha); reset_timeout = jiffies + (ha->nx_reset_timeout * HZ); drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE); drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE); ql4_printk(KERN_INFO, ha, "%s: drv_state = 0x%x, drv_active = 0x%x\n", __func__, drv_state, drv_active); while (drv_state != drv_active) { if (time_after_eq(jiffies, reset_timeout)) { ql4_printk(KERN_INFO, ha, "%s: %s: RESET TIMEOUT! 
drv_state: 0x%08x, drv_active: 0x%08x\n", __func__, DRIVER_NAME, drv_state, drv_active); break; } ha->isp_ops->idc_unlock(ha); msleep(1000); ha->isp_ops->idc_lock(ha); drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE); drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE); } if (drv_state != drv_active) { ql4_printk(KERN_INFO, ha, "%s: Reset_owner turning off drv_active of non-acking function 0x%x\n", __func__, (drv_active ^ drv_state)); drv_active = drv_active & drv_state; qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_ACTIVE, drv_active); } clear_bit(AF_8XXX_RST_OWNER, &ha->flags); /* Start Reset Recovery */ qla4_8xxx_device_bootstrap(ha); } } void qla4_83xx_get_idc_param(struct scsi_qla_host *ha) { uint32_t idc_params, ret_val; ret_val = qla4_83xx_flash_read_u32(ha, QLA83XX_IDC_PARAM_ADDR, (uint8_t *)&idc_params, 1); if (ret_val == QLA_SUCCESS) { ha->nx_dev_init_timeout = idc_params & 0xFFFF; ha->nx_reset_timeout = (idc_params >> 16) & 0xFFFF; } else { ha->nx_dev_init_timeout = ROM_DEV_INIT_TIMEOUT; ha->nx_reset_timeout = ROM_DRV_RESET_ACK_TIMEOUT; } DEBUG2(ql4_printk(KERN_DEBUG, ha, "%s: ha->nx_dev_init_timeout = %d, ha->nx_reset_timeout = %d\n", __func__, ha->nx_dev_init_timeout, ha->nx_reset_timeout)); } /*-------------------------Reset Sequence Functions-----------------------*/ static void qla4_83xx_dump_reset_seq_hdr(struct scsi_qla_host *ha) { uint8_t *phdr; if (!ha->reset_tmplt.buff) { ql4_printk(KERN_ERR, ha, "%s: Error: Invalid reset_seq_template\n", __func__); return; } phdr = ha->reset_tmplt.buff; DEBUG2(ql4_printk(KERN_INFO, ha, "Reset Template: 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X\n", *phdr, *(phdr+1), *(phdr+2), *(phdr+3), *(phdr+4), *(phdr+5), *(phdr+6), *(phdr+7), *(phdr + 8), *(phdr+9), *(phdr+10), *(phdr+11), *(phdr+12), *(phdr+13), *(phdr+14), *(phdr+15))); } static int qla4_83xx_copy_bootloader(struct scsi_qla_host *ha) { uint8_t *p_cache; uint32_t src, count, size; uint64_t dest; int ret_val = QLA_SUCCESS; src = QLA83XX_BOOTLOADER_FLASH_ADDR; dest = qla4_83xx_rd_reg(ha, QLA83XX_BOOTLOADER_ADDR); size = qla4_83xx_rd_reg(ha, QLA83XX_BOOTLOADER_SIZE); /* 128 bit alignment check */ if (size & 0xF) size = (size + 16) & ~0xF; /* 16 byte count */ count = size/16; p_cache = vmalloc(size); if (p_cache == NULL) { ql4_printk(KERN_ERR, ha, "%s: Failed to allocate memory for boot loader cache\n", __func__); ret_val = QLA_ERROR; goto exit_copy_bootloader; } ret_val = qla4_83xx_lockless_flash_read_u32(ha, src, p_cache, size / sizeof(uint32_t)); if (ret_val == QLA_ERROR) { ql4_printk(KERN_ERR, ha, "%s: Error reading firmware from flash\n", __func__); goto exit_copy_error; } DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Read firmware from flash\n", __func__)); /* 128 bit/16 byte write to MS memory */ ret_val = qla4_8xxx_ms_mem_write_128b(ha, dest, (uint32_t *)p_cache, count); if (ret_val == QLA_ERROR) { ql4_printk(KERN_ERR, ha, "%s: Error writing firmware to MS\n", __func__); goto exit_copy_error; } DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Wrote firmware size %d to MS\n", __func__, size)); exit_copy_error: vfree(p_cache); exit_copy_bootloader: return ret_val; } static int qla4_83xx_check_cmd_peg_status(struct scsi_qla_host *ha) { uint32_t val, ret_val = QLA_ERROR; int retries = CRB_CMDPEG_CHECK_RETRY_COUNT; do { val = qla4_83xx_rd_reg(ha, QLA83XX_CMDPEG_STATE); if (val == PHAN_INITIALIZE_COMPLETE) { DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Command Peg initialization complete. 
State=0x%x\n", __func__, val)); ret_val = QLA_SUCCESS; break; } msleep(CRB_CMDPEG_CHECK_DELAY); } while (--retries); return ret_val; } /** * qla4_83xx_poll_reg - Poll the given CRB addr for duration msecs till * value read ANDed with test_mask is equal to test_result. * * @ha : Pointer to adapter structure * @addr : CRB register address * @duration : Poll for total of "duration" msecs * @test_mask : Mask value read with "test_mask" * @test_result : Compare (value&test_mask) with test_result. **/ static int qla4_83xx_poll_reg(struct scsi_qla_host *ha, uint32_t addr, int duration, uint32_t test_mask, uint32_t test_result) { uint32_t value; uint8_t retries; int ret_val = QLA_SUCCESS; ret_val = qla4_83xx_rd_reg_indirect(ha, addr, &value); if (ret_val == QLA_ERROR) goto exit_poll_reg; retries = duration / 10; do { if ((value & test_mask) != test_result) { msleep(duration / 10); ret_val = qla4_83xx_rd_reg_indirect(ha, addr, &value); if (ret_val == QLA_ERROR) goto exit_poll_reg; ret_val = QLA_ERROR; } else { ret_val = QLA_SUCCESS; break; } } while (retries--); exit_poll_reg: if (ret_val == QLA_ERROR) { ha->reset_tmplt.seq_error++; ql4_printk(KERN_ERR, ha, "%s: Poll Failed: 0x%08x 0x%08x 0x%08x\n", __func__, value, test_mask, test_result); } return ret_val; } static int qla4_83xx_reset_seq_checksum_test(struct scsi_qla_host *ha) { uint32_t sum = 0; uint16_t *buff = (uint16_t *)ha->reset_tmplt.buff; int u16_count = ha->reset_tmplt.hdr->size / sizeof(uint16_t); int ret_val; while (u16_count-- > 0) sum += *buff++; while (sum >> 16) sum = (sum & 0xFFFF) + (sum >> 16); /* checksum of 0 indicates a valid template */ if (~sum) { ret_val = QLA_SUCCESS; } else { ql4_printk(KERN_ERR, ha, "%s: Reset seq checksum failed\n", __func__); ret_val = QLA_ERROR; } return ret_val; } /** * qla4_83xx_read_reset_template - Read Reset Template from Flash * @ha: Pointer to adapter structure **/ void qla4_83xx_read_reset_template(struct scsi_qla_host *ha) { uint8_t *p_buff; uint32_t addr, tmplt_hdr_def_size, tmplt_hdr_size; uint32_t ret_val; ha->reset_tmplt.seq_error = 0; ha->reset_tmplt.buff = vmalloc(QLA83XX_RESTART_TEMPLATE_SIZE); if (ha->reset_tmplt.buff == NULL) { ql4_printk(KERN_ERR, ha, "%s: Failed to allocate reset template resources\n", __func__); goto exit_read_reset_template; } p_buff = ha->reset_tmplt.buff; addr = QLA83XX_RESET_TEMPLATE_ADDR; tmplt_hdr_def_size = sizeof(struct qla4_83xx_reset_template_hdr) / sizeof(uint32_t); DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Read template hdr size %d from Flash\n", __func__, tmplt_hdr_def_size)); /* Copy template header from flash */ ret_val = qla4_83xx_flash_read_u32(ha, addr, p_buff, tmplt_hdr_def_size); if (ret_val != QLA_SUCCESS) { ql4_printk(KERN_ERR, ha, "%s: Failed to read reset template\n", __func__); goto exit_read_template_error; } ha->reset_tmplt.hdr = (struct qla4_83xx_reset_template_hdr *)ha->reset_tmplt.buff; /* Validate the template header size and signature */ tmplt_hdr_size = ha->reset_tmplt.hdr->hdr_size/sizeof(uint32_t); if ((tmplt_hdr_size != tmplt_hdr_def_size) || (ha->reset_tmplt.hdr->signature != RESET_TMPLT_HDR_SIGNATURE)) { ql4_printk(KERN_ERR, ha, "%s: Template Header size %d is invalid, tmplt_hdr_def_size %d\n", __func__, tmplt_hdr_size, tmplt_hdr_def_size); goto exit_read_template_error; } addr = QLA83XX_RESET_TEMPLATE_ADDR + ha->reset_tmplt.hdr->hdr_size; p_buff = ha->reset_tmplt.buff + ha->reset_tmplt.hdr->hdr_size; tmplt_hdr_def_size = (ha->reset_tmplt.hdr->size - ha->reset_tmplt.hdr->hdr_size) / sizeof(uint32_t); DEBUG2(ql4_printk(KERN_INFO, 
ha, "%s: Read rest of the template size %d\n", __func__, ha->reset_tmplt.hdr->size)); /* Copy rest of the template */ ret_val = qla4_83xx_flash_read_u32(ha, addr, p_buff, tmplt_hdr_def_size); if (ret_val != QLA_SUCCESS) { ql4_printk(KERN_ERR, ha, "%s: Failed to read reset template\n", __func__); goto exit_read_template_error; } /* Integrity check */ if (qla4_83xx_reset_seq_checksum_test(ha)) { ql4_printk(KERN_ERR, ha, "%s: Reset Seq checksum failed!\n", __func__); goto exit_read_template_error; } DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Reset Seq checksum passed, Get stop, start and init seq offsets\n", __func__)); /* Get STOP, START, INIT sequence offsets */ ha->reset_tmplt.init_offset = ha->reset_tmplt.buff + ha->reset_tmplt.hdr->init_seq_offset; ha->reset_tmplt.start_offset = ha->reset_tmplt.buff + ha->reset_tmplt.hdr->start_seq_offset; ha->reset_tmplt.stop_offset = ha->reset_tmplt.buff + ha->reset_tmplt.hdr->hdr_size; qla4_83xx_dump_reset_seq_hdr(ha); goto exit_read_reset_template; exit_read_template_error: vfree(ha->reset_tmplt.buff); exit_read_reset_template: return; } /** * qla4_83xx_read_write_crb_reg - Read from raddr and write value to waddr. * * @ha : Pointer to adapter structure * @raddr : CRB address to read from * @waddr : CRB address to write to **/ static void qla4_83xx_read_write_crb_reg(struct scsi_qla_host *ha, uint32_t raddr, uint32_t waddr) { uint32_t value; qla4_83xx_rd_reg_indirect(ha, raddr, &value); qla4_83xx_wr_reg_indirect(ha, waddr, value); } /** * qla4_83xx_rmw_crb_reg - Read Modify Write crb register * * This function read value from raddr, AND with test_mask, * Shift Left,Right/OR/XOR with values RMW header and write value to waddr. * * @ha : Pointer to adapter structure * @raddr : CRB address to read from * @waddr : CRB address to write to * @p_rmw_hdr : header with shift/or/xor values. **/ static void qla4_83xx_rmw_crb_reg(struct scsi_qla_host *ha, uint32_t raddr, uint32_t waddr, struct qla4_83xx_rmw *p_rmw_hdr) { uint32_t value; if (p_rmw_hdr->index_a) value = ha->reset_tmplt.array[p_rmw_hdr->index_a]; else qla4_83xx_rd_reg_indirect(ha, raddr, &value); value &= p_rmw_hdr->test_mask; value <<= p_rmw_hdr->shl; value >>= p_rmw_hdr->shr; value |= p_rmw_hdr->or_value; value ^= p_rmw_hdr->xor_value; qla4_83xx_wr_reg_indirect(ha, waddr, value); return; } static void qla4_83xx_write_list(struct scsi_qla_host *ha, struct qla4_83xx_reset_entry_hdr *p_hdr) { struct qla4_83xx_entry *p_entry; uint32_t i; p_entry = (struct qla4_83xx_entry *) ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr)); for (i = 0; i < p_hdr->count; i++, p_entry++) { qla4_83xx_wr_reg_indirect(ha, p_entry->arg1, p_entry->arg2); if (p_hdr->delay) udelay((uint32_t)(p_hdr->delay)); } } static void qla4_83xx_read_write_list(struct scsi_qla_host *ha, struct qla4_83xx_reset_entry_hdr *p_hdr) { struct qla4_83xx_entry *p_entry; uint32_t i; p_entry = (struct qla4_83xx_entry *) ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr)); for (i = 0; i < p_hdr->count; i++, p_entry++) { qla4_83xx_read_write_crb_reg(ha, p_entry->arg1, p_entry->arg2); if (p_hdr->delay) udelay((uint32_t)(p_hdr->delay)); } } static void qla4_83xx_poll_list(struct scsi_qla_host *ha, struct qla4_83xx_reset_entry_hdr *p_hdr) { long delay; struct qla4_83xx_entry *p_entry; struct qla4_83xx_poll *p_poll; uint32_t i; uint32_t value; p_poll = (struct qla4_83xx_poll *) ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr)); /* Entries start after 8 byte qla4_83xx_poll, poll header contains * the test_mask, test_value. 
*/ p_entry = (struct qla4_83xx_entry *)((char *)p_poll + sizeof(struct qla4_83xx_poll)); delay = (long)p_hdr->delay; if (!delay) { for (i = 0; i < p_hdr->count; i++, p_entry++) { qla4_83xx_poll_reg(ha, p_entry->arg1, delay, p_poll->test_mask, p_poll->test_value); } } else { for (i = 0; i < p_hdr->count; i++, p_entry++) { if (qla4_83xx_poll_reg(ha, p_entry->arg1, delay, p_poll->test_mask, p_poll->test_value)) { qla4_83xx_rd_reg_indirect(ha, p_entry->arg1, &value); qla4_83xx_rd_reg_indirect(ha, p_entry->arg2, &value); } } } } static void qla4_83xx_poll_write_list(struct scsi_qla_host *ha, struct qla4_83xx_reset_entry_hdr *p_hdr) { long delay; struct qla4_83xx_quad_entry *p_entry; struct qla4_83xx_poll *p_poll; uint32_t i; p_poll = (struct qla4_83xx_poll *) ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr)); p_entry = (struct qla4_83xx_quad_entry *) ((char *)p_poll + sizeof(struct qla4_83xx_poll)); delay = (long)p_hdr->delay; for (i = 0; i < p_hdr->count; i++, p_entry++) { qla4_83xx_wr_reg_indirect(ha, p_entry->dr_addr, p_entry->dr_value); qla4_83xx_wr_reg_indirect(ha, p_entry->ar_addr, p_entry->ar_value); if (delay) { if (qla4_83xx_poll_reg(ha, p_entry->ar_addr, delay, p_poll->test_mask, p_poll->test_value)) { DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Timeout Error: poll list, item_num %d, entry_num %d\n", __func__, i, ha->reset_tmplt.seq_index)); } } } } static void qla4_83xx_read_modify_write(struct scsi_qla_host *ha, struct qla4_83xx_reset_entry_hdr *p_hdr) { struct qla4_83xx_entry *p_entry; struct qla4_83xx_rmw *p_rmw_hdr; uint32_t i; p_rmw_hdr = (struct qla4_83xx_rmw *) ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr)); p_entry = (struct qla4_83xx_entry *) ((char *)p_rmw_hdr + sizeof(struct qla4_83xx_rmw)); for (i = 0; i < p_hdr->count; i++, p_entry++) { qla4_83xx_rmw_crb_reg(ha, p_entry->arg1, p_entry->arg2, p_rmw_hdr); if (p_hdr->delay) udelay((uint32_t)(p_hdr->delay)); } } static void qla4_83xx_pause(struct scsi_qla_host *ha, struct qla4_83xx_reset_entry_hdr *p_hdr) { if (p_hdr->delay) mdelay((uint32_t)((long)p_hdr->delay)); } static void qla4_83xx_poll_read_list(struct scsi_qla_host *ha, struct qla4_83xx_reset_entry_hdr *p_hdr) { long delay; int index; struct qla4_83xx_quad_entry *p_entry; struct qla4_83xx_poll *p_poll; uint32_t i; uint32_t value; p_poll = (struct qla4_83xx_poll *) ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr)); p_entry = (struct qla4_83xx_quad_entry *) ((char *)p_poll + sizeof(struct qla4_83xx_poll)); delay = (long)p_hdr->delay; for (i = 0; i < p_hdr->count; i++, p_entry++) { qla4_83xx_wr_reg_indirect(ha, p_entry->ar_addr, p_entry->ar_value); if (delay) { if (qla4_83xx_poll_reg(ha, p_entry->ar_addr, delay, p_poll->test_mask, p_poll->test_value)) { DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Timeout Error: poll list, Item_num %d, entry_num %d\n", __func__, i, ha->reset_tmplt.seq_index)); } else { index = ha->reset_tmplt.array_index; qla4_83xx_rd_reg_indirect(ha, p_entry->dr_addr, &value); ha->reset_tmplt.array[index++] = value; if (index == QLA83XX_MAX_RESET_SEQ_ENTRIES) ha->reset_tmplt.array_index = 1; } } } } static void qla4_83xx_seq_end(struct scsi_qla_host *ha, struct qla4_83xx_reset_entry_hdr *p_hdr) { ha->reset_tmplt.seq_end = 1; } static void qla4_83xx_template_end(struct scsi_qla_host *ha, struct qla4_83xx_reset_entry_hdr *p_hdr) { ha->reset_tmplt.template_end = 1; if (ha->reset_tmplt.seq_error == 0) { DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Reset sequence completed SUCCESSFULLY.\n", __func__)); } else { ql4_printk(KERN_ERR, ha, "%s: Reset 
sequence completed with some timeout errors.\n", __func__); } } /** * qla4_83xx_process_reset_template - Process reset template. * * Process all entries in reset template till entry with SEQ_END opcode, * which indicates end of the reset template processing. Each entry has a * Reset Entry header, entry opcode/command, with size of the entry, number * of entries in sub-sequence and delay in microsecs or timeout in millisecs. * * @ha : Pointer to adapter structure * @p_buff : Common reset entry header. **/ static void qla4_83xx_process_reset_template(struct scsi_qla_host *ha, char *p_buff) { int index, entries; struct qla4_83xx_reset_entry_hdr *p_hdr; char *p_entry = p_buff; ha->reset_tmplt.seq_end = 0; ha->reset_tmplt.template_end = 0; entries = ha->reset_tmplt.hdr->entries; index = ha->reset_tmplt.seq_index; for (; (!ha->reset_tmplt.seq_end) && (index < entries); index++) { p_hdr = (struct qla4_83xx_reset_entry_hdr *)p_entry; switch (p_hdr->cmd) { case OPCODE_NOP: break; case OPCODE_WRITE_LIST: qla4_83xx_write_list(ha, p_hdr); break; case OPCODE_READ_WRITE_LIST: qla4_83xx_read_write_list(ha, p_hdr); break; case OPCODE_POLL_LIST: qla4_83xx_poll_list(ha, p_hdr); break; case OPCODE_POLL_WRITE_LIST: qla4_83xx_poll_write_list(ha, p_hdr); break; case OPCODE_READ_MODIFY_WRITE: qla4_83xx_read_modify_write(ha, p_hdr); break; case OPCODE_SEQ_PAUSE: qla4_83xx_pause(ha, p_hdr); break; case OPCODE_SEQ_END: qla4_83xx_seq_end(ha, p_hdr); break; case OPCODE_TMPL_END: qla4_83xx_template_end(ha, p_hdr); break; case OPCODE_POLL_READ_LIST: qla4_83xx_poll_read_list(ha, p_hdr); break; default: ql4_printk(KERN_ERR, ha, "%s: Unknown command ==> 0x%04x on entry = %d\n", __func__, p_hdr->cmd, index); break; } /* Set pointer to next entry in the sequence. */ p_entry += p_hdr->size; } ha->reset_tmplt.seq_index = index; } static void qla4_83xx_process_stop_seq(struct scsi_qla_host *ha) { ha->reset_tmplt.seq_index = 0; qla4_83xx_process_reset_template(ha, ha->reset_tmplt.stop_offset); if (ha->reset_tmplt.seq_end != 1) ql4_printk(KERN_ERR, ha, "%s: Abrupt STOP Sub-Sequence end.\n", __func__); } static void qla4_83xx_process_start_seq(struct scsi_qla_host *ha) { qla4_83xx_process_reset_template(ha, ha->reset_tmplt.start_offset); if (ha->reset_tmplt.template_end != 1) ql4_printk(KERN_ERR, ha, "%s: Abrupt START Sub-Sequence end.\n", __func__); } static void qla4_83xx_process_init_seq(struct scsi_qla_host *ha) { qla4_83xx_process_reset_template(ha, ha->reset_tmplt.init_offset); if (ha->reset_tmplt.seq_end != 1) ql4_printk(KERN_ERR, ha, "%s: Abrupt INIT Sub-Sequence end.\n", __func__); } static int qla4_83xx_restart(struct scsi_qla_host *ha) { int ret_val = QLA_SUCCESS; uint32_t idc_ctrl; qla4_83xx_process_stop_seq(ha); /* * Collect minidump. 
* If IDC_CTRL BIT1 is set, clear it on going to INIT state and * don't collect minidump */ idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL); if (idc_ctrl & GRACEFUL_RESET_BIT1) { qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL, (idc_ctrl & ~GRACEFUL_RESET_BIT1)); ql4_printk(KERN_INFO, ha, "%s: Graceful RESET: Not collecting minidump\n", __func__); } else { qla4_8xxx_get_minidump(ha); } qla4_83xx_process_init_seq(ha); if (qla4_83xx_copy_bootloader(ha)) { ql4_printk(KERN_ERR, ha, "%s: Copy bootloader, firmware restart failed!\n", __func__); ret_val = QLA_ERROR; goto exit_restart; } qla4_83xx_wr_reg(ha, QLA83XX_FW_IMAGE_VALID, QLA83XX_BOOT_FROM_FLASH); qla4_83xx_process_start_seq(ha); exit_restart: return ret_val; } int qla4_83xx_start_firmware(struct scsi_qla_host *ha) { int ret_val = QLA_SUCCESS; ret_val = qla4_83xx_restart(ha); if (ret_val == QLA_ERROR) { ql4_printk(KERN_ERR, ha, "%s: Restart error\n", __func__); goto exit_start_fw; } else { DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Restart done\n", __func__)); } ret_val = qla4_83xx_check_cmd_peg_status(ha); if (ret_val == QLA_ERROR) ql4_printk(KERN_ERR, ha, "%s: Peg not initialized\n", __func__); exit_start_fw: return ret_val; } /*----------------------Interrupt Related functions ---------------------*/ static void qla4_83xx_disable_iocb_intrs(struct scsi_qla_host *ha) { if (test_and_clear_bit(AF_83XX_IOCB_INTR_ON, &ha->flags)) qla4_8xxx_intr_disable(ha); } static void qla4_83xx_disable_mbox_intrs(struct scsi_qla_host *ha) { uint32_t mb_int, ret; if (test_and_clear_bit(AF_83XX_MBOX_INTR_ON, &ha->flags)) { ret = readl(&ha->qla4_83xx_reg->mbox_int); mb_int = ret & ~INT_ENABLE_FW_MB; writel(mb_int, &ha->qla4_83xx_reg->mbox_int); writel(1, &ha->qla4_83xx_reg->leg_int_mask); } } void qla4_83xx_disable_intrs(struct scsi_qla_host *ha) { qla4_83xx_disable_mbox_intrs(ha); qla4_83xx_disable_iocb_intrs(ha); } static void qla4_83xx_enable_iocb_intrs(struct scsi_qla_host *ha) { if (!test_bit(AF_83XX_IOCB_INTR_ON, &ha->flags)) { qla4_8xxx_intr_enable(ha); set_bit(AF_83XX_IOCB_INTR_ON, &ha->flags); } } void qla4_83xx_enable_mbox_intrs(struct scsi_qla_host *ha) { uint32_t mb_int; if (!test_bit(AF_83XX_MBOX_INTR_ON, &ha->flags)) { mb_int = INT_ENABLE_FW_MB; writel(mb_int, &ha->qla4_83xx_reg->mbox_int); writel(0, &ha->qla4_83xx_reg->leg_int_mask); set_bit(AF_83XX_MBOX_INTR_ON, &ha->flags); } } void qla4_83xx_enable_intrs(struct scsi_qla_host *ha) { qla4_83xx_enable_mbox_intrs(ha); qla4_83xx_enable_iocb_intrs(ha); } void qla4_83xx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd, int incount) { int i; /* Load all mailbox registers, except mailbox 0. */ for (i = 1; i < incount; i++) writel(mbx_cmd[i], &ha->qla4_83xx_reg->mailbox_in[i]); writel(mbx_cmd[0], &ha->qla4_83xx_reg->mailbox_in[0]); /* Set Host Interrupt register to 1, to tell the firmware that * a mailbox command is pending. Firmware after reading the * mailbox command, clears the host interrupt register */ writel(HINT_MBX_INT_PENDING, &ha->qla4_83xx_reg->host_intr); } void qla4_83xx_process_mbox_intr(struct scsi_qla_host *ha, int outcount) { int intr_status; intr_status = readl(&ha->qla4_83xx_reg->risc_intr); if (intr_status) { ha->mbox_status_count = outcount; ha->isp_ops->interrupt_service_routine(ha, intr_status); } } /** * qla4_83xx_isp_reset - Resets ISP and aborts all outstanding commands. * @ha: pointer to host adapter structure. 
**/ int qla4_83xx_isp_reset(struct scsi_qla_host *ha) { int rval; uint32_t dev_state; ha->isp_ops->idc_lock(ha); dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE); if (ql4xdontresethba) qla4_83xx_set_idc_dontreset(ha); if (dev_state == QLA8XXX_DEV_READY) { /* If IDC_CTRL DONTRESETHBA_BIT0 is set dont do reset * recovery */ if (qla4_83xx_idc_dontreset(ha) == DONTRESET_BIT0) { ql4_printk(KERN_ERR, ha, "%s: Reset recovery disabled\n", __func__); rval = QLA_ERROR; goto exit_isp_reset; } DEBUG2(ql4_printk(KERN_INFO, ha, "%s: HW State: NEED RESET\n", __func__)); qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, QLA8XXX_DEV_NEED_RESET); } else { /* If device_state is NEED_RESET, go ahead with * Reset,irrespective of ql4xdontresethba. This is to allow a * non-reset-owner to force a reset. Non-reset-owner sets * the IDC_CTRL BIT0 to prevent Reset-owner from doing a Reset * and then forces a Reset by setting device_state to * NEED_RESET. */ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: HW state already set to NEED_RESET\n", __func__)); } /* For ISP8324 and ISP8042, Reset owner is NIC, iSCSI or FCOE based on * priority and which drivers are present. Unlike ISP8022, the function * setting NEED_RESET, may not be the Reset owner. */ if (qla4_83xx_can_perform_reset(ha)) set_bit(AF_8XXX_RST_OWNER, &ha->flags); ha->isp_ops->idc_unlock(ha); rval = qla4_8xxx_device_state_handler(ha); ha->isp_ops->idc_lock(ha); qla4_8xxx_clear_rst_ready(ha); exit_isp_reset: ha->isp_ops->idc_unlock(ha); if (rval == QLA_SUCCESS) clear_bit(AF_FW_RECOVERY, &ha->flags); return rval; } static void qla4_83xx_dump_pause_control_regs(struct scsi_qla_host *ha) { u32 val = 0, val1 = 0; int i; qla4_83xx_rd_reg_indirect(ha, QLA83XX_SRE_SHIM_CONTROL, &val); DEBUG2(ql4_printk(KERN_INFO, ha, "SRE-Shim Ctrl:0x%x\n", val)); /* Port 0 Rx Buffer Pause Threshold Registers. */ DEBUG2(ql4_printk(KERN_INFO, ha, "Port 0 Rx Buffer Pause Threshold Registers[TC7..TC0]:")); for (i = 0; i < 8; i++) { qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT0_RXB_PAUSE_THRS + (i * 0x4), &val); DEBUG2(pr_info("0x%x ", val)); } DEBUG2(pr_info("\n")); /* Port 1 Rx Buffer Pause Threshold Registers. */ DEBUG2(ql4_printk(KERN_INFO, ha, "Port 1 Rx Buffer Pause Threshold Registers[TC7..TC0]:")); for (i = 0; i < 8; i++) { qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT1_RXB_PAUSE_THRS + (i * 0x4), &val); DEBUG2(pr_info("0x%x ", val)); } DEBUG2(pr_info("\n")); /* Port 0 RxB Traffic Class Max Cell Registers. */ DEBUG2(ql4_printk(KERN_INFO, ha, "Port 0 RxB Traffic Class Max Cell Registers[3..0]:")); for (i = 0; i < 4; i++) { qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT0_RXB_TC_MAX_CELL + (i * 0x4), &val); DEBUG2(pr_info("0x%x ", val)); } DEBUG2(pr_info("\n")); /* Port 1 RxB Traffic Class Max Cell Registers. */ DEBUG2(ql4_printk(KERN_INFO, ha, "Port 1 RxB Traffic Class Max Cell Registers[3..0]:")); for (i = 0; i < 4; i++) { qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT1_RXB_TC_MAX_CELL + (i * 0x4), &val); DEBUG2(pr_info("0x%x ", val)); } DEBUG2(pr_info("\n")); /* Port 0 RxB Rx Traffic Class Stats. */ DEBUG2(ql4_printk(KERN_INFO, ha, "Port 0 RxB Rx Traffic Class Stats [TC7..TC0]")); for (i = 7; i >= 0; i--) { qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT0_RXB_TC_STATS, &val); val &= ~(0x7 << 29); /* Reset bits 29 to 31 */ qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT0_RXB_TC_STATS, (val | (i << 29))); qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT0_RXB_TC_STATS, &val); DEBUG2(pr_info("0x%x ", val)); } DEBUG2(pr_info("\n")); /* Port 1 RxB Rx Traffic Class Stats. 
*/ DEBUG2(ql4_printk(KERN_INFO, ha, "Port 1 RxB Rx Traffic Class Stats [TC7..TC0]")); for (i = 7; i >= 0; i--) { qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT1_RXB_TC_STATS, &val); val &= ~(0x7 << 29); /* Reset bits 29 to 31 */ qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT1_RXB_TC_STATS, (val | (i << 29))); qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT1_RXB_TC_STATS, &val); DEBUG2(pr_info("0x%x ", val)); } DEBUG2(pr_info("\n")); qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT2_IFB_PAUSE_THRS, &val); qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT3_IFB_PAUSE_THRS, &val1); DEBUG2(ql4_printk(KERN_INFO, ha, "IFB-Pause Thresholds: Port 2:0x%x, Port 3:0x%x\n", val, val1)); } static void __qla4_83xx_disable_pause(struct scsi_qla_host *ha) { int i; /* set SRE-Shim Control Register */ qla4_83xx_wr_reg_indirect(ha, QLA83XX_SRE_SHIM_CONTROL, QLA83XX_SET_PAUSE_VAL); for (i = 0; i < 8; i++) { /* Port 0 Rx Buffer Pause Threshold Registers. */ qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT0_RXB_PAUSE_THRS + (i * 0x4), QLA83XX_SET_PAUSE_VAL); /* Port 1 Rx Buffer Pause Threshold Registers. */ qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT1_RXB_PAUSE_THRS + (i * 0x4), QLA83XX_SET_PAUSE_VAL); } for (i = 0; i < 4; i++) { /* Port 0 RxB Traffic Class Max Cell Registers. */ qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT0_RXB_TC_MAX_CELL + (i * 0x4), QLA83XX_SET_TC_MAX_CELL_VAL); /* Port 1 RxB Traffic Class Max Cell Registers. */ qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT1_RXB_TC_MAX_CELL + (i * 0x4), QLA83XX_SET_TC_MAX_CELL_VAL); } qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT2_IFB_PAUSE_THRS, QLA83XX_SET_PAUSE_VAL); qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT3_IFB_PAUSE_THRS, QLA83XX_SET_PAUSE_VAL); ql4_printk(KERN_INFO, ha, "Disabled pause frames successfully.\n"); } /** * qla4_83xx_eport_init - Initialize EPort. * @ha: Pointer to host adapter structure. * * If EPort hardware is in reset state before disabling pause, there would be * serious hardware wedging issues. To prevent this perform eport init everytime * before disabling pause frames. **/ static void qla4_83xx_eport_init(struct scsi_qla_host *ha) { /* Clear the 8 registers */ qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_REG, 0x0); qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT0, 0x0); qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT1, 0x0); qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT2, 0x0); qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT3, 0x0); qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_SRE_SHIM, 0x0); qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_EPG_SHIM, 0x0); qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_ETHER_PCS, 0x0); /* Write any value to Reset Control register */ qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_CONTROL, 0xFF); ql4_printk(KERN_INFO, ha, "EPORT is out of reset.\n"); } void qla4_83xx_disable_pause(struct scsi_qla_host *ha) { ha->isp_ops->idc_lock(ha); /* Before disabling pause frames, ensure that eport is not in reset */ qla4_83xx_eport_init(ha); qla4_83xx_dump_pause_control_regs(ha); __qla4_83xx_disable_pause(ha); ha->isp_ops->idc_unlock(ha); } /** * qla4_83xx_is_detached - Check if we are marked invisible. * @ha: Pointer to host adapter structure. **/ int qla4_83xx_is_detached(struct scsi_qla_host *ha) { uint32_t drv_active; drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE); if (test_bit(AF_INIT_DONE, &ha->flags) && !(drv_active & (1 << ha->func_num))) { DEBUG2(ql4_printk(KERN_INFO, ha, "%s: drv_active = 0x%X\n", __func__, drv_active)); return QLA_SUCCESS; } return QLA_ERROR; }
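qla4_83xx_drv_lock() and qla4_83xx_drv_unlock() above pack the IDC lock owner and an acquisition counter into one 32-bit register value: bits 0-7 carry the owning PCI function (0xFF once released) and bits 8-31 carry a counter bumped on every successful acquisition. A small standalone sketch of just that bit arithmetic, with hypothetical helper names and no hardware access:

/* QLA83XX_DRV_LOCK_ID encoding as used in the code above:
 * bits 0-7 = owning function, bits 8-31 = acquisition counter. */
#include <assert.h>
#include <stdint.h>

static uint32_t lock_id_acquire(uint32_t old_id, uint32_t func_num)
{
	/* bump the counter (bits 8-31), replace the owner (bits 0-7) */
	return ((old_id + (1u << 8)) & ~0xFFu) | (func_num & 0xFFu);
}

static uint32_t lock_id_release(uint32_t id)
{
	/* keep the counter, mark the owner field as 0xFF (unowned) */
	return id | 0xFFu;
}

int main(void)
{
	uint32_t id = 0;

	id = lock_id_acquire(id, 3);	/* function 3 takes the lock */
	assert((id & 0xFF) == 3);
	assert((id >> 8) == 1);		/* counter incremented       */

	id = lock_id_release(id);
	assert((id & 0xFF) == 0xFF);	/* no owner                  */
	assert((id >> 8) == 1);		/* counter preserved         */
	return 0;
}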
linux-master
drivers/scsi/qla4xxx/ql4_83xx.c
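qla4_83xx_reset_seq_checksum_test() in the file above validates the flash reset template by summing it as 16-bit words and folding the carries back until the result fits in 16 bits; per the in-code comment, the folded sum is what decides whether the template is accepted. The sketch below exercises only that folding arithmetic on made-up words chosen for illustration; it is not the driver function and makes no claim about real template contents.

/* Carry-folding 16-bit sum, mirroring the arithmetic in
 * qla4_83xx_reset_seq_checksum_test().  Sample words are invented. */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

static uint32_t fold16_sum(const uint16_t *buff, size_t u16_count)
{
	uint32_t sum = 0;

	while (u16_count-- > 0)
		sum += *buff++;

	while (sum >> 16)		/* fold carries back into 16 bits */
		sum = (sum & 0xFFFF) + (sum >> 16);

	return sum;
}

int main(void)
{
	/* 0x1234 + 0xF000 + 0x0ACC = 0x10D00 -> fold: 0x0D00 + 0x1 = 0x0D01 */
	const uint16_t words[] = { 0x1234, 0xF000, 0x0ACC };

	assert(fold16_sum(words, 3) == 0x0D01);
	return 0;
}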
// SPDX-License-Identifier: GPL-2.0-only /* * QLogic iSCSI HBA Driver * Copyright (c) 2003-2013 QLogic Corporation */ #include <linux/ctype.h> #include "ql4_def.h" #include "ql4_glbl.h" #include "ql4_dbg.h" #include "ql4_inline.h" #include "ql4_version.h" void qla4xxx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd, int in_count) { int i; /* Load all mailbox registers, except mailbox 0. */ for (i = 1; i < in_count; i++) writel(mbx_cmd[i], &ha->reg->mailbox[i]); /* Wakeup firmware */ writel(mbx_cmd[0], &ha->reg->mailbox[0]); readl(&ha->reg->mailbox[0]); writel(set_rmask(CSR_INTR_RISC), &ha->reg->ctrl_status); readl(&ha->reg->ctrl_status); } void qla4xxx_process_mbox_intr(struct scsi_qla_host *ha, int out_count) { int intr_status; intr_status = readl(&ha->reg->ctrl_status); if (intr_status & INTR_PENDING) { /* * Service the interrupt. * The ISR will save the mailbox status registers * to a temporary storage location in the adapter structure. */ ha->mbox_status_count = out_count; ha->isp_ops->interrupt_service_routine(ha, intr_status); } } /** * qla4xxx_is_intr_poll_mode - Are we allowed to poll for interrupts? * @ha: Pointer to host adapter structure. * returns: 1=polling mode, 0=non-polling mode **/ static int qla4xxx_is_intr_poll_mode(struct scsi_qla_host *ha) { int rval = 1; if (is_qla8032(ha) || is_qla8042(ha)) { if (test_bit(AF_IRQ_ATTACHED, &ha->flags) && test_bit(AF_83XX_MBOX_INTR_ON, &ha->flags)) rval = 0; } else { if (test_bit(AF_IRQ_ATTACHED, &ha->flags) && test_bit(AF_INTERRUPTS_ON, &ha->flags) && test_bit(AF_ONLINE, &ha->flags) && !test_bit(AF_HA_REMOVAL, &ha->flags)) rval = 0; } return rval; } /** * qla4xxx_mailbox_command - issues mailbox commands * @ha: Pointer to host adapter structure. * @inCount: number of mailbox registers to load. * @outCount: number of mailbox registers to return. * @mbx_cmd: data pointer for mailbox in registers. * @mbx_sts: data pointer for mailbox out registers. * * This routine issue mailbox commands and waits for completion. * If outCount is 0, this routine completes successfully WITHOUT waiting * for the mailbox command to complete. 
**/ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount, uint8_t outCount, uint32_t *mbx_cmd, uint32_t *mbx_sts) { int status = QLA_ERROR; uint8_t i; u_long wait_count; unsigned long flags = 0; uint32_t dev_state; /* Make sure that pointers are valid */ if (!mbx_cmd || !mbx_sts) { DEBUG2(printk("scsi%ld: %s: Invalid mbx_cmd or mbx_sts " "pointer\n", ha->host_no, __func__)); return status; } if (is_qla40XX(ha)) { if (test_bit(AF_HA_REMOVAL, &ha->flags)) { DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: " "prematurely completing mbx cmd as " "adapter removal detected\n", ha->host_no, __func__)); return status; } } if ((is_aer_supported(ha)) && (test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))) { DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Perm failure on EEH, " "timeout MBX Exiting.\n", ha->host_no, __func__)); return status; } /* Mailbox code active */ wait_count = MBOX_TOV * 100; while (wait_count--) { mutex_lock(&ha->mbox_sem); if (!test_bit(AF_MBOX_COMMAND, &ha->flags)) { set_bit(AF_MBOX_COMMAND, &ha->flags); mutex_unlock(&ha->mbox_sem); break; } mutex_unlock(&ha->mbox_sem); if (!wait_count) { DEBUG2(printk("scsi%ld: %s: mbox_sem failed\n", ha->host_no, __func__)); return status; } msleep(10); } if (is_qla80XX(ha)) { if (test_bit(AF_FW_RECOVERY, &ha->flags)) { DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: prematurely completing mbx cmd as firmware recovery detected\n", ha->host_no, __func__)); goto mbox_exit; } /* Do not send any mbx cmd if h/w is in failed state*/ ha->isp_ops->idc_lock(ha); dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE); ha->isp_ops->idc_unlock(ha); if (dev_state == QLA8XXX_DEV_FAILED) { ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: H/W is in failed state, do not send any mailbox commands\n", ha->host_no, __func__); goto mbox_exit; } } spin_lock_irqsave(&ha->hardware_lock, flags); ha->mbox_status_count = outCount; for (i = 0; i < outCount; i++) ha->mbox_status[i] = 0; /* Queue the mailbox command to the firmware */ ha->isp_ops->queue_mailbox_command(ha, mbx_cmd, inCount); spin_unlock_irqrestore(&ha->hardware_lock, flags); /* Wait for completion */ /* * If we don't want status, don't wait for the mailbox command to * complete. For example, MBOX_CMD_RESET_FW doesn't return status, * you must poll the inbound Interrupt Mask for completion. */ if (outCount == 0) { status = QLA_SUCCESS; goto mbox_exit; } /* * Wait for completion: Poll or completion queue */ if (qla4xxx_is_intr_poll_mode(ha)) { /* Poll for command to complete */ wait_count = jiffies + MBOX_TOV * HZ; while (test_bit(AF_MBOX_COMMAND_DONE, &ha->flags) == 0) { if (time_after_eq(jiffies, wait_count)) break; /* * Service the interrupt. * The ISR will save the mailbox status registers * to a temporary storage location in the adapter * structure. */ spin_lock_irqsave(&ha->hardware_lock, flags); ha->isp_ops->process_mailbox_interrupt(ha, outCount); spin_unlock_irqrestore(&ha->hardware_lock, flags); msleep(10); } } else { /* Do not poll for completion. Use completion queue */ set_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags); wait_for_completion_timeout(&ha->mbx_intr_comp, MBOX_TOV * HZ); clear_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags); } /* Check for mailbox timeout. 
*/ if (!test_bit(AF_MBOX_COMMAND_DONE, &ha->flags)) { if (is_qla80XX(ha) && test_bit(AF_FW_RECOVERY, &ha->flags)) { DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld: %s: prematurely completing mbx cmd as " "firmware recovery detected\n", ha->host_no, __func__)); goto mbox_exit; } ql4_printk(KERN_WARNING, ha, "scsi%ld: Mailbox Cmd 0x%08X timed out, Scheduling Adapter Reset\n", ha->host_no, mbx_cmd[0]); ha->mailbox_timeout_count++; mbx_sts[0] = (-1); set_bit(DPC_RESET_HA, &ha->dpc_flags); if (is_qla8022(ha)) { ql4_printk(KERN_INFO, ha, "disabling pause transmit on port 0 & 1.\n"); qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98, CRB_NIU_XG_PAUSE_CTL_P0 | CRB_NIU_XG_PAUSE_CTL_P1); } else if (is_qla8032(ha) || is_qla8042(ha)) { ql4_printk(KERN_INFO, ha, " %s: disabling pause transmit on port 0 & 1.\n", __func__); qla4_83xx_disable_pause(ha); } goto mbox_exit; } /* * Copy the mailbox out registers to the caller's mailbox in/out * structure. */ spin_lock_irqsave(&ha->hardware_lock, flags); for (i = 0; i < outCount; i++) mbx_sts[i] = ha->mbox_status[i]; /* Set return status and error flags (if applicable). */ switch (ha->mbox_status[0]) { case MBOX_STS_COMMAND_COMPLETE: status = QLA_SUCCESS; break; case MBOX_STS_INTERMEDIATE_COMPLETION: status = QLA_SUCCESS; break; case MBOX_STS_BUSY: ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Cmd = %08X, ISP BUSY\n", ha->host_no, __func__, mbx_cmd[0]); ha->mailbox_timeout_count++; break; default: ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: FAILED, MBOX CMD = %08X, MBOX STS = %08X %08X %08X %08X %08X %08X %08X %08X\n", ha->host_no, __func__, mbx_cmd[0], mbx_sts[0], mbx_sts[1], mbx_sts[2], mbx_sts[3], mbx_sts[4], mbx_sts[5], mbx_sts[6], mbx_sts[7]); break; } spin_unlock_irqrestore(&ha->hardware_lock, flags); mbox_exit: mutex_lock(&ha->mbox_sem); clear_bit(AF_MBOX_COMMAND, &ha->flags); mutex_unlock(&ha->mbox_sem); clear_bit(AF_MBOX_COMMAND_DONE, &ha->flags); return status; } /** * qla4xxx_get_minidump_template - Get the firmware template * @ha: Pointer to host adapter structure. * @phys_addr: dma address for template * * Obtain the minidump template from firmware during initialization * as it may not be available when minidump is desired. **/ int qla4xxx_get_minidump_template(struct scsi_qla_host *ha, dma_addr_t phys_addr) { uint32_t mbox_cmd[MBOX_REG_COUNT]; uint32_t mbox_sts[MBOX_REG_COUNT]; int status; memset(&mbox_cmd, 0, sizeof(mbox_cmd)); memset(&mbox_sts, 0, sizeof(mbox_sts)); mbox_cmd[0] = MBOX_CMD_MINIDUMP; mbox_cmd[1] = MINIDUMP_GET_TMPLT_SUBCOMMAND; mbox_cmd[2] = LSDW(phys_addr); mbox_cmd[3] = MSDW(phys_addr); mbox_cmd[4] = ha->fw_dump_tmplt_size; mbox_cmd[5] = 0; status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, &mbox_cmd[0], &mbox_sts[0]); if (status != QLA_SUCCESS) { DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Cmd = %08X, mbx[0] = 0x%04x, mbx[1] = 0x%04x\n", ha->host_no, __func__, mbox_cmd[0], mbox_sts[0], mbox_sts[1])); } return status; } /** * qla4xxx_req_template_size - Get minidump template size from firmware. * @ha: Pointer to host adapter structure. 
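 *
 * Issues the MBOX_CMD_MINIDUMP GET_SIZE subcommand and caches the reported
 * size in ha->fw_dump_tmplt_size; returns QLA_ERROR if the firmware fails
 * the request or reports a zero-length template.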
**/ int qla4xxx_req_template_size(struct scsi_qla_host *ha) { uint32_t mbox_cmd[MBOX_REG_COUNT]; uint32_t mbox_sts[MBOX_REG_COUNT]; int status; memset(&mbox_cmd, 0, sizeof(mbox_cmd)); memset(&mbox_sts, 0, sizeof(mbox_sts)); mbox_cmd[0] = MBOX_CMD_MINIDUMP; mbox_cmd[1] = MINIDUMP_GET_SIZE_SUBCOMMAND; status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 8, &mbox_cmd[0], &mbox_sts[0]); if (status == QLA_SUCCESS) { ha->fw_dump_tmplt_size = mbox_sts[1]; DEBUG2(ql4_printk(KERN_INFO, ha, "%s: sts[0]=0x%04x, template size=0x%04x, size_cm_02=0x%04x, size_cm_04=0x%04x, size_cm_08=0x%04x, size_cm_10=0x%04x, size_cm_FF=0x%04x, version=0x%04x\n", __func__, mbox_sts[0], mbox_sts[1], mbox_sts[2], mbox_sts[3], mbox_sts[4], mbox_sts[5], mbox_sts[6], mbox_sts[7])); if (ha->fw_dump_tmplt_size == 0) status = QLA_ERROR; } else { ql4_printk(KERN_WARNING, ha, "%s: Error sts[0]=0x%04x, mbx[1]=0x%04x\n", __func__, mbox_sts[0], mbox_sts[1]); status = QLA_ERROR; } return status; } void qla4xxx_mailbox_premature_completion(struct scsi_qla_host *ha) { set_bit(AF_FW_RECOVERY, &ha->flags); ql4_printk(KERN_INFO, ha, "scsi%ld: %s: set FW RECOVERY!\n", ha->host_no, __func__); if (test_bit(AF_MBOX_COMMAND, &ha->flags)) { if (test_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags)) { complete(&ha->mbx_intr_comp); ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Due to fw " "recovery, doing premature completion of " "mbx cmd\n", ha->host_no, __func__); } else { set_bit(AF_MBOX_COMMAND_DONE, &ha->flags); ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Due to fw " "recovery, doing premature completion of " "polling mbx cmd\n", ha->host_no, __func__); } } } static uint8_t qla4xxx_set_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd, uint32_t *mbox_sts, dma_addr_t init_fw_cb_dma) { memset(mbox_cmd, 0, sizeof(mbox_cmd[0]) * MBOX_REG_COUNT); memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT); if (is_qla8022(ha)) qla4_82xx_wr_32(ha, ha->nx_db_wr_ptr, 0); mbox_cmd[0] = MBOX_CMD_INITIALIZE_FIRMWARE; mbox_cmd[1] = 0; mbox_cmd[2] = LSDW(init_fw_cb_dma); mbox_cmd[3] = MSDW(init_fw_cb_dma); mbox_cmd[4] = sizeof(struct addr_ctrl_blk); if (qla4xxx_mailbox_command(ha, 6, 6, mbox_cmd, mbox_sts) != QLA_SUCCESS) { DEBUG2(printk(KERN_WARNING "scsi%ld: %s: " "MBOX_CMD_INITIALIZE_FIRMWARE" " failed w/ status %04X\n", ha->host_no, __func__, mbox_sts[0])); return QLA_ERROR; } return QLA_SUCCESS; } uint8_t qla4xxx_get_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd, uint32_t *mbox_sts, dma_addr_t init_fw_cb_dma) { memset(mbox_cmd, 0, sizeof(mbox_cmd[0]) * MBOX_REG_COUNT); memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT); mbox_cmd[0] = MBOX_CMD_GET_INIT_FW_CTRL_BLOCK; mbox_cmd[2] = LSDW(init_fw_cb_dma); mbox_cmd[3] = MSDW(init_fw_cb_dma); mbox_cmd[4] = sizeof(struct addr_ctrl_blk); if (qla4xxx_mailbox_command(ha, 5, 5, mbox_cmd, mbox_sts) != QLA_SUCCESS) { DEBUG2(printk(KERN_WARNING "scsi%ld: %s: " "MBOX_CMD_GET_INIT_FW_CTRL_BLOCK" " failed w/ status %04X\n", ha->host_no, __func__, mbox_sts[0])); return QLA_ERROR; } return QLA_SUCCESS; } uint8_t qla4xxx_set_ipaddr_state(uint8_t fw_ipaddr_state) { uint8_t ipaddr_state; switch (fw_ipaddr_state) { case IP_ADDRSTATE_UNCONFIGURED: ipaddr_state = ISCSI_IPDDRESS_STATE_UNCONFIGURED; break; case IP_ADDRSTATE_INVALID: ipaddr_state = ISCSI_IPDDRESS_STATE_INVALID; break; case IP_ADDRSTATE_ACQUIRING: ipaddr_state = ISCSI_IPDDRESS_STATE_ACQUIRING; break; case IP_ADDRSTATE_TENTATIVE: ipaddr_state = ISCSI_IPDDRESS_STATE_TENTATIVE; break; case IP_ADDRSTATE_DEPRICATED: ipaddr_state = ISCSI_IPDDRESS_STATE_DEPRECATED; break; case 
IP_ADDRSTATE_PREFERRED: ipaddr_state = ISCSI_IPDDRESS_STATE_VALID; break; case IP_ADDRSTATE_DISABLING: ipaddr_state = ISCSI_IPDDRESS_STATE_DISABLING; break; default: ipaddr_state = ISCSI_IPDDRESS_STATE_UNCONFIGURED; } return ipaddr_state; } static void qla4xxx_update_local_ip(struct scsi_qla_host *ha, struct addr_ctrl_blk *init_fw_cb) { ha->ip_config.tcp_options = le16_to_cpu(init_fw_cb->ipv4_tcp_opts); ha->ip_config.ipv4_options = le16_to_cpu(init_fw_cb->ipv4_ip_opts); ha->ip_config.ipv4_addr_state = qla4xxx_set_ipaddr_state(init_fw_cb->ipv4_addr_state); ha->ip_config.eth_mtu_size = le16_to_cpu(init_fw_cb->eth_mtu_size); ha->ip_config.ipv4_port = le16_to_cpu(init_fw_cb->ipv4_port); if (ha->acb_version == ACB_SUPPORTED) { ha->ip_config.ipv6_options = le16_to_cpu(init_fw_cb->ipv6_opts); ha->ip_config.ipv6_addl_options = le16_to_cpu(init_fw_cb->ipv6_addtl_opts); ha->ip_config.ipv6_tcp_options = le16_to_cpu(init_fw_cb->ipv6_tcp_opts); } /* Save IPv4 Address Info */ memcpy(ha->ip_config.ip_address, init_fw_cb->ipv4_addr, min(sizeof(ha->ip_config.ip_address), sizeof(init_fw_cb->ipv4_addr))); memcpy(ha->ip_config.subnet_mask, init_fw_cb->ipv4_subnet, min(sizeof(ha->ip_config.subnet_mask), sizeof(init_fw_cb->ipv4_subnet))); memcpy(ha->ip_config.gateway, init_fw_cb->ipv4_gw_addr, min(sizeof(ha->ip_config.gateway), sizeof(init_fw_cb->ipv4_gw_addr))); ha->ip_config.ipv4_vlan_tag = be16_to_cpu(init_fw_cb->ipv4_vlan_tag); ha->ip_config.control = init_fw_cb->control; ha->ip_config.tcp_wsf = init_fw_cb->ipv4_tcp_wsf; ha->ip_config.ipv4_tos = init_fw_cb->ipv4_tos; ha->ip_config.ipv4_cache_id = init_fw_cb->ipv4_cacheid; ha->ip_config.ipv4_alt_cid_len = init_fw_cb->ipv4_dhcp_alt_cid_len; memcpy(ha->ip_config.ipv4_alt_cid, init_fw_cb->ipv4_dhcp_alt_cid, min(sizeof(ha->ip_config.ipv4_alt_cid), sizeof(init_fw_cb->ipv4_dhcp_alt_cid))); ha->ip_config.ipv4_vid_len = init_fw_cb->ipv4_dhcp_vid_len; memcpy(ha->ip_config.ipv4_vid, init_fw_cb->ipv4_dhcp_vid, min(sizeof(ha->ip_config.ipv4_vid), sizeof(init_fw_cb->ipv4_dhcp_vid))); ha->ip_config.ipv4_ttl = init_fw_cb->ipv4_ttl; ha->ip_config.def_timeout = le16_to_cpu(init_fw_cb->def_timeout); ha->ip_config.abort_timer = init_fw_cb->abort_timer; ha->ip_config.iscsi_options = le16_to_cpu(init_fw_cb->iscsi_opts); ha->ip_config.iscsi_max_pdu_size = le16_to_cpu(init_fw_cb->iscsi_max_pdu_size); ha->ip_config.iscsi_first_burst_len = le16_to_cpu(init_fw_cb->iscsi_fburst_len); ha->ip_config.iscsi_max_outstnd_r2t = le16_to_cpu(init_fw_cb->iscsi_max_outstnd_r2t); ha->ip_config.iscsi_max_burst_len = le16_to_cpu(init_fw_cb->iscsi_max_burst_len); memcpy(ha->ip_config.iscsi_name, init_fw_cb->iscsi_name, min(sizeof(ha->ip_config.iscsi_name), sizeof(init_fw_cb->iscsi_name))); if (is_ipv6_enabled(ha)) { /* Save IPv6 Address */ ha->ip_config.ipv6_link_local_state = qla4xxx_set_ipaddr_state(init_fw_cb->ipv6_lnk_lcl_addr_state); ha->ip_config.ipv6_addr0_state = qla4xxx_set_ipaddr_state(init_fw_cb->ipv6_addr0_state); ha->ip_config.ipv6_addr1_state = qla4xxx_set_ipaddr_state(init_fw_cb->ipv6_addr1_state); switch (le16_to_cpu(init_fw_cb->ipv6_dflt_rtr_state)) { case IPV6_RTRSTATE_UNKNOWN: ha->ip_config.ipv6_default_router_state = ISCSI_ROUTER_STATE_UNKNOWN; break; case IPV6_RTRSTATE_MANUAL: ha->ip_config.ipv6_default_router_state = ISCSI_ROUTER_STATE_MANUAL; break; case IPV6_RTRSTATE_ADVERTISED: ha->ip_config.ipv6_default_router_state = ISCSI_ROUTER_STATE_ADVERTISED; break; case IPV6_RTRSTATE_STALE: ha->ip_config.ipv6_default_router_state = ISCSI_ROUTER_STATE_STALE; break; default: 
ha->ip_config.ipv6_default_router_state = ISCSI_ROUTER_STATE_UNKNOWN; } ha->ip_config.ipv6_link_local_addr.in6_u.u6_addr8[0] = 0xFE; ha->ip_config.ipv6_link_local_addr.in6_u.u6_addr8[1] = 0x80; memcpy(&ha->ip_config.ipv6_link_local_addr.in6_u.u6_addr8[8], init_fw_cb->ipv6_if_id, min(sizeof(ha->ip_config.ipv6_link_local_addr)/2, sizeof(init_fw_cb->ipv6_if_id))); memcpy(&ha->ip_config.ipv6_addr0, init_fw_cb->ipv6_addr0, min(sizeof(ha->ip_config.ipv6_addr0), sizeof(init_fw_cb->ipv6_addr0))); memcpy(&ha->ip_config.ipv6_addr1, init_fw_cb->ipv6_addr1, min(sizeof(ha->ip_config.ipv6_addr1), sizeof(init_fw_cb->ipv6_addr1))); memcpy(&ha->ip_config.ipv6_default_router_addr, init_fw_cb->ipv6_dflt_rtr_addr, min(sizeof(ha->ip_config.ipv6_default_router_addr), sizeof(init_fw_cb->ipv6_dflt_rtr_addr))); ha->ip_config.ipv6_vlan_tag = be16_to_cpu(init_fw_cb->ipv6_vlan_tag); ha->ip_config.ipv6_port = le16_to_cpu(init_fw_cb->ipv6_port); ha->ip_config.ipv6_cache_id = init_fw_cb->ipv6_cache_id; ha->ip_config.ipv6_flow_lbl = le16_to_cpu(init_fw_cb->ipv6_flow_lbl); ha->ip_config.ipv6_traffic_class = init_fw_cb->ipv6_traffic_class; ha->ip_config.ipv6_hop_limit = init_fw_cb->ipv6_hop_limit; ha->ip_config.ipv6_nd_reach_time = le32_to_cpu(init_fw_cb->ipv6_nd_reach_time); ha->ip_config.ipv6_nd_rexmit_timer = le32_to_cpu(init_fw_cb->ipv6_nd_rexmit_timer); ha->ip_config.ipv6_nd_stale_timeout = le32_to_cpu(init_fw_cb->ipv6_nd_stale_timeout); ha->ip_config.ipv6_dup_addr_detect_count = init_fw_cb->ipv6_dup_addr_detect_count; ha->ip_config.ipv6_gw_advrt_mtu = le32_to_cpu(init_fw_cb->ipv6_gw_advrt_mtu); ha->ip_config.ipv6_tcp_wsf = init_fw_cb->ipv6_tcp_wsf; } } uint8_t qla4xxx_update_local_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd, uint32_t *mbox_sts, struct addr_ctrl_blk *init_fw_cb, dma_addr_t init_fw_cb_dma) { if (qla4xxx_get_ifcb(ha, mbox_cmd, mbox_sts, init_fw_cb_dma) != QLA_SUCCESS) { DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Failed to get init_fw_ctrl_blk\n", ha->host_no, __func__)); return QLA_ERROR; } DEBUG2(qla4xxx_dump_buffer(init_fw_cb, sizeof(struct addr_ctrl_blk))); /* Save some info in adapter structure. */ ha->acb_version = init_fw_cb->acb_version; ha->firmware_options = le16_to_cpu(init_fw_cb->fw_options); ha->heartbeat_interval = init_fw_cb->hb_interval; memcpy(ha->name_string, init_fw_cb->iscsi_name, min(sizeof(ha->name_string), sizeof(init_fw_cb->iscsi_name))); ha->def_timeout = le16_to_cpu(init_fw_cb->def_timeout); /*memcpy(ha->alias, init_fw_cb->Alias, min(sizeof(ha->alias), sizeof(init_fw_cb->Alias)));*/ qla4xxx_update_local_ip(ha, init_fw_cb); return QLA_SUCCESS; } /** * qla4xxx_initialize_fw_cb - initializes firmware control block. * @ha: Pointer to host adapter structure. **/ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha) { struct addr_ctrl_blk *init_fw_cb; dma_addr_t init_fw_cb_dma; uint32_t mbox_cmd[MBOX_REG_COUNT]; uint32_t mbox_sts[MBOX_REG_COUNT]; int status = QLA_ERROR; init_fw_cb = dma_alloc_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk), &init_fw_cb_dma, GFP_KERNEL); if (init_fw_cb == NULL) { DEBUG2(printk("scsi%ld: %s: Unable to alloc init_cb\n", ha->host_no, __func__)); goto exit_init_fw_cb_no_free; } /* Get Initialize Firmware Control Block. */ memset(&mbox_cmd, 0, sizeof(mbox_cmd)); memset(&mbox_sts, 0, sizeof(mbox_sts)); if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma) != QLA_SUCCESS) { goto exit_init_fw_cb; } /* Fill in the request and response queue information. 
*/ init_fw_cb->rqq_consumer_idx = cpu_to_le16(ha->request_out); init_fw_cb->compq_producer_idx = cpu_to_le16(ha->response_in); init_fw_cb->rqq_len = cpu_to_le16(REQUEST_QUEUE_DEPTH); init_fw_cb->compq_len = cpu_to_le16(RESPONSE_QUEUE_DEPTH); init_fw_cb->rqq_addr_lo = cpu_to_le32(LSDW(ha->request_dma)); init_fw_cb->rqq_addr_hi = cpu_to_le32(MSDW(ha->request_dma)); init_fw_cb->compq_addr_lo = cpu_to_le32(LSDW(ha->response_dma)); init_fw_cb->compq_addr_hi = cpu_to_le32(MSDW(ha->response_dma)); init_fw_cb->shdwreg_addr_lo = cpu_to_le32(LSDW(ha->shadow_regs_dma)); init_fw_cb->shdwreg_addr_hi = cpu_to_le32(MSDW(ha->shadow_regs_dma)); /* Set up required options. */ init_fw_cb->fw_options |= cpu_to_le16(FWOPT_SESSION_MODE | FWOPT_INITIATOR_MODE); if (is_qla80XX(ha)) init_fw_cb->fw_options |= cpu_to_le16(FWOPT_ENABLE_CRBDB); init_fw_cb->fw_options &= cpu_to_le16(~FWOPT_TARGET_MODE); init_fw_cb->add_fw_options = 0; init_fw_cb->add_fw_options |= cpu_to_le16(ADFWOPT_SERIALIZE_TASK_MGMT); init_fw_cb->add_fw_options |= cpu_to_le16(ADFWOPT_AUTOCONN_DISABLE); if (qla4xxx_set_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma) != QLA_SUCCESS) { DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Failed to set init_fw_ctrl_blk\n", ha->host_no, __func__)); goto exit_init_fw_cb; } if (qla4xxx_update_local_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb, init_fw_cb_dma) != QLA_SUCCESS) { DEBUG2(printk("scsi%ld: %s: Failed to update local ifcb\n", ha->host_no, __func__)); goto exit_init_fw_cb; } status = QLA_SUCCESS; exit_init_fw_cb: dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk), init_fw_cb, init_fw_cb_dma); exit_init_fw_cb_no_free: return status; } /** * qla4xxx_get_dhcp_ip_address - gets HBA ip address via DHCP * @ha: Pointer to host adapter structure. **/ int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha) { struct addr_ctrl_blk *init_fw_cb; dma_addr_t init_fw_cb_dma; uint32_t mbox_cmd[MBOX_REG_COUNT]; uint32_t mbox_sts[MBOX_REG_COUNT]; init_fw_cb = dma_alloc_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk), &init_fw_cb_dma, GFP_KERNEL); if (init_fw_cb == NULL) { printk("scsi%ld: %s: Unable to alloc init_cb\n", ha->host_no, __func__); return QLA_ERROR; } /* Get Initialize Firmware Control Block. */ if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma) != QLA_SUCCESS) { DEBUG2(printk("scsi%ld: %s: Failed to get init_fw_ctrl_blk\n", ha->host_no, __func__)); dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk), init_fw_cb, init_fw_cb_dma); return QLA_ERROR; } /* Save IP Address. */ qla4xxx_update_local_ip(ha, init_fw_cb); dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk), init_fw_cb, init_fw_cb_dma); return QLA_SUCCESS; } /** * qla4xxx_get_firmware_state - gets firmware state of HBA * @ha: Pointer to host adapter structure. 
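 *
 * Issues MBOX_CMD_GET_FW_STATE and caches the returned firmware state,
 * board ID and additional firmware state in the adapter structure.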
**/ int qla4xxx_get_firmware_state(struct scsi_qla_host * ha) { uint32_t mbox_cmd[MBOX_REG_COUNT]; uint32_t mbox_sts[MBOX_REG_COUNT]; /* Get firmware version */ memset(&mbox_cmd, 0, sizeof(mbox_cmd)); memset(&mbox_sts, 0, sizeof(mbox_sts)); mbox_cmd[0] = MBOX_CMD_GET_FW_STATE; if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 4, &mbox_cmd[0], &mbox_sts[0]) != QLA_SUCCESS) { DEBUG2(printk("scsi%ld: %s: MBOX_CMD_GET_FW_STATE failed w/ " "status %04X\n", ha->host_no, __func__, mbox_sts[0])); return QLA_ERROR; } ha->firmware_state = mbox_sts[1]; ha->board_id = mbox_sts[2]; ha->addl_fw_state = mbox_sts[3]; DEBUG2(printk("scsi%ld: %s firmware_state=0x%x\n", ha->host_no, __func__, ha->firmware_state);) return QLA_SUCCESS; } /** * qla4xxx_get_firmware_status - retrieves firmware status * @ha: Pointer to host adapter structure. **/ int qla4xxx_get_firmware_status(struct scsi_qla_host * ha) { uint32_t mbox_cmd[MBOX_REG_COUNT]; uint32_t mbox_sts[MBOX_REG_COUNT]; /* Get firmware version */ memset(&mbox_cmd, 0, sizeof(mbox_cmd)); memset(&mbox_sts, 0, sizeof(mbox_sts)); mbox_cmd[0] = MBOX_CMD_GET_FW_STATUS; if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 3, &mbox_cmd[0], &mbox_sts[0]) != QLA_SUCCESS) { DEBUG2(printk("scsi%ld: %s: MBOX_CMD_GET_FW_STATUS failed w/ " "status %04X\n", ha->host_no, __func__, mbox_sts[0])); return QLA_ERROR; } /* High-water mark of IOCBs */ ha->iocb_hiwat = mbox_sts[2]; DEBUG2(ql4_printk(KERN_INFO, ha, "%s: firmware IOCBs available = %d\n", __func__, ha->iocb_hiwat)); if (ha->iocb_hiwat > IOCB_HIWAT_CUSHION) ha->iocb_hiwat -= IOCB_HIWAT_CUSHION; /* Ideally, we should not enter this code, as the # of firmware * IOCBs is hard-coded in the firmware. We set a default * iocb_hiwat here just in case */ if (ha->iocb_hiwat == 0) { ha->iocb_hiwat = REQUEST_QUEUE_DEPTH / 4; DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: Setting IOCB's to = %d\n", __func__, ha->iocb_hiwat)); } return QLA_SUCCESS; } /* * qla4xxx_get_fwddb_entry - retrieves firmware ddb entry * @ha: Pointer to host adapter structure. 
* @fw_ddb_index: Firmware's device database index * @fw_ddb_entry: Pointer to firmware's device database entry structure * @num_valid_ddb_entries: Pointer to number of valid ddb entries * @next_ddb_index: Pointer to next valid device database index * @fw_ddb_device_state: Pointer to device state **/ int qla4xxx_get_fwddb_entry(struct scsi_qla_host *ha, uint16_t fw_ddb_index, struct dev_db_entry *fw_ddb_entry, dma_addr_t fw_ddb_entry_dma, uint32_t *num_valid_ddb_entries, uint32_t *next_ddb_index, uint32_t *fw_ddb_device_state, uint32_t *conn_err_detail, uint16_t *tcp_source_port_num, uint16_t *connection_id) { int status = QLA_ERROR; uint16_t options; uint32_t mbox_cmd[MBOX_REG_COUNT]; uint32_t mbox_sts[MBOX_REG_COUNT]; /* Make sure the device index is valid */ if (fw_ddb_index >= MAX_DDB_ENTRIES) { DEBUG2(printk("scsi%ld: %s: ddb [%d] out of range.\n", ha->host_no, __func__, fw_ddb_index)); goto exit_get_fwddb; } memset(&mbox_cmd, 0, sizeof(mbox_cmd)); memset(&mbox_sts, 0, sizeof(mbox_sts)); if (fw_ddb_entry) memset(fw_ddb_entry, 0, sizeof(struct dev_db_entry)); mbox_cmd[0] = MBOX_CMD_GET_DATABASE_ENTRY; mbox_cmd[1] = (uint32_t) fw_ddb_index; mbox_cmd[2] = LSDW(fw_ddb_entry_dma); mbox_cmd[3] = MSDW(fw_ddb_entry_dma); mbox_cmd[4] = sizeof(struct dev_db_entry); if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 7, &mbox_cmd[0], &mbox_sts[0]) == QLA_ERROR) { DEBUG2(printk("scsi%ld: %s: MBOX_CMD_GET_DATABASE_ENTRY failed" " with status 0x%04X\n", ha->host_no, __func__, mbox_sts[0])); goto exit_get_fwddb; } if (fw_ddb_index != mbox_sts[1]) { DEBUG2(printk("scsi%ld: %s: ddb mismatch [%d] != [%d].\n", ha->host_no, __func__, fw_ddb_index, mbox_sts[1])); goto exit_get_fwddb; } if (fw_ddb_entry) { options = le16_to_cpu(fw_ddb_entry->options); if (options & DDB_OPT_IPV6_DEVICE) { ql4_printk(KERN_INFO, ha, "%s: DDB[%d] MB0 %04x Tot %d " "Next %d State %04x ConnErr %08x %pI6 " ":%04d \"%s\"\n", __func__, fw_ddb_index, mbox_sts[0], mbox_sts[2], mbox_sts[3], mbox_sts[4], mbox_sts[5], fw_ddb_entry->ip_addr, le16_to_cpu(fw_ddb_entry->port), fw_ddb_entry->iscsi_name); } else { ql4_printk(KERN_INFO, ha, "%s: DDB[%d] MB0 %04x Tot %d " "Next %d State %04x ConnErr %08x %pI4 " ":%04d \"%s\"\n", __func__, fw_ddb_index, mbox_sts[0], mbox_sts[2], mbox_sts[3], mbox_sts[4], mbox_sts[5], fw_ddb_entry->ip_addr, le16_to_cpu(fw_ddb_entry->port), fw_ddb_entry->iscsi_name); } } if (num_valid_ddb_entries) *num_valid_ddb_entries = mbox_sts[2]; if (next_ddb_index) *next_ddb_index = mbox_sts[3]; if (fw_ddb_device_state) *fw_ddb_device_state = mbox_sts[4]; /* * RA: This mailbox has been changed to pass connection error and * details. Its true for ISP4010 as per Version E - Not sure when it * was changed. Get the time2wait from the fw_dd_entry field : * default_time2wait which we call it as minTime2Wait DEV_DB_ENTRY * struct. 
*/ if (conn_err_detail) *conn_err_detail = mbox_sts[5]; if (tcp_source_port_num) *tcp_source_port_num = (uint16_t) (mbox_sts[6] >> 16); if (connection_id) *connection_id = (uint16_t) mbox_sts[6] & 0x00FF; status = QLA_SUCCESS; exit_get_fwddb: return status; } int qla4xxx_conn_open(struct scsi_qla_host *ha, uint16_t fw_ddb_index) { uint32_t mbox_cmd[MBOX_REG_COUNT]; uint32_t mbox_sts[MBOX_REG_COUNT]; int status; memset(&mbox_cmd, 0, sizeof(mbox_cmd)); memset(&mbox_sts, 0, sizeof(mbox_sts)); mbox_cmd[0] = MBOX_CMD_CONN_OPEN; mbox_cmd[1] = fw_ddb_index; status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, &mbox_cmd[0], &mbox_sts[0]); DEBUG2(ql4_printk(KERN_INFO, ha, "%s: status = %d mbx0 = 0x%x mbx1 = 0x%x\n", __func__, status, mbox_sts[0], mbox_sts[1])); return status; } /** * qla4xxx_set_ddb_entry - sets a ddb entry. * @ha: Pointer to host adapter structure. * @fw_ddb_index: Firmware's device database index * @fw_ddb_entry_dma: dma address of ddb entry * @mbx_sts: mailbox 0 to be returned or NULL * * This routine initializes or updates the adapter's device database * entry for the specified device. **/ int qla4xxx_set_ddb_entry(struct scsi_qla_host * ha, uint16_t fw_ddb_index, dma_addr_t fw_ddb_entry_dma, uint32_t *mbx_sts) { uint32_t mbox_cmd[MBOX_REG_COUNT]; uint32_t mbox_sts[MBOX_REG_COUNT]; int status; /* Do not wait for completion. The firmware will send us an * ASTS_DATABASE_CHANGED (0x8014) to notify us of the login status. */ memset(&mbox_cmd, 0, sizeof(mbox_cmd)); memset(&mbox_sts, 0, sizeof(mbox_sts)); mbox_cmd[0] = MBOX_CMD_SET_DATABASE_ENTRY; mbox_cmd[1] = (uint32_t) fw_ddb_index; mbox_cmd[2] = LSDW(fw_ddb_entry_dma); mbox_cmd[3] = MSDW(fw_ddb_entry_dma); mbox_cmd[4] = sizeof(struct dev_db_entry); status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], &mbox_sts[0]); if (mbx_sts) *mbx_sts = mbox_sts[0]; DEBUG2(printk("scsi%ld: %s: status=%d mbx0=0x%x mbx4=0x%x\n", ha->host_no, __func__, status, mbox_sts[0], mbox_sts[4]);) return status; } int qla4xxx_session_logout_ddb(struct scsi_qla_host *ha, struct ddb_entry *ddb_entry, int options) { int status; uint32_t mbox_cmd[MBOX_REG_COUNT]; uint32_t mbox_sts[MBOX_REG_COUNT]; memset(&mbox_cmd, 0, sizeof(mbox_cmd)); memset(&mbox_sts, 0, sizeof(mbox_sts)); mbox_cmd[0] = MBOX_CMD_CONN_CLOSE_SESS_LOGOUT; mbox_cmd[1] = ddb_entry->fw_ddb_index; mbox_cmd[3] = options; status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, &mbox_cmd[0], &mbox_sts[0]); if (status != QLA_SUCCESS) { DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MBOX_CMD_CONN_CLOSE_SESS_LOGOUT " "failed sts %04X %04X", __func__, mbox_sts[0], mbox_sts[1])); if ((mbox_sts[0] == MBOX_STS_COMMAND_ERROR) && (mbox_sts[1] == DDB_NOT_LOGGED_IN)) { set_bit(DDB_CONN_CLOSE_FAILURE, &ddb_entry->flags); } } return status; } /** * qla4xxx_get_crash_record - retrieves crash record. * @ha: Pointer to host adapter structure. * * This routine retrieves a crash record from the QLA4010 after an 8002h aen. **/ void qla4xxx_get_crash_record(struct scsi_qla_host * ha) { uint32_t mbox_cmd[MBOX_REG_COUNT]; uint32_t mbox_sts[MBOX_REG_COUNT]; struct crash_record *crash_record = NULL; dma_addr_t crash_record_dma = 0; uint32_t crash_record_size = 0; memset(&mbox_cmd, 0, sizeof(mbox_cmd)); memset(&mbox_sts, 0, sizeof(mbox_cmd)); /* Get size of crash record. 
*/ mbox_cmd[0] = MBOX_CMD_GET_CRASH_RECORD; if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], &mbox_sts[0]) != QLA_SUCCESS) { DEBUG2(printk("scsi%ld: %s: ERROR: Unable to retrieve size!\n", ha->host_no, __func__)); goto exit_get_crash_record; } crash_record_size = mbox_sts[4]; if (crash_record_size == 0) { DEBUG2(printk("scsi%ld: %s: ERROR: Crash record size is 0!\n", ha->host_no, __func__)); goto exit_get_crash_record; } /* Alloc Memory for Crash Record. */ crash_record = dma_alloc_coherent(&ha->pdev->dev, crash_record_size, &crash_record_dma, GFP_KERNEL); if (crash_record == NULL) goto exit_get_crash_record; /* Get Crash Record. */ memset(&mbox_cmd, 0, sizeof(mbox_cmd)); memset(&mbox_sts, 0, sizeof(mbox_cmd)); mbox_cmd[0] = MBOX_CMD_GET_CRASH_RECORD; mbox_cmd[2] = LSDW(crash_record_dma); mbox_cmd[3] = MSDW(crash_record_dma); mbox_cmd[4] = crash_record_size; if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], &mbox_sts[0]) != QLA_SUCCESS) goto exit_get_crash_record; /* Dump Crash Record. */ exit_get_crash_record: if (crash_record) dma_free_coherent(&ha->pdev->dev, crash_record_size, crash_record, crash_record_dma); } /** * qla4xxx_get_conn_event_log - retrieves connection event log * @ha: Pointer to host adapter structure. **/ void qla4xxx_get_conn_event_log(struct scsi_qla_host * ha) { uint32_t mbox_cmd[MBOX_REG_COUNT]; uint32_t mbox_sts[MBOX_REG_COUNT]; struct conn_event_log_entry *event_log = NULL; dma_addr_t event_log_dma = 0; uint32_t event_log_size = 0; uint32_t num_valid_entries; uint32_t oldest_entry = 0; uint32_t max_event_log_entries; uint8_t i; memset(&mbox_cmd, 0, sizeof(mbox_cmd)); memset(&mbox_sts, 0, sizeof(mbox_cmd)); /* Get size of connection event log. */ mbox_cmd[0] = MBOX_CMD_GET_CONN_EVENT_LOG; if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], &mbox_sts[0]) != QLA_SUCCESS) goto exit_get_event_log; event_log_size = mbox_sts[4]; if (event_log_size == 0) goto exit_get_event_log; /* Alloc Memory for Event Log. */ event_log = dma_alloc_coherent(&ha->pdev->dev, event_log_size, &event_log_dma, GFP_KERNEL); if (event_log == NULL) goto exit_get_event_log; /* Get Connection Event Log. */ memset(&mbox_cmd, 0, sizeof(mbox_cmd)); memset(&mbox_sts, 0, sizeof(mbox_cmd)); mbox_cmd[0] = MBOX_CMD_GET_CONN_EVENT_LOG; mbox_cmd[2] = LSDW(event_log_dma); mbox_cmd[3] = MSDW(event_log_dma); if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], &mbox_sts[0]) != QLA_SUCCESS) { DEBUG2(printk("scsi%ld: %s: ERROR: Unable to retrieve event " "log!\n", ha->host_no, __func__)); goto exit_get_event_log; } /* Dump Event Log. 
*/ num_valid_entries = mbox_sts[1]; max_event_log_entries = event_log_size / sizeof(struct conn_event_log_entry); if (num_valid_entries > max_event_log_entries) oldest_entry = num_valid_entries % max_event_log_entries; DEBUG3(printk("scsi%ld: Connection Event Log Dump (%d entries):\n", ha->host_no, num_valid_entries)); if (ql4xextended_error_logging == 3) { if (oldest_entry == 0) { /* Circular Buffer has not wrapped around */ for (i=0; i < num_valid_entries; i++) { qla4xxx_dump_buffer((uint8_t *)event_log+ (i*sizeof(*event_log)), sizeof(*event_log)); } } else { /* Circular Buffer has wrapped around - * display accordingly */ for (i=oldest_entry; i < max_event_log_entries; i++) { qla4xxx_dump_buffer((uint8_t *)event_log+ (i*sizeof(*event_log)), sizeof(*event_log)); } for (i=0; i < oldest_entry; i++) { qla4xxx_dump_buffer((uint8_t *)event_log+ (i*sizeof(*event_log)), sizeof(*event_log)); } } } exit_get_event_log: if (event_log) dma_free_coherent(&ha->pdev->dev, event_log_size, event_log, event_log_dma); } /** * qla4xxx_abort_task - issues Abort Task * @ha: Pointer to host adapter structure. * @srb: Pointer to srb entry * * This routine issues an Abort Task request to the firmware for the * command associated with the given srb. * The caller must ensure that the srb and its ddb_entry pointer * are valid before calling this routine. **/ int qla4xxx_abort_task(struct scsi_qla_host *ha, struct srb *srb) { uint32_t mbox_cmd[MBOX_REG_COUNT]; uint32_t mbox_sts[MBOX_REG_COUNT]; struct scsi_cmnd *cmd = srb->cmd; int status = QLA_SUCCESS; unsigned long flags = 0; uint32_t index; /* * Send abort task command to ISP, so that the ISP will return * request with ABORT status */ memset(&mbox_cmd, 0, sizeof(mbox_cmd)); memset(&mbox_sts, 0, sizeof(mbox_sts)); spin_lock_irqsave(&ha->hardware_lock, flags); index = (unsigned long)(unsigned char *)cmd->host_scribble; spin_unlock_irqrestore(&ha->hardware_lock, flags); /* Firmware already posted completion on response queue */ if (index == MAX_SRBS) return status; mbox_cmd[0] = MBOX_CMD_ABORT_TASK; mbox_cmd[1] = srb->ddb->fw_ddb_index; mbox_cmd[2] = index; /* Immediate Command Enable */ mbox_cmd[5] = 0x01; qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], &mbox_sts[0]); if (mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE) { status = QLA_ERROR; DEBUG2(printk(KERN_WARNING "scsi%ld:%d:%llu: abort task FAILED: " "mbx0=%04X, mb1=%04X, mb2=%04X, mb3=%04X, mb4=%04X\n", ha->host_no, cmd->device->id, cmd->device->lun, mbox_sts[0], mbox_sts[1], mbox_sts[2], mbox_sts[3], mbox_sts[4])); } return status; } /** * qla4xxx_reset_lun - issues LUN Reset * @ha: Pointer to host adapter structure. * @ddb_entry: Pointer to device database entry * @lun: lun number * * This routine performs a LUN RESET on the specified target/lun. * The caller must ensure that the ddb_entry pointer * is valid before calling this routine. 
**/ int qla4xxx_reset_lun(struct scsi_qla_host * ha, struct ddb_entry * ddb_entry, uint64_t lun) { uint32_t mbox_cmd[MBOX_REG_COUNT]; uint32_t mbox_sts[MBOX_REG_COUNT]; uint32_t scsi_lun[2]; int status = QLA_SUCCESS; DEBUG2(printk("scsi%ld:%d:%llu: lun reset issued\n", ha->host_no, ddb_entry->fw_ddb_index, lun)); /* * Send lun reset command to ISP, so that the ISP will return all * outstanding requests with RESET status */ memset(&mbox_cmd, 0, sizeof(mbox_cmd)); memset(&mbox_sts, 0, sizeof(mbox_sts)); int_to_scsilun(lun, (struct scsi_lun *) scsi_lun); mbox_cmd[0] = MBOX_CMD_LUN_RESET; mbox_cmd[1] = ddb_entry->fw_ddb_index; /* FW expects LUN bytes 0-3 in Incoming Mailbox 2 * (LUN byte 0 is LSByte, byte 3 is MSByte) */ mbox_cmd[2] = cpu_to_le32(scsi_lun[0]); /* FW expects LUN bytes 4-7 in Incoming Mailbox 3 * (LUN byte 4 is LSByte, byte 7 is MSByte) */ mbox_cmd[3] = cpu_to_le32(scsi_lun[1]); mbox_cmd[5] = 0x01; /* Immediate Command Enable */ qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]); if (mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE && mbox_sts[0] != MBOX_STS_COMMAND_ERROR) status = QLA_ERROR; return status; } /** * qla4xxx_reset_target - issues target Reset * @ha: Pointer to host adapter structure. * @ddb_entry: Pointer to device database entry * * This routine performs a TARGET RESET on the specified target. * The caller must ensure that the ddb_entry pointers * are valid before calling this routine. **/ int qla4xxx_reset_target(struct scsi_qla_host *ha, struct ddb_entry *ddb_entry) { uint32_t mbox_cmd[MBOX_REG_COUNT]; uint32_t mbox_sts[MBOX_REG_COUNT]; int status = QLA_SUCCESS; DEBUG2(printk("scsi%ld:%d: target reset issued\n", ha->host_no, ddb_entry->fw_ddb_index)); /* * Send target reset command to ISP, so that the ISP will return all * outstanding requests with RESET status */ memset(&mbox_cmd, 0, sizeof(mbox_cmd)); memset(&mbox_sts, 0, sizeof(mbox_sts)); mbox_cmd[0] = MBOX_CMD_TARGET_WARM_RESET; mbox_cmd[1] = ddb_entry->fw_ddb_index; mbox_cmd[5] = 0x01; /* Immediate Command Enable */ qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]); if (mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE && mbox_sts[0] != MBOX_STS_COMMAND_ERROR) status = QLA_ERROR; return status; } int qla4xxx_get_flash(struct scsi_qla_host * ha, dma_addr_t dma_addr, uint32_t offset, uint32_t len) { uint32_t mbox_cmd[MBOX_REG_COUNT]; uint32_t mbox_sts[MBOX_REG_COUNT]; memset(&mbox_cmd, 0, sizeof(mbox_cmd)); memset(&mbox_sts, 0, sizeof(mbox_sts)); mbox_cmd[0] = MBOX_CMD_READ_FLASH; mbox_cmd[1] = LSDW(dma_addr); mbox_cmd[2] = MSDW(dma_addr); mbox_cmd[3] = offset; mbox_cmd[4] = len; if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, &mbox_cmd[0], &mbox_sts[0]) != QLA_SUCCESS) { DEBUG2(printk("scsi%ld: %s: MBOX_CMD_READ_FLASH, failed w/ " "status %04X %04X, offset %08x, len %08x\n", ha->host_no, __func__, mbox_sts[0], mbox_sts[1], offset, len)); return QLA_ERROR; } return QLA_SUCCESS; } /** * qla4xxx_about_firmware - gets FW, iscsi draft and boot loader version * @ha: Pointer to host adapter structure. * * Retrieves the FW version, iSCSI draft version & bootloader version of HBA. * Mailboxes 2 & 3 may hold an address for data. Make sure that we write 0 to * those mailboxes, if unused. 
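 *
 * The version data is returned through a DMA buffer (struct about_fw_info)
 * and cached in ha->fw_info; firmware uptime is taken from mailboxes 5 and 6.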
**/ int qla4xxx_about_firmware(struct scsi_qla_host *ha) { struct about_fw_info *about_fw = NULL; dma_addr_t about_fw_dma; uint32_t mbox_cmd[MBOX_REG_COUNT]; uint32_t mbox_sts[MBOX_REG_COUNT]; int status = QLA_ERROR; about_fw = dma_alloc_coherent(&ha->pdev->dev, sizeof(struct about_fw_info), &about_fw_dma, GFP_KERNEL); if (!about_fw) { DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Unable to alloc memory " "for about_fw\n", __func__)); return status; } memset(&mbox_cmd, 0, sizeof(mbox_cmd)); memset(&mbox_sts, 0, sizeof(mbox_sts)); mbox_cmd[0] = MBOX_CMD_ABOUT_FW; mbox_cmd[2] = LSDW(about_fw_dma); mbox_cmd[3] = MSDW(about_fw_dma); mbox_cmd[4] = sizeof(struct about_fw_info); status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, MBOX_REG_COUNT, &mbox_cmd[0], &mbox_sts[0]); if (status != QLA_SUCCESS) { DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_ABOUT_FW " "failed w/ status %04X\n", __func__, mbox_sts[0])); goto exit_about_fw; } /* Save version information. */ ha->fw_info.fw_major = le16_to_cpu(about_fw->fw_major); ha->fw_info.fw_minor = le16_to_cpu(about_fw->fw_minor); ha->fw_info.fw_patch = le16_to_cpu(about_fw->fw_patch); ha->fw_info.fw_build = le16_to_cpu(about_fw->fw_build); memcpy(ha->fw_info.fw_build_date, about_fw->fw_build_date, sizeof(about_fw->fw_build_date)); memcpy(ha->fw_info.fw_build_time, about_fw->fw_build_time, sizeof(about_fw->fw_build_time)); strcpy((char *)ha->fw_info.fw_build_user, skip_spaces((char *)about_fw->fw_build_user)); ha->fw_info.fw_load_source = le16_to_cpu(about_fw->fw_load_source); ha->fw_info.iscsi_major = le16_to_cpu(about_fw->iscsi_major); ha->fw_info.iscsi_minor = le16_to_cpu(about_fw->iscsi_minor); ha->fw_info.bootload_major = le16_to_cpu(about_fw->bootload_major); ha->fw_info.bootload_minor = le16_to_cpu(about_fw->bootload_minor); ha->fw_info.bootload_patch = le16_to_cpu(about_fw->bootload_patch); ha->fw_info.bootload_build = le16_to_cpu(about_fw->bootload_build); strcpy((char *)ha->fw_info.extended_timestamp, skip_spaces((char *)about_fw->extended_timestamp)); ha->fw_uptime_secs = le32_to_cpu(mbox_sts[5]); ha->fw_uptime_msecs = le32_to_cpu(mbox_sts[6]); status = QLA_SUCCESS; exit_about_fw: dma_free_coherent(&ha->pdev->dev, sizeof(struct about_fw_info), about_fw, about_fw_dma); return status; } int qla4xxx_get_default_ddb(struct scsi_qla_host *ha, uint32_t options, dma_addr_t dma_addr) { uint32_t mbox_cmd[MBOX_REG_COUNT]; uint32_t mbox_sts[MBOX_REG_COUNT]; memset(&mbox_cmd, 0, sizeof(mbox_cmd)); memset(&mbox_sts, 0, sizeof(mbox_sts)); mbox_cmd[0] = MBOX_CMD_GET_DATABASE_ENTRY_DEFAULTS; mbox_cmd[1] = options; mbox_cmd[2] = LSDW(dma_addr); mbox_cmd[3] = MSDW(dma_addr); if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]) != QLA_SUCCESS) { DEBUG2(printk("scsi%ld: %s: failed status %04X\n", ha->host_no, __func__, mbox_sts[0])); return QLA_ERROR; } return QLA_SUCCESS; } int qla4xxx_req_ddb_entry(struct scsi_qla_host *ha, uint32_t ddb_index, uint32_t *mbx_sts) { int status; uint32_t mbox_cmd[MBOX_REG_COUNT]; uint32_t mbox_sts[MBOX_REG_COUNT]; memset(&mbox_cmd, 0, sizeof(mbox_cmd)); memset(&mbox_sts, 0, sizeof(mbox_sts)); mbox_cmd[0] = MBOX_CMD_REQUEST_DATABASE_ENTRY; mbox_cmd[1] = ddb_index; status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]); if (status != QLA_SUCCESS) { DEBUG2(ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n", __func__, mbox_sts[0])); } *mbx_sts = mbox_sts[0]; return status; } int qla4xxx_clear_ddb_entry(struct scsi_qla_host *ha, uint32_t ddb_index) { int status; uint32_t 
mbox_cmd[MBOX_REG_COUNT]; uint32_t mbox_sts[MBOX_REG_COUNT]; memset(&mbox_cmd, 0, sizeof(mbox_cmd)); memset(&mbox_sts, 0, sizeof(mbox_sts)); mbox_cmd[0] = MBOX_CMD_CLEAR_DATABASE_ENTRY; mbox_cmd[1] = ddb_index; status = qla4xxx_mailbox_command(ha, 2, 1, &mbox_cmd[0], &mbox_sts[0]); if (status != QLA_SUCCESS) { DEBUG2(ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n", __func__, mbox_sts[0])); } return status; } int qla4xxx_set_flash(struct scsi_qla_host *ha, dma_addr_t dma_addr, uint32_t offset, uint32_t length, uint32_t options) { uint32_t mbox_cmd[MBOX_REG_COUNT]; uint32_t mbox_sts[MBOX_REG_COUNT]; int status = QLA_SUCCESS; memset(&mbox_cmd, 0, sizeof(mbox_cmd)); memset(&mbox_sts, 0, sizeof(mbox_sts)); mbox_cmd[0] = MBOX_CMD_WRITE_FLASH; mbox_cmd[1] = LSDW(dma_addr); mbox_cmd[2] = MSDW(dma_addr); mbox_cmd[3] = offset; mbox_cmd[4] = length; mbox_cmd[5] = options; status = qla4xxx_mailbox_command(ha, 6, 2, &mbox_cmd[0], &mbox_sts[0]); if (status != QLA_SUCCESS) { DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_WRITE_FLASH " "failed w/ status %04X, mbx1 %04X\n", __func__, mbox_sts[0], mbox_sts[1])); } return status; } int qla4xxx_bootdb_by_index(struct scsi_qla_host *ha, struct dev_db_entry *fw_ddb_entry, dma_addr_t fw_ddb_entry_dma, uint16_t ddb_index) { uint32_t dev_db_start_offset = FLASH_OFFSET_DB_INFO; uint32_t dev_db_end_offset; int status = QLA_ERROR; memset(fw_ddb_entry, 0, sizeof(*fw_ddb_entry)); dev_db_start_offset += (ddb_index * sizeof(*fw_ddb_entry)); dev_db_end_offset = FLASH_OFFSET_DB_END; if (dev_db_start_offset > dev_db_end_offset) { DEBUG2(ql4_printk(KERN_ERR, ha, "%s:Invalid DDB index %d", __func__, ddb_index)); goto exit_bootdb_failed; } if (qla4xxx_get_flash(ha, fw_ddb_entry_dma, dev_db_start_offset, sizeof(*fw_ddb_entry)) != QLA_SUCCESS) { ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash" "failed\n", ha->host_no, __func__); goto exit_bootdb_failed; } if (fw_ddb_entry->cookie == DDB_VALID_COOKIE) status = QLA_SUCCESS; exit_bootdb_failed: return status; } int qla4xxx_flashdb_by_index(struct scsi_qla_host *ha, struct dev_db_entry *fw_ddb_entry, dma_addr_t fw_ddb_entry_dma, uint16_t ddb_index) { uint32_t dev_db_start_offset; uint32_t dev_db_end_offset; int status = QLA_ERROR; memset(fw_ddb_entry, 0, sizeof(*fw_ddb_entry)); if (is_qla40XX(ha)) { dev_db_start_offset = FLASH_OFFSET_DB_INFO; dev_db_end_offset = FLASH_OFFSET_DB_END; } else { dev_db_start_offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_ddb << 2); /* flt_ddb_size is DDB table size for both ports * so divide it by 2 to calculate the offset for second port */ if (ha->port_num == 1) dev_db_start_offset += (ha->hw.flt_ddb_size / 2); dev_db_end_offset = dev_db_start_offset + (ha->hw.flt_ddb_size / 2); } dev_db_start_offset += (ddb_index * sizeof(*fw_ddb_entry)); if (dev_db_start_offset > dev_db_end_offset) { DEBUG2(ql4_printk(KERN_ERR, ha, "%s:Invalid DDB index %d", __func__, ddb_index)); goto exit_fdb_failed; } if (qla4xxx_get_flash(ha, fw_ddb_entry_dma, dev_db_start_offset, sizeof(*fw_ddb_entry)) != QLA_SUCCESS) { ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash failed\n", ha->host_no, __func__); goto exit_fdb_failed; } if (fw_ddb_entry->cookie == DDB_VALID_COOKIE) status = QLA_SUCCESS; exit_fdb_failed: return status; } int qla4xxx_get_chap(struct scsi_qla_host *ha, char *username, char *password, uint16_t idx) { int ret = 0; int rval = QLA_ERROR; uint32_t offset = 0, chap_size; struct ql4_chap_table *chap_table; dma_addr_t chap_dma; chap_table = dma_pool_zalloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma); if 
(chap_table == NULL) return -ENOMEM; chap_size = sizeof(struct ql4_chap_table); if (is_qla40XX(ha)) offset = FLASH_CHAP_OFFSET | (idx * chap_size); else { offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2); /* flt_chap_size is CHAP table size for both ports * so divide it by 2 to calculate the offset for second port */ if (ha->port_num == 1) offset += (ha->hw.flt_chap_size / 2); offset += (idx * chap_size); } rval = qla4xxx_get_flash(ha, chap_dma, offset, chap_size); if (rval != QLA_SUCCESS) { ret = -EINVAL; goto exit_get_chap; } DEBUG2(ql4_printk(KERN_INFO, ha, "Chap Cookie: x%x\n", __le16_to_cpu(chap_table->cookie))); if (__le16_to_cpu(chap_table->cookie) != CHAP_VALID_COOKIE) { ql4_printk(KERN_ERR, ha, "No valid chap entry found\n"); goto exit_get_chap; } strscpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN); strscpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN); chap_table->cookie = cpu_to_le16(CHAP_VALID_COOKIE); exit_get_chap: dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma); return ret; } /** * qla4xxx_set_chap - Make a chap entry at the given index * @ha: pointer to adapter structure * @username: CHAP username to set * @password: CHAP password to set * @idx: CHAP index at which to make the entry * @bidi: type of chap entry (chap_in or chap_out) * * Create chap entry at the given index with the information provided. * * Note: Caller should acquire the chap lock before getting here. **/ int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username, char *password, uint16_t idx, int bidi) { int ret = 0; int rval = QLA_ERROR; uint32_t offset = 0; struct ql4_chap_table *chap_table; uint32_t chap_size = 0; dma_addr_t chap_dma; chap_table = dma_pool_zalloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma); if (chap_table == NULL) { ret = -ENOMEM; goto exit_set_chap; } if (bidi) chap_table->flags |= BIT_6; /* peer */ else chap_table->flags |= BIT_7; /* local */ chap_table->secret_len = strlen(password); strncpy(chap_table->secret, password, MAX_CHAP_SECRET_LEN - 1); strncpy(chap_table->name, username, MAX_CHAP_NAME_LEN - 1); chap_table->cookie = cpu_to_le16(CHAP_VALID_COOKIE); if (is_qla40XX(ha)) { chap_size = MAX_CHAP_ENTRIES_40XX * sizeof(*chap_table); offset = FLASH_CHAP_OFFSET; } else { /* Single region contains CHAP info for both ports which is * divided into half for each port. 
*/ chap_size = ha->hw.flt_chap_size / 2; offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2); if (ha->port_num == 1) offset += chap_size; } offset += (idx * sizeof(struct ql4_chap_table)); rval = qla4xxx_set_flash(ha, chap_dma, offset, sizeof(struct ql4_chap_table), FLASH_OPT_RMW_COMMIT); if (rval == QLA_SUCCESS && ha->chap_list) { /* Update ha chap_list cache */ memcpy((struct ql4_chap_table *)ha->chap_list + idx, chap_table, sizeof(struct ql4_chap_table)); } dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma); if (rval != QLA_SUCCESS) ret = -EINVAL; exit_set_chap: return ret; } int qla4xxx_get_uni_chap_at_index(struct scsi_qla_host *ha, char *username, char *password, uint16_t chap_index) { int rval = QLA_ERROR; struct ql4_chap_table *chap_table = NULL; int max_chap_entries; if (!ha->chap_list) { ql4_printk(KERN_ERR, ha, "Do not have CHAP table cache\n"); rval = QLA_ERROR; goto exit_uni_chap; } if (!username || !password) { ql4_printk(KERN_ERR, ha, "No memory for username & secret\n"); rval = QLA_ERROR; goto exit_uni_chap; } if (is_qla80XX(ha)) max_chap_entries = (ha->hw.flt_chap_size / 2) / sizeof(struct ql4_chap_table); else max_chap_entries = MAX_CHAP_ENTRIES_40XX; if (chap_index > max_chap_entries) { ql4_printk(KERN_ERR, ha, "Invalid Chap index\n"); rval = QLA_ERROR; goto exit_uni_chap; } mutex_lock(&ha->chap_sem); chap_table = (struct ql4_chap_table *)ha->chap_list + chap_index; if (chap_table->cookie != cpu_to_le16(CHAP_VALID_COOKIE)) { rval = QLA_ERROR; goto exit_unlock_uni_chap; } if (!(chap_table->flags & BIT_7)) { ql4_printk(KERN_ERR, ha, "Unidirectional entry not set\n"); rval = QLA_ERROR; goto exit_unlock_uni_chap; } strscpy(password, chap_table->secret, MAX_CHAP_SECRET_LEN); strscpy(username, chap_table->name, MAX_CHAP_NAME_LEN); rval = QLA_SUCCESS; exit_unlock_uni_chap: mutex_unlock(&ha->chap_sem); exit_uni_chap: return rval; } /** * qla4xxx_get_chap_index - Get chap index given username and secret * @ha: pointer to adapter structure * @username: CHAP username to be searched * @password: CHAP password to be searched * @bidi: Is this a BIDI CHAP * @chap_index: CHAP index to be returned * * Match the username and password in the chap_list, return the index if a * match is found. If a match is not found then add the entry in FLASH and * return the index at which entry is written in the FLASH. 
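 *
 * Returns QLA_SUCCESS with *chap_index set when a match is found or a new
 * entry is written to a free slot; QLA_ERROR otherwise.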
**/ int qla4xxx_get_chap_index(struct scsi_qla_host *ha, char *username, char *password, int bidi, uint16_t *chap_index) { int i, rval; int free_index = -1; int found_index = 0; int max_chap_entries = 0; struct ql4_chap_table *chap_table; if (is_qla80XX(ha)) max_chap_entries = (ha->hw.flt_chap_size / 2) / sizeof(struct ql4_chap_table); else max_chap_entries = MAX_CHAP_ENTRIES_40XX; if (!ha->chap_list) { ql4_printk(KERN_ERR, ha, "Do not have CHAP table cache\n"); return QLA_ERROR; } if (!username || !password) { ql4_printk(KERN_ERR, ha, "Do not have username and psw\n"); return QLA_ERROR; } mutex_lock(&ha->chap_sem); for (i = 0; i < max_chap_entries; i++) { chap_table = (struct ql4_chap_table *)ha->chap_list + i; if (chap_table->cookie != cpu_to_le16(CHAP_VALID_COOKIE)) { if (i > MAX_RESRV_CHAP_IDX && free_index == -1) free_index = i; continue; } if (bidi) { if (chap_table->flags & BIT_7) continue; } else { if (chap_table->flags & BIT_6) continue; } if (!strncmp(chap_table->secret, password, MAX_CHAP_SECRET_LEN) && !strncmp(chap_table->name, username, MAX_CHAP_NAME_LEN)) { *chap_index = i; found_index = 1; break; } } /* If chap entry is not present and a free index is available then * write the entry in flash */ if (!found_index && free_index != -1) { rval = qla4xxx_set_chap(ha, username, password, free_index, bidi); if (!rval) { *chap_index = free_index; found_index = 1; } } mutex_unlock(&ha->chap_sem); if (found_index) return QLA_SUCCESS; return QLA_ERROR; } int qla4xxx_conn_close_sess_logout(struct scsi_qla_host *ha, uint16_t fw_ddb_index, uint16_t connection_id, uint16_t option) { uint32_t mbox_cmd[MBOX_REG_COUNT]; uint32_t mbox_sts[MBOX_REG_COUNT]; int status = QLA_SUCCESS; memset(&mbox_cmd, 0, sizeof(mbox_cmd)); memset(&mbox_sts, 0, sizeof(mbox_sts)); mbox_cmd[0] = MBOX_CMD_CONN_CLOSE_SESS_LOGOUT; mbox_cmd[1] = fw_ddb_index; mbox_cmd[2] = connection_id; mbox_cmd[3] = option; status = qla4xxx_mailbox_command(ha, 4, 2, &mbox_cmd[0], &mbox_sts[0]); if (status != QLA_SUCCESS) { DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_CONN_CLOSE " "option %04x failed w/ status %04X %04X\n", __func__, option, mbox_sts[0], mbox_sts[1])); } return status; } /** * qla4_84xx_extend_idc_tmo - Extend IDC Timeout. * @ha: Pointer to host adapter structure. * @ext_tmo: idc timeout value * * Requests firmware to extend the idc timeout value. 
**/ static int qla4_84xx_extend_idc_tmo(struct scsi_qla_host *ha, uint32_t ext_tmo) { uint32_t mbox_cmd[MBOX_REG_COUNT]; uint32_t mbox_sts[MBOX_REG_COUNT]; int status; memset(&mbox_cmd, 0, sizeof(mbox_cmd)); memset(&mbox_sts, 0, sizeof(mbox_sts)); ext_tmo &= 0xf; mbox_cmd[0] = MBOX_CMD_IDC_TIME_EXTEND; mbox_cmd[1] = ((ha->idc_info.request_desc & 0xfffff0ff) | (ext_tmo << 8)); /* new timeout */ mbox_cmd[2] = ha->idc_info.info1; mbox_cmd[3] = ha->idc_info.info2; mbox_cmd[4] = ha->idc_info.info3; status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, MBOX_REG_COUNT, mbox_cmd, mbox_sts); if (status != QLA_SUCCESS) { DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld: %s: failed status %04X\n", ha->host_no, __func__, mbox_sts[0])); return QLA_ERROR; } else { ql4_printk(KERN_INFO, ha, "%s: IDC timeout extended by %d secs\n", __func__, ext_tmo); } return QLA_SUCCESS; } int qla4xxx_disable_acb(struct scsi_qla_host *ha) { uint32_t mbox_cmd[MBOX_REG_COUNT]; uint32_t mbox_sts[MBOX_REG_COUNT]; int status = QLA_SUCCESS; memset(&mbox_cmd, 0, sizeof(mbox_cmd)); memset(&mbox_sts, 0, sizeof(mbox_sts)); mbox_cmd[0] = MBOX_CMD_DISABLE_ACB; status = qla4xxx_mailbox_command(ha, 8, 5, &mbox_cmd[0], &mbox_sts[0]); if (status != QLA_SUCCESS) { DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_DISABLE_ACB " "failed w/ status %04X %04X %04X", __func__, mbox_sts[0], mbox_sts[1], mbox_sts[2])); } else { if (is_qla8042(ha) && test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags) && (mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE)) { /* * Disable ACB mailbox command takes time to complete * based on the total number of targets connected. * For 512 targets, it took approximately 5 secs to * complete. Setting the timeout value to 8, with the 3 * secs buffer. */ qla4_84xx_extend_idc_tmo(ha, IDC_EXTEND_TOV); if (!wait_for_completion_timeout(&ha->disable_acb_comp, IDC_EXTEND_TOV * HZ)) { ql4_printk(KERN_WARNING, ha, "%s: Disable ACB Completion not received\n", __func__); } } } return status; } int qla4xxx_get_acb(struct scsi_qla_host *ha, dma_addr_t acb_dma, uint32_t acb_type, uint32_t len) { uint32_t mbox_cmd[MBOX_REG_COUNT]; uint32_t mbox_sts[MBOX_REG_COUNT]; int status = QLA_SUCCESS; memset(&mbox_cmd, 0, sizeof(mbox_cmd)); memset(&mbox_sts, 0, sizeof(mbox_sts)); mbox_cmd[0] = MBOX_CMD_GET_ACB; mbox_cmd[1] = acb_type; mbox_cmd[2] = LSDW(acb_dma); mbox_cmd[3] = MSDW(acb_dma); mbox_cmd[4] = len; status = qla4xxx_mailbox_command(ha, 5, 5, &mbox_cmd[0], &mbox_sts[0]); if (status != QLA_SUCCESS) { DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_GET_ACB " "failed w/ status %04X\n", __func__, mbox_sts[0])); } return status; } int qla4xxx_set_acb(struct scsi_qla_host *ha, uint32_t *mbox_cmd, uint32_t *mbox_sts, dma_addr_t acb_dma) { int status = QLA_SUCCESS; memset(mbox_cmd, 0, sizeof(mbox_cmd[0]) * MBOX_REG_COUNT); memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT); mbox_cmd[0] = MBOX_CMD_SET_ACB; mbox_cmd[1] = 0; /* Primary ACB */ mbox_cmd[2] = LSDW(acb_dma); mbox_cmd[3] = MSDW(acb_dma); mbox_cmd[4] = sizeof(struct addr_ctrl_blk); status = qla4xxx_mailbox_command(ha, 5, 5, &mbox_cmd[0], &mbox_sts[0]); if (status != QLA_SUCCESS) { DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_SET_ACB " "failed w/ status %04X\n", __func__, mbox_sts[0])); } return status; } int qla4xxx_set_param_ddbentry(struct scsi_qla_host *ha, struct ddb_entry *ddb_entry, struct iscsi_cls_conn *cls_conn, uint32_t *mbx_sts) { struct dev_db_entry *fw_ddb_entry; struct iscsi_conn *conn; struct iscsi_session *sess; struct qla_conn *qla_conn; struct sockaddr *dst_addr; dma_addr_t 
fw_ddb_entry_dma; int status = QLA_SUCCESS; int rval = 0; struct sockaddr_in *addr; struct sockaddr_in6 *addr6; char *ip; uint16_t iscsi_opts = 0; uint32_t options = 0; uint16_t idx, *ptid; fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), &fw_ddb_entry_dma, GFP_KERNEL); if (!fw_ddb_entry) { DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Unable to allocate dma buffer.\n", __func__)); rval = -ENOMEM; goto exit_set_param_no_free; } conn = cls_conn->dd_data; qla_conn = conn->dd_data; sess = conn->session; dst_addr = (struct sockaddr *)&qla_conn->qla_ep->dst_addr; if (dst_addr->sa_family == AF_INET6) options |= IPV6_DEFAULT_DDB_ENTRY; status = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma); if (status == QLA_ERROR) { rval = -EINVAL; goto exit_set_param; } ptid = (uint16_t *)&fw_ddb_entry->isid[1]; *ptid = cpu_to_le16((uint16_t)ddb_entry->sess->target_id); DEBUG2(ql4_printk(KERN_INFO, ha, "ISID [%pmR]\n", fw_ddb_entry->isid)); iscsi_opts = le16_to_cpu(fw_ddb_entry->iscsi_options); memset(fw_ddb_entry->iscsi_alias, 0, sizeof(fw_ddb_entry->iscsi_alias)); memset(fw_ddb_entry->iscsi_name, 0, sizeof(fw_ddb_entry->iscsi_name)); if (sess->targetname != NULL) { memcpy(fw_ddb_entry->iscsi_name, sess->targetname, min(strlen(sess->targetname), sizeof(fw_ddb_entry->iscsi_name))); } memset(fw_ddb_entry->ip_addr, 0, sizeof(fw_ddb_entry->ip_addr)); memset(fw_ddb_entry->tgt_addr, 0, sizeof(fw_ddb_entry->tgt_addr)); fw_ddb_entry->options = DDB_OPT_TARGET | DDB_OPT_AUTO_SENDTGTS_DISABLE; if (dst_addr->sa_family == AF_INET) { addr = (struct sockaddr_in *)dst_addr; ip = (char *)&addr->sin_addr; memcpy(fw_ddb_entry->ip_addr, ip, IP_ADDR_LEN); fw_ddb_entry->port = cpu_to_le16(ntohs(addr->sin_port)); DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Destination Address [%pI4]: index [%d]\n", __func__, fw_ddb_entry->ip_addr, ddb_entry->fw_ddb_index)); } else if (dst_addr->sa_family == AF_INET6) { addr6 = (struct sockaddr_in6 *)dst_addr; ip = (char *)&addr6->sin6_addr; memcpy(fw_ddb_entry->ip_addr, ip, IPv6_ADDR_LEN); fw_ddb_entry->port = cpu_to_le16(ntohs(addr6->sin6_port)); fw_ddb_entry->options |= DDB_OPT_IPV6_DEVICE; DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Destination Address [%pI6]: index [%d]\n", __func__, fw_ddb_entry->ip_addr, ddb_entry->fw_ddb_index)); } else { ql4_printk(KERN_ERR, ha, "%s: Failed to get IP Address\n", __func__); rval = -EINVAL; goto exit_set_param; } /* CHAP */ if (sess->username != NULL && sess->password != NULL) { if (strlen(sess->username) && strlen(sess->password)) { iscsi_opts |= BIT_7; rval = qla4xxx_get_chap_index(ha, sess->username, sess->password, LOCAL_CHAP, &idx); if (rval) goto exit_set_param; fw_ddb_entry->chap_tbl_idx = cpu_to_le16(idx); } } if (sess->username_in != NULL && sess->password_in != NULL) { /* Check if BIDI CHAP */ if (strlen(sess->username_in) && strlen(sess->password_in)) { iscsi_opts |= BIT_4; rval = qla4xxx_get_chap_index(ha, sess->username_in, sess->password_in, BIDI_CHAP, &idx); if (rval) goto exit_set_param; } } if (sess->initial_r2t_en) iscsi_opts |= BIT_10; if (sess->imm_data_en) iscsi_opts |= BIT_11; fw_ddb_entry->iscsi_options = cpu_to_le16(iscsi_opts); if (conn->max_recv_dlength) fw_ddb_entry->iscsi_max_rcv_data_seg_len = cpu_to_le16((conn->max_recv_dlength / BYTE_UNITS)); if (sess->max_r2t) fw_ddb_entry->iscsi_max_outsnd_r2t = cpu_to_le16(sess->max_r2t); if (sess->first_burst) fw_ddb_entry->iscsi_first_burst_len = cpu_to_le16((sess->first_burst / BYTE_UNITS)); if (sess->max_burst) fw_ddb_entry->iscsi_max_burst_len = cpu_to_le16((sess->max_burst / 
BYTE_UNITS)); if (sess->time2wait) fw_ddb_entry->iscsi_def_time2wait = cpu_to_le16(sess->time2wait); if (sess->time2retain) fw_ddb_entry->iscsi_def_time2retain = cpu_to_le16(sess->time2retain); status = qla4xxx_set_ddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry_dma, mbx_sts); if (status != QLA_SUCCESS) rval = -EINVAL; exit_set_param: dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), fw_ddb_entry, fw_ddb_entry_dma); exit_set_param_no_free: return rval; } int qla4xxx_get_mgmt_data(struct scsi_qla_host *ha, uint16_t fw_ddb_index, uint16_t stats_size, dma_addr_t stats_dma) { int status = QLA_SUCCESS; uint32_t mbox_cmd[MBOX_REG_COUNT]; uint32_t mbox_sts[MBOX_REG_COUNT]; memset(mbox_cmd, 0, sizeof(mbox_cmd[0]) * MBOX_REG_COUNT); memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT); mbox_cmd[0] = MBOX_CMD_GET_MANAGEMENT_DATA; mbox_cmd[1] = fw_ddb_index; mbox_cmd[2] = LSDW(stats_dma); mbox_cmd[3] = MSDW(stats_dma); mbox_cmd[4] = stats_size; status = qla4xxx_mailbox_command(ha, 5, 1, &mbox_cmd[0], &mbox_sts[0]); if (status != QLA_SUCCESS) { DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_GET_MANAGEMENT_DATA " "failed w/ status %04X\n", __func__, mbox_sts[0])); } return status; } int qla4xxx_get_ip_state(struct scsi_qla_host *ha, uint32_t acb_idx, uint32_t ip_idx, uint32_t *sts) { uint32_t mbox_cmd[MBOX_REG_COUNT]; uint32_t mbox_sts[MBOX_REG_COUNT]; int status = QLA_SUCCESS; memset(&mbox_cmd, 0, sizeof(mbox_cmd)); memset(&mbox_sts, 0, sizeof(mbox_sts)); mbox_cmd[0] = MBOX_CMD_GET_IP_ADDR_STATE; mbox_cmd[1] = acb_idx; mbox_cmd[2] = ip_idx; status = qla4xxx_mailbox_command(ha, 3, 8, &mbox_cmd[0], &mbox_sts[0]); if (status != QLA_SUCCESS) { DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: " "MBOX_CMD_GET_IP_ADDR_STATE failed w/ " "status %04X\n", __func__, mbox_sts[0])); } memcpy(sts, mbox_sts, sizeof(mbox_sts)); return status; } int qla4xxx_get_nvram(struct scsi_qla_host *ha, dma_addr_t nvram_dma, uint32_t offset, uint32_t size) { int status = QLA_SUCCESS; uint32_t mbox_cmd[MBOX_REG_COUNT]; uint32_t mbox_sts[MBOX_REG_COUNT]; memset(&mbox_cmd, 0, sizeof(mbox_cmd)); memset(&mbox_sts, 0, sizeof(mbox_sts)); mbox_cmd[0] = MBOX_CMD_GET_NVRAM; mbox_cmd[1] = LSDW(nvram_dma); mbox_cmd[2] = MSDW(nvram_dma); mbox_cmd[3] = offset; mbox_cmd[4] = size; status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]); if (status != QLA_SUCCESS) { DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed " "status %04X\n", ha->host_no, __func__, mbox_sts[0])); } return status; } int qla4xxx_set_nvram(struct scsi_qla_host *ha, dma_addr_t nvram_dma, uint32_t offset, uint32_t size) { int status = QLA_SUCCESS; uint32_t mbox_cmd[MBOX_REG_COUNT]; uint32_t mbox_sts[MBOX_REG_COUNT]; memset(&mbox_cmd, 0, sizeof(mbox_cmd)); memset(&mbox_sts, 0, sizeof(mbox_sts)); mbox_cmd[0] = MBOX_CMD_SET_NVRAM; mbox_cmd[1] = LSDW(nvram_dma); mbox_cmd[2] = MSDW(nvram_dma); mbox_cmd[3] = offset; mbox_cmd[4] = size; status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]); if (status != QLA_SUCCESS) { DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed " "status %04X\n", ha->host_no, __func__, mbox_sts[0])); } return status; } int qla4xxx_restore_factory_defaults(struct scsi_qla_host *ha, uint32_t region, uint32_t field0, uint32_t field1) { int status = QLA_SUCCESS; uint32_t mbox_cmd[MBOX_REG_COUNT]; uint32_t mbox_sts[MBOX_REG_COUNT]; memset(&mbox_cmd, 0, sizeof(mbox_cmd)); memset(&mbox_sts, 0, sizeof(mbox_sts)); mbox_cmd[0] = MBOX_CMD_RESTORE_FACTORY_DEFAULTS; mbox_cmd[3] = region; 
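	/* Mailboxes 4 and 5 carry the two region-specific qualifier words. */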
mbox_cmd[4] = field0; mbox_cmd[5] = field1; status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 3, &mbox_cmd[0], &mbox_sts[0]); if (status != QLA_SUCCESS) { DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed " "status %04X\n", ha->host_no, __func__, mbox_sts[0])); } return status; } /** * qla4_8xxx_set_param - set driver version in firmware. * @ha: Pointer to host adapter structure. * @param: Parameter to set i.e driver version **/ int qla4_8xxx_set_param(struct scsi_qla_host *ha, int param) { uint32_t mbox_cmd[MBOX_REG_COUNT]; uint32_t mbox_sts[MBOX_REG_COUNT]; uint32_t status; memset(&mbox_cmd, 0, sizeof(mbox_cmd)); memset(&mbox_sts, 0, sizeof(mbox_sts)); mbox_cmd[0] = MBOX_CMD_SET_PARAM; if (param == SET_DRVR_VERSION) { mbox_cmd[1] = SET_DRVR_VERSION; strncpy((char *)&mbox_cmd[2], QLA4XXX_DRIVER_VERSION, MAX_DRVR_VER_LEN - 1); } else { ql4_printk(KERN_ERR, ha, "%s: invalid parameter 0x%x\n", __func__, param); status = QLA_ERROR; goto exit_set_param; } status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, mbox_cmd, mbox_sts); if (status == QLA_ERROR) ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n", __func__, mbox_sts[0]); exit_set_param: return status; } /** * qla4_83xx_post_idc_ack - post IDC ACK * @ha: Pointer to host adapter structure. * * Posts IDC ACK for IDC Request Notification AEN. **/ int qla4_83xx_post_idc_ack(struct scsi_qla_host *ha) { uint32_t mbox_cmd[MBOX_REG_COUNT]; uint32_t mbox_sts[MBOX_REG_COUNT]; int status; memset(&mbox_cmd, 0, sizeof(mbox_cmd)); memset(&mbox_sts, 0, sizeof(mbox_sts)); mbox_cmd[0] = MBOX_CMD_IDC_ACK; mbox_cmd[1] = ha->idc_info.request_desc; mbox_cmd[2] = ha->idc_info.info1; mbox_cmd[3] = ha->idc_info.info2; mbox_cmd[4] = ha->idc_info.info3; status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, MBOX_REG_COUNT, mbox_cmd, mbox_sts); if (status == QLA_ERROR) ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n", __func__, mbox_sts[0]); else ql4_printk(KERN_INFO, ha, "%s: IDC ACK posted\n", __func__); return status; } int qla4_84xx_config_acb(struct scsi_qla_host *ha, int acb_config) { uint32_t mbox_cmd[MBOX_REG_COUNT]; uint32_t mbox_sts[MBOX_REG_COUNT]; struct addr_ctrl_blk *acb = NULL; uint32_t acb_len = sizeof(struct addr_ctrl_blk); int rval = QLA_SUCCESS; dma_addr_t acb_dma; acb = dma_alloc_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk), &acb_dma, GFP_KERNEL); if (!acb) { ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n", __func__); rval = QLA_ERROR; goto exit_config_acb; } memset(acb, 0, acb_len); switch (acb_config) { case ACB_CONFIG_DISABLE: rval = qla4xxx_get_acb(ha, acb_dma, 0, acb_len); if (rval != QLA_SUCCESS) goto exit_free_acb; rval = qla4xxx_disable_acb(ha); if (rval != QLA_SUCCESS) goto exit_free_acb; if (!ha->saved_acb) ha->saved_acb = kzalloc(acb_len, GFP_KERNEL); if (!ha->saved_acb) { ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n", __func__); rval = QLA_ERROR; goto exit_free_acb; } memcpy(ha->saved_acb, acb, acb_len); break; case ACB_CONFIG_SET: if (!ha->saved_acb) { ql4_printk(KERN_ERR, ha, "%s: Can't set ACB, Saved ACB not available\n", __func__); rval = QLA_ERROR; goto exit_free_acb; } memcpy(acb, ha->saved_acb, acb_len); rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], acb_dma); if (rval != QLA_SUCCESS) goto exit_free_acb; break; default: ql4_printk(KERN_ERR, ha, "%s: Invalid ACB Configuration\n", __func__); } exit_free_acb: dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk), acb, acb_dma); exit_config_acb: if ((acb_config == ACB_CONFIG_SET) && ha->saved_acb) { kfree(ha->saved_acb); 
ha->saved_acb = NULL; } DEBUG2(ql4_printk(KERN_INFO, ha, "%s %s\n", __func__, rval == QLA_SUCCESS ? "SUCCEEDED" : "FAILED")); return rval; } int qla4_83xx_get_port_config(struct scsi_qla_host *ha, uint32_t *config) { uint32_t mbox_cmd[MBOX_REG_COUNT]; uint32_t mbox_sts[MBOX_REG_COUNT]; int status; memset(&mbox_cmd, 0, sizeof(mbox_cmd)); memset(&mbox_sts, 0, sizeof(mbox_sts)); mbox_cmd[0] = MBOX_CMD_GET_PORT_CONFIG; status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, MBOX_REG_COUNT, mbox_cmd, mbox_sts); if (status == QLA_SUCCESS) *config = mbox_sts[1]; else ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n", __func__, mbox_sts[0]); return status; } int qla4_83xx_set_port_config(struct scsi_qla_host *ha, uint32_t *config) { uint32_t mbox_cmd[MBOX_REG_COUNT]; uint32_t mbox_sts[MBOX_REG_COUNT]; int status; memset(&mbox_cmd, 0, sizeof(mbox_cmd)); memset(&mbox_sts, 0, sizeof(mbox_sts)); mbox_cmd[0] = MBOX_CMD_SET_PORT_CONFIG; mbox_cmd[1] = *config; status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, MBOX_REG_COUNT, mbox_cmd, mbox_sts); if (status != QLA_SUCCESS) ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n", __func__, mbox_sts[0]); return status; }
linux-master
drivers/scsi/qla4xxx/ql4_mbx.c
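The mailbox helpers in ql4_mbx.c above all follow one calling convention: zero both register arrays, put the opcode in mbox_cmd[0], split any DMA address across two 32-bit registers with LSDW()/MSDW(), issue qla4xxx_mailbox_command() with the in/out register counts, and treat mbox_sts[0] as the returned status word. The sketch below condenses that pattern in the driver's own style; it reuses only helpers visible above (qla4xxx_mailbox_command, ql4_printk, MBOX_REG_COUNT, LSDW/MSDW, QLA_SUCCESS), while MBOX_CMD_EXAMPLE and the buffer arguments are hypothetical placeholders, not driver definitions, and the function is an illustrative fragment rather than standalone-compilable code.

/*
 * Illustrative sketch only: the shared mailbox calling pattern used by the
 * get/set routines above.  MBOX_CMD_EXAMPLE is a made-up opcode; a real
 * caller would use one of the driver's MBOX_CMD_* values.
 */
static int qla4xxx_example_mbox(struct scsi_qla_host *ha,
				dma_addr_t buf_dma, uint32_t len)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	int status;

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_EXAMPLE;	/* opcode goes in register 0 */
	mbox_cmd[1] = LSDW(buf_dma);	/* low 32 bits of the DMA address */
	mbox_cmd[2] = MSDW(buf_dma);	/* high 32 bits of the DMA address */
	mbox_cmd[3] = len;

	/* four registers out, one status register back */
	status = qla4xxx_mailbox_command(ha, 4, 1, &mbox_cmd[0], &mbox_sts[0]);
	if (status != QLA_SUCCESS)
		ql4_printk(KERN_ERR, ha, "%s: failed, status %04X\n",
			   __func__, mbox_sts[0]);

	return status;
}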
// SPDX-License-Identifier: GPL-2.0-only /* * QLogic iSCSI HBA Driver * Copyright (c) 2003-2013 QLogic Corporation */ #include "ql4_def.h" #include "ql4_glbl.h" #include "ql4_dbg.h" #include "ql4_inline.h" #include <scsi/scsi_tcq.h> static int qla4xxx_space_in_req_ring(struct scsi_qla_host *ha, uint16_t req_cnt) { uint16_t cnt; /* Calculate number of free request entries. */ if ((req_cnt + 2) >= ha->req_q_count) { cnt = (uint16_t) ha->isp_ops->rd_shdw_req_q_out(ha); if (ha->request_in < cnt) ha->req_q_count = cnt - ha->request_in; else ha->req_q_count = REQUEST_QUEUE_DEPTH - (ha->request_in - cnt); } /* Check if room for request in request ring. */ if ((req_cnt + 2) < ha->req_q_count) return 1; else return 0; } static void qla4xxx_advance_req_ring_ptr(struct scsi_qla_host *ha) { /* Advance request queue pointer */ if (ha->request_in == (REQUEST_QUEUE_DEPTH - 1)) { ha->request_in = 0; ha->request_ptr = ha->request_ring; } else { ha->request_in++; ha->request_ptr++; } } /** * qla4xxx_get_req_pkt - returns a valid entry in request queue. * @ha: Pointer to host adapter structure. * @queue_entry: Pointer to pointer to queue entry structure * * This routine performs the following tasks: * - returns the current request_in pointer (if queue not full) * - advances the request_in pointer * - checks for queue full **/ static int qla4xxx_get_req_pkt(struct scsi_qla_host *ha, struct queue_entry **queue_entry) { uint16_t req_cnt = 1; if (qla4xxx_space_in_req_ring(ha, req_cnt)) { *queue_entry = ha->request_ptr; memset(*queue_entry, 0, sizeof(**queue_entry)); qla4xxx_advance_req_ring_ptr(ha); ha->req_q_count -= req_cnt; return QLA_SUCCESS; } return QLA_ERROR; } /** * qla4xxx_send_marker_iocb - issues marker iocb to HBA * @ha: Pointer to host adapter structure. * @ddb_entry: Pointer to device database entry * @lun: SCSI LUN * @mrkr_mod: marker identifier * * This routine issues a marker IOCB. 
**/ int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha, struct ddb_entry *ddb_entry, uint64_t lun, uint16_t mrkr_mod) { struct qla4_marker_entry *marker_entry; unsigned long flags = 0; uint8_t status = QLA_SUCCESS; /* Acquire hardware specific lock */ spin_lock_irqsave(&ha->hardware_lock, flags); /* Get pointer to the queue entry for the marker */ if (qla4xxx_get_req_pkt(ha, (struct queue_entry **) &marker_entry) != QLA_SUCCESS) { status = QLA_ERROR; goto exit_send_marker; } /* Put the marker in the request queue */ marker_entry->hdr.entryType = ET_MARKER; marker_entry->hdr.entryCount = 1; marker_entry->target = cpu_to_le16(ddb_entry->fw_ddb_index); marker_entry->modifier = cpu_to_le16(mrkr_mod); int_to_scsilun(lun, &marker_entry->lun); wmb(); /* Tell ISP it's got a new I/O request */ ha->isp_ops->queue_iocb(ha); exit_send_marker: spin_unlock_irqrestore(&ha->hardware_lock, flags); return status; } static struct continuation_t1_entry * qla4xxx_alloc_cont_entry(struct scsi_qla_host *ha) { struct continuation_t1_entry *cont_entry; cont_entry = (struct continuation_t1_entry *)ha->request_ptr; qla4xxx_advance_req_ring_ptr(ha); /* Load packet defaults */ cont_entry->hdr.entryType = ET_CONTINUE; cont_entry->hdr.entryCount = 1; cont_entry->hdr.systemDefined = (uint8_t) cpu_to_le16(ha->request_in); return cont_entry; } static uint16_t qla4xxx_calc_request_entries(uint16_t dsds) { uint16_t iocbs; iocbs = 1; if (dsds > COMMAND_SEG) { iocbs += (dsds - COMMAND_SEG) / CONTINUE_SEG; if ((dsds - COMMAND_SEG) % CONTINUE_SEG) iocbs++; } return iocbs; } static void qla4xxx_build_scsi_iocbs(struct srb *srb, struct command_t3_entry *cmd_entry, uint16_t tot_dsds) { struct scsi_qla_host *ha; uint16_t avail_dsds; struct data_seg_a64 *cur_dsd; struct scsi_cmnd *cmd; struct scatterlist *sg; int i; cmd = srb->cmd; ha = srb->ha; if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) { /* No data being transferred */ cmd_entry->ttlByteCnt = cpu_to_le32(0); return; } avail_dsds = COMMAND_SEG; cur_dsd = (struct data_seg_a64 *) & (cmd_entry->dataseg[0]); scsi_for_each_sg(cmd, sg, tot_dsds, i) { dma_addr_t sle_dma; /* Allocate additional continuation packets? */ if (avail_dsds == 0) { struct continuation_t1_entry *cont_entry; cont_entry = qla4xxx_alloc_cont_entry(ha); cur_dsd = (struct data_seg_a64 *) &cont_entry->dataseg[0]; avail_dsds = CONTINUE_SEG; } sle_dma = sg_dma_address(sg); cur_dsd->base.addrLow = cpu_to_le32(LSDW(sle_dma)); cur_dsd->base.addrHigh = cpu_to_le32(MSDW(sle_dma)); cur_dsd->count = cpu_to_le32(sg_dma_len(sg)); avail_dsds--; cur_dsd++; } } void qla4_83xx_queue_iocb(struct scsi_qla_host *ha) { writel(ha->request_in, &ha->qla4_83xx_reg->req_q_in); readl(&ha->qla4_83xx_reg->req_q_in); } void qla4_83xx_complete_iocb(struct scsi_qla_host *ha) { writel(ha->response_out, &ha->qla4_83xx_reg->rsp_q_out); readl(&ha->qla4_83xx_reg->rsp_q_out); } /** * qla4_82xx_queue_iocb - Tell ISP it's got new request(s) * @ha: pointer to host adapter structure. * * This routine notifies the ISP that one or more new request * queue entries have been placed on the request queue. **/ void qla4_82xx_queue_iocb(struct scsi_qla_host *ha) { uint32_t dbval = 0; dbval = 0x14 | (ha->func_num << 5); dbval = dbval | (0 << 8) | (ha->request_in << 16); qla4_82xx_wr_32(ha, ha->nx_db_wr_ptr, ha->request_in); } /** * qla4_82xx_complete_iocb - Tell ISP we're done with response(s) * @ha: pointer to host adapter structure. 
* * This routine notifies the ISP that one or more response/completion * queue entries have been processed by the driver. * This also clears the interrupt. **/ void qla4_82xx_complete_iocb(struct scsi_qla_host *ha) { writel(ha->response_out, &ha->qla4_82xx_reg->rsp_q_out); readl(&ha->qla4_82xx_reg->rsp_q_out); } /** * qla4xxx_queue_iocb - Tell ISP it's got new request(s) * @ha: pointer to host adapter structure. * * This routine is notifies the ISP that one or more new request * queue entries have been placed on the request queue. **/ void qla4xxx_queue_iocb(struct scsi_qla_host *ha) { writel(ha->request_in, &ha->reg->req_q_in); readl(&ha->reg->req_q_in); } /** * qla4xxx_complete_iocb - Tell ISP we're done with response(s) * @ha: pointer to host adapter structure. * * This routine is notifies the ISP that one or more response/completion * queue entries have been processed by the driver. * This also clears the interrupt. **/ void qla4xxx_complete_iocb(struct scsi_qla_host *ha) { writel(ha->response_out, &ha->reg->rsp_q_out); readl(&ha->reg->rsp_q_out); } /** * qla4xxx_send_command_to_isp - issues command to HBA * @ha: pointer to host adapter structure. * @srb: pointer to SCSI Request Block to be sent to ISP * * This routine is called by qla4xxx_queuecommand to build an ISP * command and pass it to the ISP for execution. **/ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb) { struct scsi_cmnd *cmd = srb->cmd; struct ddb_entry *ddb_entry; struct command_t3_entry *cmd_entry; int nseg; uint16_t tot_dsds; uint16_t req_cnt; unsigned long flags; uint32_t index; /* Get real lun and adapter */ ddb_entry = srb->ddb; tot_dsds = 0; /* Acquire hardware specific lock */ spin_lock_irqsave(&ha->hardware_lock, flags); index = scsi_cmd_to_rq(cmd)->tag; /* * Check to see if adapter is online before placing request on * request queue. If a reset occurs and a request is in the queue, * the firmware will still attempt to process the request, retrieving * garbage for pointers. */ if (!test_bit(AF_ONLINE, &ha->flags)) { DEBUG2(printk("scsi%ld: %s: Adapter OFFLINE! " "Do not issue command.\n", ha->host_no, __func__)); goto queuing_error; } /* Calculate the number of request entries needed. 
*/ nseg = scsi_dma_map(cmd); if (nseg < 0) goto queuing_error; tot_dsds = nseg; req_cnt = qla4xxx_calc_request_entries(tot_dsds); if (!qla4xxx_space_in_req_ring(ha, req_cnt)) goto queuing_error; /* total iocbs active */ if ((ha->iocb_cnt + req_cnt) >= ha->iocb_hiwat) goto queuing_error; /* Build command packet */ cmd_entry = (struct command_t3_entry *) ha->request_ptr; memset(cmd_entry, 0, sizeof(struct command_t3_entry)); cmd_entry->hdr.entryType = ET_COMMAND; cmd_entry->handle = cpu_to_le32(index); cmd_entry->target = cpu_to_le16(ddb_entry->fw_ddb_index); int_to_scsilun(cmd->device->lun, &cmd_entry->lun); cmd_entry->ttlByteCnt = cpu_to_le32(scsi_bufflen(cmd)); memcpy(cmd_entry->cdb, cmd->cmnd, cmd->cmd_len); cmd_entry->dataSegCnt = cpu_to_le16(tot_dsds); cmd_entry->hdr.entryCount = req_cnt; /* Set data transfer direction control flags * NOTE: Look at data_direction bits iff there is data to be * transferred, as the data direction bit is sometimed filled * in when there is no data to be transferred */ cmd_entry->control_flags = CF_NO_DATA; if (scsi_bufflen(cmd)) { if (cmd->sc_data_direction == DMA_TO_DEVICE) cmd_entry->control_flags = CF_WRITE; else if (cmd->sc_data_direction == DMA_FROM_DEVICE) cmd_entry->control_flags = CF_READ; ha->bytes_xfered += scsi_bufflen(cmd); if (ha->bytes_xfered & ~0xFFFFF){ ha->total_mbytes_xferred += ha->bytes_xfered >> 20; ha->bytes_xfered &= 0xFFFFF; } } /* Set tagged queueing control flags */ cmd_entry->control_flags |= CF_SIMPLE_TAG; qla4xxx_advance_req_ring_ptr(ha); qla4xxx_build_scsi_iocbs(srb, cmd_entry, tot_dsds); wmb(); srb->cmd->host_scribble = (unsigned char *)(unsigned long)index; /* update counters */ srb->state = SRB_ACTIVE_STATE; srb->flags |= SRB_DMA_VALID; /* Track IOCB used */ ha->iocb_cnt += req_cnt; srb->iocb_cnt = req_cnt; ha->req_q_count -= req_cnt; ha->isp_ops->queue_iocb(ha); spin_unlock_irqrestore(&ha->hardware_lock, flags); return QLA_SUCCESS; queuing_error: if (tot_dsds) scsi_dma_unmap(cmd); spin_unlock_irqrestore(&ha->hardware_lock, flags); return QLA_ERROR; } int qla4xxx_send_passthru0(struct iscsi_task *task) { struct passthru0 *passthru_iocb; struct iscsi_session *sess = task->conn->session; struct ddb_entry *ddb_entry = sess->dd_data; struct scsi_qla_host *ha = ddb_entry->ha; struct ql4_task_data *task_data = task->dd_data; uint16_t ctrl_flags = 0; unsigned long flags; int ret = QLA_ERROR; spin_lock_irqsave(&ha->hardware_lock, flags); task_data->iocb_req_cnt = 1; /* Put the IOCB on the request queue */ if (!qla4xxx_space_in_req_ring(ha, task_data->iocb_req_cnt)) goto queuing_error; passthru_iocb = (struct passthru0 *) ha->request_ptr; memset(passthru_iocb, 0, sizeof(struct passthru0)); passthru_iocb->hdr.entryType = ET_PASSTHRU0; passthru_iocb->hdr.systemDefined = SD_ISCSI_PDU; passthru_iocb->hdr.entryCount = task_data->iocb_req_cnt; passthru_iocb->handle = task->itt; passthru_iocb->target = cpu_to_le16(ddb_entry->fw_ddb_index); passthru_iocb->timeout = cpu_to_le16(PT_DEFAULT_TIMEOUT); /* Setup the out & in DSDs */ if (task_data->req_len) { memcpy((uint8_t *)task_data->req_buffer + sizeof(struct iscsi_hdr), task->data, task->data_count); ctrl_flags |= PT_FLAG_SEND_BUFFER; passthru_iocb->out_dsd.base.addrLow = cpu_to_le32(LSDW(task_data->req_dma)); passthru_iocb->out_dsd.base.addrHigh = cpu_to_le32(MSDW(task_data->req_dma)); passthru_iocb->out_dsd.count = cpu_to_le32(task->data_count + sizeof(struct iscsi_hdr)); } if (task_data->resp_len) { passthru_iocb->in_dsd.base.addrLow = cpu_to_le32(LSDW(task_data->resp_dma)); 
passthru_iocb->in_dsd.base.addrHigh = cpu_to_le32(MSDW(task_data->resp_dma)); passthru_iocb->in_dsd.count = cpu_to_le32(task_data->resp_len); } ctrl_flags |= (PT_FLAG_ISCSI_PDU | PT_FLAG_WAIT_4_RESPONSE); passthru_iocb->control_flags = cpu_to_le16(ctrl_flags); /* Update the request pointer */ qla4xxx_advance_req_ring_ptr(ha); wmb(); /* Track IOCB used */ ha->iocb_cnt += task_data->iocb_req_cnt; ha->req_q_count -= task_data->iocb_req_cnt; ha->isp_ops->queue_iocb(ha); ret = QLA_SUCCESS; queuing_error: spin_unlock_irqrestore(&ha->hardware_lock, flags); return ret; } static struct mrb *qla4xxx_get_new_mrb(struct scsi_qla_host *ha) { struct mrb *mrb; mrb = kzalloc(sizeof(*mrb), GFP_KERNEL); if (!mrb) return mrb; mrb->ha = ha; return mrb; } static int qla4xxx_send_mbox_iocb(struct scsi_qla_host *ha, struct mrb *mrb, uint32_t *in_mbox) { int rval = QLA_SUCCESS; uint32_t i; unsigned long flags; uint32_t index = 0; /* Acquire hardware specific lock */ spin_lock_irqsave(&ha->hardware_lock, flags); /* Get pointer to the queue entry for the marker */ rval = qla4xxx_get_req_pkt(ha, (struct queue_entry **) &(mrb->mbox)); if (rval != QLA_SUCCESS) goto exit_mbox_iocb; index = ha->mrb_index; /* get valid mrb index*/ for (i = 0; i < MAX_MRB; i++) { index++; if (index == MAX_MRB) index = 1; if (ha->active_mrb_array[index] == NULL) { ha->mrb_index = index; break; } } mrb->iocb_cnt = 1; ha->active_mrb_array[index] = mrb; mrb->mbox->handle = index; mrb->mbox->hdr.entryType = ET_MBOX_CMD; mrb->mbox->hdr.entryCount = mrb->iocb_cnt; memcpy(mrb->mbox->in_mbox, in_mbox, 32); mrb->mbox_cmd = in_mbox[0]; wmb(); ha->iocb_cnt += mrb->iocb_cnt; ha->isp_ops->queue_iocb(ha); exit_mbox_iocb: spin_unlock_irqrestore(&ha->hardware_lock, flags); return rval; } int qla4xxx_ping_iocb(struct scsi_qla_host *ha, uint32_t options, uint32_t payload_size, uint32_t pid, uint8_t *ipaddr) { uint32_t in_mbox[8]; struct mrb *mrb = NULL; int rval = QLA_SUCCESS; memset(in_mbox, 0, sizeof(in_mbox)); mrb = qla4xxx_get_new_mrb(ha); if (!mrb) { DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: fail to get new mrb\n", __func__)); rval = QLA_ERROR; goto exit_ping; } in_mbox[0] = MBOX_CMD_PING; in_mbox[1] = options; memcpy(&in_mbox[2], &ipaddr[0], 4); memcpy(&in_mbox[3], &ipaddr[4], 4); memcpy(&in_mbox[4], &ipaddr[8], 4); memcpy(&in_mbox[5], &ipaddr[12], 4); in_mbox[6] = payload_size; mrb->pid = pid; rval = qla4xxx_send_mbox_iocb(ha, mrb, in_mbox); if (rval != QLA_SUCCESS) goto exit_ping; return rval; exit_ping: kfree(mrb); return rval; }
linux-master
drivers/scsi/qla4xxx/ql4_iocb.c
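qla4xxx_space_in_req_ring() and qla4xxx_advance_req_ring_ptr() in ql4_iocb.c above implement a circular request ring: the cached free count is refreshed from the firmware's shadow "out" index only when it looks too small, two entries of slack are always kept beyond the request itself, and the producer index wraps at REQUEST_QUEUE_DEPTH. The standalone model below reproduces that arithmetic outside the driver so the wraparound cases are easy to step through; struct req_ring, RING_DEPTH and the values in main() are illustrative stand-ins, not driver definitions.

#include <stdint.h>
#include <stdio.h>

#define RING_DEPTH 128			/* stand-in for REQUEST_QUEUE_DEPTH */

struct req_ring {
	uint16_t request_in;		/* driver's producer index */
	uint16_t req_q_count;		/* cached count of free entries */
};

/*
 * Mirrors qla4xxx_space_in_req_ring(): recompute the free count from the
 * consumer ("shadow out") index only when the cached value looks too
 * small, then require two spare entries beyond the request itself.
 */
static int space_in_ring(struct req_ring *r, uint16_t req_cnt,
			 uint16_t shadow_out)
{
	if (req_cnt + 2 >= r->req_q_count) {
		if (r->request_in < shadow_out)
			r->req_q_count = shadow_out - r->request_in;
		else
			r->req_q_count = RING_DEPTH -
					 (r->request_in - shadow_out);
	}
	return req_cnt + 2 < r->req_q_count;
}

/* Mirrors qla4xxx_advance_req_ring_ptr(): bump and wrap the producer. */
static void advance_ring(struct req_ring *r)
{
	if (r->request_in == RING_DEPTH - 1)
		r->request_in = 0;
	else
		r->request_in++;
}

int main(void)
{
	struct req_ring r = { .request_in = RING_DEPTH - 1, .req_q_count = 3 };
	uint16_t shadow_out = 10;	/* firmware consumed up to index 10 */

	if (space_in_ring(&r, 1, shadow_out)) {
		advance_ring(&r);	/* producer wraps from 127 back to 0 */
		r.req_q_count -= 1;
	}
	printf("request_in=%u free=%u\n",
	       (unsigned)r.request_in, (unsigned)r.req_q_count);
	return 0;
}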
// SPDX-License-Identifier: GPL-2.0-only /* * QLogic iSCSI HBA Driver * Copyright (c) 2003-2013 QLogic Corporation */ #include <linux/delay.h> #include <linux/io.h> #include <linux/pci.h> #include <linux/ratelimit.h> #include "ql4_def.h" #include "ql4_glbl.h" #include "ql4_inline.h" #include <linux/io-64-nonatomic-lo-hi.h> #define TIMEOUT_100_MS 100 #define MASK(n) DMA_BIT_MASK(n) #define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | ((addr >> 25) & 0x3ff)) #define OCM_WIN(addr) (((addr & 0x1ff0000) >> 1) | ((addr >> 25) & 0x3ff)) #define MS_WIN(addr) (addr & 0x0ffc0000) #define QLA82XX_PCI_MN_2M (0) #define QLA82XX_PCI_MS_2M (0x80000) #define QLA82XX_PCI_OCM0_2M (0xc0000) #define VALID_OCM_ADDR(addr) (((addr) & 0x3f800) != 0x3f800) #define GET_MEM_OFFS_2M(addr) (addr & MASK(18)) /* CRB window related */ #define CRB_BLK(off) ((off >> 20) & 0x3f) #define CRB_SUBBLK(off) ((off >> 16) & 0xf) #define CRB_WINDOW_2M (0x130060) #define CRB_HI(off) ((qla4_82xx_crb_hub_agt[CRB_BLK(off)] << 20) | \ ((off) & 0xf0000)) #define QLA82XX_PCI_CAMQM_2M_END (0x04800800UL) #define QLA82XX_PCI_CAMQM_2M_BASE (0x000ff800UL) #define CRB_INDIRECT_2M (0x1e0000UL) static inline void __iomem * qla4_8xxx_pci_base_offsetfset(struct scsi_qla_host *ha, unsigned long off) { if ((off < ha->first_page_group_end) && (off >= ha->first_page_group_start)) return (void __iomem *)(ha->nx_pcibase + off); return NULL; } static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, 0x410000AC, 0x410000B8, 0x410000BC }; #define MAX_CRB_XFORM 60 static unsigned long crb_addr_xform[MAX_CRB_XFORM]; static int qla4_8xxx_crb_table_initialized; #define qla4_8xxx_crb_addr_transform(name) \ (crb_addr_xform[QLA82XX_HW_PX_MAP_CRB_##name] = \ QLA82XX_HW_CRB_HUB_AGT_ADR_##name << 20) static void qla4_82xx_crb_addr_transform_setup(void) { qla4_8xxx_crb_addr_transform(XDMA); qla4_8xxx_crb_addr_transform(TIMR); qla4_8xxx_crb_addr_transform(SRE); qla4_8xxx_crb_addr_transform(SQN3); qla4_8xxx_crb_addr_transform(SQN2); qla4_8xxx_crb_addr_transform(SQN1); qla4_8xxx_crb_addr_transform(SQN0); qla4_8xxx_crb_addr_transform(SQS3); qla4_8xxx_crb_addr_transform(SQS2); qla4_8xxx_crb_addr_transform(SQS1); qla4_8xxx_crb_addr_transform(SQS0); qla4_8xxx_crb_addr_transform(RPMX7); qla4_8xxx_crb_addr_transform(RPMX6); qla4_8xxx_crb_addr_transform(RPMX5); qla4_8xxx_crb_addr_transform(RPMX4); qla4_8xxx_crb_addr_transform(RPMX3); qla4_8xxx_crb_addr_transform(RPMX2); qla4_8xxx_crb_addr_transform(RPMX1); qla4_8xxx_crb_addr_transform(RPMX0); qla4_8xxx_crb_addr_transform(ROMUSB); qla4_8xxx_crb_addr_transform(SN); qla4_8xxx_crb_addr_transform(QMN); qla4_8xxx_crb_addr_transform(QMS); qla4_8xxx_crb_addr_transform(PGNI); qla4_8xxx_crb_addr_transform(PGND); qla4_8xxx_crb_addr_transform(PGN3); qla4_8xxx_crb_addr_transform(PGN2); qla4_8xxx_crb_addr_transform(PGN1); qla4_8xxx_crb_addr_transform(PGN0); qla4_8xxx_crb_addr_transform(PGSI); qla4_8xxx_crb_addr_transform(PGSD); qla4_8xxx_crb_addr_transform(PGS3); qla4_8xxx_crb_addr_transform(PGS2); qla4_8xxx_crb_addr_transform(PGS1); qla4_8xxx_crb_addr_transform(PGS0); qla4_8xxx_crb_addr_transform(PS); qla4_8xxx_crb_addr_transform(PH); qla4_8xxx_crb_addr_transform(NIU); qla4_8xxx_crb_addr_transform(I2Q); qla4_8xxx_crb_addr_transform(EG); qla4_8xxx_crb_addr_transform(MN); qla4_8xxx_crb_addr_transform(MS); qla4_8xxx_crb_addr_transform(CAS2); qla4_8xxx_crb_addr_transform(CAS1); qla4_8xxx_crb_addr_transform(CAS0); qla4_8xxx_crb_addr_transform(CAM); qla4_8xxx_crb_addr_transform(C2C1); qla4_8xxx_crb_addr_transform(C2C0); 
qla4_8xxx_crb_addr_transform(SMB); qla4_8xxx_crb_addr_transform(OCM0); qla4_8xxx_crb_addr_transform(I2C0); qla4_8xxx_crb_table_initialized = 1; } static struct crb_128M_2M_block_map crb_128M_2M_map[64] = { {{{0, 0, 0, 0} } }, /* 0: PCI */ {{{1, 0x0100000, 0x0102000, 0x120000}, /* 1: PCIE */ {1, 0x0110000, 0x0120000, 0x130000}, {1, 0x0120000, 0x0122000, 0x124000}, {1, 0x0130000, 0x0132000, 0x126000}, {1, 0x0140000, 0x0142000, 0x128000}, {1, 0x0150000, 0x0152000, 0x12a000}, {1, 0x0160000, 0x0170000, 0x110000}, {1, 0x0170000, 0x0172000, 0x12e000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {1, 0x01e0000, 0x01e0800, 0x122000}, {0, 0x0000000, 0x0000000, 0x000000} } }, {{{1, 0x0200000, 0x0210000, 0x180000} } },/* 2: MN */ {{{0, 0, 0, 0} } }, /* 3: */ {{{1, 0x0400000, 0x0401000, 0x169000} } },/* 4: P2NR1 */ {{{1, 0x0500000, 0x0510000, 0x140000} } },/* 5: SRE */ {{{1, 0x0600000, 0x0610000, 0x1c0000} } },/* 6: NIU */ {{{1, 0x0700000, 0x0704000, 0x1b8000} } },/* 7: QM */ {{{1, 0x0800000, 0x0802000, 0x170000}, /* 8: SQM0 */ {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {1, 0x08f0000, 0x08f2000, 0x172000} } }, {{{1, 0x0900000, 0x0902000, 0x174000}, /* 9: SQM1*/ {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {1, 0x09f0000, 0x09f2000, 0x176000} } }, {{{0, 0x0a00000, 0x0a02000, 0x178000}, /* 10: SQM2*/ {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {1, 0x0af0000, 0x0af2000, 0x17a000} } }, {{{0, 0x0b00000, 0x0b02000, 0x17c000}, /* 11: SQM3*/ {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 
0x000000}, {1, 0x0bf0000, 0x0bf2000, 0x17e000} } }, {{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },/* 12: I2Q */ {{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },/* 13: TMR */ {{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },/* 14: ROMUSB */ {{{1, 0x0f00000, 0x0f01000, 0x164000} } },/* 15: PEG4 */ {{{0, 0x1000000, 0x1004000, 0x1a8000} } },/* 16: XDMA */ {{{1, 0x1100000, 0x1101000, 0x160000} } },/* 17: PEG0 */ {{{1, 0x1200000, 0x1201000, 0x161000} } },/* 18: PEG1 */ {{{1, 0x1300000, 0x1301000, 0x162000} } },/* 19: PEG2 */ {{{1, 0x1400000, 0x1401000, 0x163000} } },/* 20: PEG3 */ {{{1, 0x1500000, 0x1501000, 0x165000} } },/* 21: P2ND */ {{{1, 0x1600000, 0x1601000, 0x166000} } },/* 22: P2NI */ {{{0, 0, 0, 0} } }, /* 23: */ {{{0, 0, 0, 0} } }, /* 24: */ {{{0, 0, 0, 0} } }, /* 25: */ {{{0, 0, 0, 0} } }, /* 26: */ {{{0, 0, 0, 0} } }, /* 27: */ {{{0, 0, 0, 0} } }, /* 28: */ {{{1, 0x1d00000, 0x1d10000, 0x190000} } },/* 29: MS */ {{{1, 0x1e00000, 0x1e01000, 0x16a000} } },/* 30: P2NR2 */ {{{1, 0x1f00000, 0x1f10000, 0x150000} } },/* 31: EPG */ {{{0} } }, /* 32: PCI */ {{{1, 0x2100000, 0x2102000, 0x120000}, /* 33: PCIE */ {1, 0x2110000, 0x2120000, 0x130000}, {1, 0x2120000, 0x2122000, 0x124000}, {1, 0x2130000, 0x2132000, 0x126000}, {1, 0x2140000, 0x2142000, 0x128000}, {1, 0x2150000, 0x2152000, 0x12a000}, {1, 0x2160000, 0x2170000, 0x110000}, {1, 0x2170000, 0x2172000, 0x12e000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000} } }, {{{1, 0x2200000, 0x2204000, 0x1b0000} } },/* 34: CAM */ {{{0} } }, /* 35: */ {{{0} } }, /* 36: */ {{{0} } }, /* 37: */ {{{0} } }, /* 38: */ {{{0} } }, /* 39: */ {{{1, 0x2800000, 0x2804000, 0x1a4000} } },/* 40: TMR */ {{{1, 0x2900000, 0x2901000, 0x16b000} } },/* 41: P2NR3 */ {{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },/* 42: RPMX1 */ {{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },/* 43: RPMX2 */ {{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },/* 44: RPMX3 */ {{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },/* 45: RPMX4 */ {{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },/* 46: RPMX5 */ {{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },/* 47: RPMX6 */ {{{1, 0x3000000, 0x3000400, 0x1adc00} } },/* 48: RPMX7 */ {{{0, 0x3100000, 0x3104000, 0x1a8000} } },/* 49: XDMA */ {{{1, 0x3200000, 0x3204000, 0x1d4000} } },/* 50: I2Q */ {{{1, 0x3300000, 0x3304000, 0x1a0000} } },/* 51: ROMUSB */ {{{0} } }, /* 52: */ {{{1, 0x3500000, 0x3500400, 0x1ac000} } },/* 53: RPMX0 */ {{{1, 0x3600000, 0x3600400, 0x1ae000} } },/* 54: RPMX8 */ {{{1, 0x3700000, 0x3700400, 0x1ae400} } },/* 55: RPMX9 */ {{{1, 0x3800000, 0x3804000, 0x1d0000} } },/* 56: OCM0 */ {{{1, 0x3900000, 0x3904000, 0x1b4000} } },/* 57: CRYPTO */ {{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },/* 58: SMB */ {{{0} } }, /* 59: I2C0 */ {{{0} } }, /* 60: I2C1 */ {{{1, 0x3d00000, 0x3d04000, 0x1dc000} } },/* 61: LPC */ {{{1, 0x3e00000, 0x3e01000, 0x167000} } },/* 62: P2NC */ {{{1, 0x3f00000, 0x3f01000, 0x168000} } } /* 63: P2NR0 */ }; /* * top 12 bits of crb internal address (hub, agent) */ static unsigned qla4_82xx_crb_hub_agt[64] = { 0, QLA82XX_HW_CRB_HUB_AGT_ADR_PS, QLA82XX_HW_CRB_HUB_AGT_ADR_MN, QLA82XX_HW_CRB_HUB_AGT_ADR_MS, 0, QLA82XX_HW_CRB_HUB_AGT_ADR_SRE, QLA82XX_HW_CRB_HUB_AGT_ADR_NIU, QLA82XX_HW_CRB_HUB_AGT_ADR_QMN, QLA82XX_HW_CRB_HUB_AGT_ADR_SQN0, QLA82XX_HW_CRB_HUB_AGT_ADR_SQN1, QLA82XX_HW_CRB_HUB_AGT_ADR_SQN2, QLA82XX_HW_CRB_HUB_AGT_ADR_SQN3, 
QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q, QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR, QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB, QLA82XX_HW_CRB_HUB_AGT_ADR_PGN4, QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA, QLA82XX_HW_CRB_HUB_AGT_ADR_PGN0, QLA82XX_HW_CRB_HUB_AGT_ADR_PGN1, QLA82XX_HW_CRB_HUB_AGT_ADR_PGN2, QLA82XX_HW_CRB_HUB_AGT_ADR_PGN3, QLA82XX_HW_CRB_HUB_AGT_ADR_PGND, QLA82XX_HW_CRB_HUB_AGT_ADR_PGNI, QLA82XX_HW_CRB_HUB_AGT_ADR_PGS0, QLA82XX_HW_CRB_HUB_AGT_ADR_PGS1, QLA82XX_HW_CRB_HUB_AGT_ADR_PGS2, QLA82XX_HW_CRB_HUB_AGT_ADR_PGS3, 0, QLA82XX_HW_CRB_HUB_AGT_ADR_PGSI, QLA82XX_HW_CRB_HUB_AGT_ADR_SN, 0, QLA82XX_HW_CRB_HUB_AGT_ADR_EG, 0, QLA82XX_HW_CRB_HUB_AGT_ADR_PS, QLA82XX_HW_CRB_HUB_AGT_ADR_CAM, 0, 0, 0, 0, 0, QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR, 0, QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX1, QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX2, QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX3, QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX4, QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX5, QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX6, QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX7, QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA, QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q, QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB, 0, QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX0, QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX8, QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX9, QLA82XX_HW_CRB_HUB_AGT_ADR_OCM0, 0, QLA82XX_HW_CRB_HUB_AGT_ADR_SMB, QLA82XX_HW_CRB_HUB_AGT_ADR_I2C0, QLA82XX_HW_CRB_HUB_AGT_ADR_I2C1, 0, QLA82XX_HW_CRB_HUB_AGT_ADR_PGNC, 0, }; /* Device states */ static char *qdev_state[] = { "Unknown", "Cold", "Initializing", "Ready", "Need Reset", "Need Quiescent", "Failed", "Quiescent", }; /* * In: 'off' is offset from CRB space in 128M pci map * Out: 'off' is 2M pci map addr * side effect: lock crb window */ static void qla4_82xx_pci_set_crbwindow_2M(struct scsi_qla_host *ha, ulong *off) { u32 win_read; ha->crb_win = CRB_HI(*off); writel(ha->crb_win, (void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase)); /* Read back value to make sure write has gone through before trying * to use it. 
*/ win_read = readl((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase)); if (win_read != ha->crb_win) { DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Written crbwin (0x%x) != Read crbwin (0x%x)," " off=0x%lx\n", __func__, ha->crb_win, win_read, *off)); } *off = (*off & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase; } #define CRB_WIN_LOCK_TIMEOUT 100000000 /* * Context: atomic */ static int qla4_82xx_crb_win_lock(struct scsi_qla_host *ha) { int done = 0, timeout = 0; while (!done) { /* acquire semaphore3 from PCI HW block */ done = qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_LOCK)); if (done == 1) break; if (timeout >= CRB_WIN_LOCK_TIMEOUT) return -1; timeout++; udelay(10); } qla4_82xx_wr_32(ha, QLA82XX_CRB_WIN_LOCK_ID, ha->func_num); return 0; } void qla4_82xx_crb_win_unlock(struct scsi_qla_host *ha) { qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK)); } void qla4_82xx_wr_32(struct scsi_qla_host *ha, ulong off, u32 data) { unsigned long flags = 0; int rv; rv = qla4_82xx_pci_get_crb_addr_2M(ha, &off); BUG_ON(rv == -1); if (rv == 1) { write_lock_irqsave(&ha->hw_lock, flags); qla4_82xx_crb_win_lock(ha); qla4_82xx_pci_set_crbwindow_2M(ha, &off); } writel(data, (void __iomem *)off); if (rv == 1) { qla4_82xx_crb_win_unlock(ha); write_unlock_irqrestore(&ha->hw_lock, flags); } } uint32_t qla4_82xx_rd_32(struct scsi_qla_host *ha, ulong off) { unsigned long flags = 0; int rv; u32 data; rv = qla4_82xx_pci_get_crb_addr_2M(ha, &off); BUG_ON(rv == -1); if (rv == 1) { write_lock_irqsave(&ha->hw_lock, flags); qla4_82xx_crb_win_lock(ha); qla4_82xx_pci_set_crbwindow_2M(ha, &off); } data = readl((void __iomem *)off); if (rv == 1) { qla4_82xx_crb_win_unlock(ha); write_unlock_irqrestore(&ha->hw_lock, flags); } return data; } /* Minidump related functions */ int qla4_82xx_md_rd_32(struct scsi_qla_host *ha, uint32_t off, uint32_t *data) { uint32_t win_read, off_value; int rval = QLA_SUCCESS; off_value = off & 0xFFFF0000; writel(off_value, (void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase)); /* * Read back value to make sure write has gone through before trying * to use it. */ win_read = readl((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase)); if (win_read != off_value) { DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Written (0x%x) != Read (0x%x), off=0x%x\n", __func__, off_value, win_read, off)); rval = QLA_ERROR; } else { off_value = off & 0x0000FFFF; *data = readl((void __iomem *)(off_value + CRB_INDIRECT_2M + ha->nx_pcibase)); } return rval; } int qla4_82xx_md_wr_32(struct scsi_qla_host *ha, uint32_t off, uint32_t data) { uint32_t win_read, off_value; int rval = QLA_SUCCESS; off_value = off & 0xFFFF0000; writel(off_value, (void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase)); /* Read back value to make sure write has gone through before trying * to use it. */ win_read = readl((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase)); if (win_read != off_value) { DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Written (0x%x) != Read (0x%x), off=0x%x\n", __func__, off_value, win_read, off)); rval = QLA_ERROR; } else { off_value = off & 0x0000FFFF; writel(data, (void __iomem *)(off_value + CRB_INDIRECT_2M + ha->nx_pcibase)); } return rval; } #define IDC_LOCK_TIMEOUT 100000000 /** * qla4_82xx_idc_lock - hw_lock * @ha: pointer to adapter structure * * General purpose lock used to synchronize access to * CRB_DEV_STATE, CRB_DEV_REF_COUNT, etc. 
* * Context: task, can sleep **/ int qla4_82xx_idc_lock(struct scsi_qla_host *ha) { int done = 0, timeout = 0; might_sleep(); while (!done) { /* acquire semaphore5 from PCI HW block */ done = qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_LOCK)); if (done == 1) break; if (timeout >= IDC_LOCK_TIMEOUT) return -1; timeout++; msleep(100); } return 0; } void qla4_82xx_idc_unlock(struct scsi_qla_host *ha) { qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_UNLOCK)); } int qla4_82xx_pci_get_crb_addr_2M(struct scsi_qla_host *ha, ulong *off) { struct crb_128M_2M_sub_block_map *m; if (*off >= QLA82XX_CRB_MAX) return -1; if (*off >= QLA82XX_PCI_CAMQM && (*off < QLA82XX_PCI_CAMQM_2M_END)) { *off = (*off - QLA82XX_PCI_CAMQM) + QLA82XX_PCI_CAMQM_2M_BASE + ha->nx_pcibase; return 0; } if (*off < QLA82XX_PCI_CRBSPACE) return -1; *off -= QLA82XX_PCI_CRBSPACE; /* * Try direct map */ m = &crb_128M_2M_map[CRB_BLK(*off)].sub_block[CRB_SUBBLK(*off)]; if (m->valid && (m->start_128M <= *off) && (m->end_128M > *off)) { *off = *off + m->start_2M - m->start_128M + ha->nx_pcibase; return 0; } /* * Not in direct map, use crb window */ return 1; } /* * check memory access boundary. * used by test agent. support ddr access only for now */ static unsigned long qla4_82xx_pci_mem_bound_check(struct scsi_qla_host *ha, unsigned long long addr, int size) { if (!QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_DDR_NET, QLA8XXX_ADDR_DDR_NET_MAX) || !QLA8XXX_ADDR_IN_RANGE(addr + size - 1, QLA8XXX_ADDR_DDR_NET, QLA8XXX_ADDR_DDR_NET_MAX) || ((size != 1) && (size != 2) && (size != 4) && (size != 8))) { return 0; } return 1; } static int qla4_82xx_pci_set_window_warning_count; static unsigned long qla4_82xx_pci_set_window(struct scsi_qla_host *ha, unsigned long long addr) { int window; u32 win_read; if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_DDR_NET, QLA8XXX_ADDR_DDR_NET_MAX)) { /* DDR network side */ window = MN_WIN(addr); ha->ddr_mn_window = window; qla4_82xx_wr_32(ha, ha->mn_win_crb | QLA82XX_PCI_CRBSPACE, window); win_read = qla4_82xx_rd_32(ha, ha->mn_win_crb | QLA82XX_PCI_CRBSPACE); if ((win_read << 17) != window) { ql4_printk(KERN_WARNING, ha, "%s: Written MNwin (0x%x) != Read MNwin (0x%x)\n", __func__, window, win_read); } addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_DDR_NET; } else if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_OCM0, QLA8XXX_ADDR_OCM0_MAX)) { unsigned int temp1; /* if bits 19:18&17:11 are on */ if ((addr & 0x00ff800) == 0xff800) { printk("%s: QM access not handled.\n", __func__); addr = -1UL; } window = OCM_WIN(addr); ha->ddr_mn_window = window; qla4_82xx_wr_32(ha, ha->mn_win_crb | QLA82XX_PCI_CRBSPACE, window); win_read = qla4_82xx_rd_32(ha, ha->mn_win_crb | QLA82XX_PCI_CRBSPACE); temp1 = ((window & 0x1FF) << 7) | ((window & 0x0FFFE0000) >> 17); if (win_read != temp1) { printk("%s: Written OCMwin (0x%x) != Read" " OCMwin (0x%x)\n", __func__, temp1, win_read); } addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_OCM0_2M; } else if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_QDR_NET, QLA82XX_P3_ADDR_QDR_NET_MAX)) { /* QDR network side */ window = MS_WIN(addr); ha->qdr_sn_window = window; qla4_82xx_wr_32(ha, ha->ms_win_crb | QLA82XX_PCI_CRBSPACE, window); win_read = qla4_82xx_rd_32(ha, ha->ms_win_crb | QLA82XX_PCI_CRBSPACE); if (win_read != window) { printk("%s: Written MSwin (0x%x) != Read " "MSwin (0x%x)\n", __func__, window, win_read); } addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_QDR_NET; } else { /* * peg gdb frequently accesses memory that doesn't exist, * this limits the chit chat so debugging isn't slowed down. 
*/ if ((qla4_82xx_pci_set_window_warning_count++ < 8) || (qla4_82xx_pci_set_window_warning_count%64 == 0)) { printk("%s: Warning:%s Unknown address range!\n", __func__, DRIVER_NAME); } addr = -1UL; } return addr; } /* check if address is in the same windows as the previous access */ static int qla4_82xx_pci_is_same_window(struct scsi_qla_host *ha, unsigned long long addr) { int window; unsigned long long qdr_max; qdr_max = QLA82XX_P3_ADDR_QDR_NET_MAX; if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_DDR_NET, QLA8XXX_ADDR_DDR_NET_MAX)) { /* DDR network side */ BUG(); /* MN access can not come here */ } else if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_OCM0, QLA8XXX_ADDR_OCM0_MAX)) { return 1; } else if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_OCM1, QLA8XXX_ADDR_OCM1_MAX)) { return 1; } else if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_QDR_NET, qdr_max)) { /* QDR network side */ window = ((addr - QLA8XXX_ADDR_QDR_NET) >> 22) & 0x3f; if (ha->qdr_sn_window == window) return 1; } return 0; } static int qla4_82xx_pci_mem_read_direct(struct scsi_qla_host *ha, u64 off, void *data, int size) { unsigned long flags; void __iomem *addr; int ret = 0; u64 start; void __iomem *mem_ptr = NULL; unsigned long mem_base; unsigned long mem_page; write_lock_irqsave(&ha->hw_lock, flags); /* * If attempting to access unknown address or straddle hw windows, * do not access. */ start = qla4_82xx_pci_set_window(ha, off); if ((start == -1UL) || (qla4_82xx_pci_is_same_window(ha, off + size - 1) == 0)) { write_unlock_irqrestore(&ha->hw_lock, flags); printk(KERN_ERR"%s out of bound pci memory access. " "offset is 0x%llx\n", DRIVER_NAME, off); return -1; } addr = qla4_8xxx_pci_base_offsetfset(ha, start); if (!addr) { write_unlock_irqrestore(&ha->hw_lock, flags); mem_base = pci_resource_start(ha->pdev, 0); mem_page = start & PAGE_MASK; /* Map two pages whenever user tries to access addresses in two consecutive pages. */ if (mem_page != ((start + size - 1) & PAGE_MASK)) mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE * 2); else mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE); if (mem_ptr == NULL) { *(u8 *)data = 0; return -1; } addr = mem_ptr; addr += start & (PAGE_SIZE - 1); write_lock_irqsave(&ha->hw_lock, flags); } switch (size) { case 1: *(u8 *)data = readb(addr); break; case 2: *(u16 *)data = readw(addr); break; case 4: *(u32 *)data = readl(addr); break; case 8: *(u64 *)data = readq(addr); break; default: ret = -1; break; } write_unlock_irqrestore(&ha->hw_lock, flags); if (mem_ptr) iounmap(mem_ptr); return ret; } static int qla4_82xx_pci_mem_write_direct(struct scsi_qla_host *ha, u64 off, void *data, int size) { unsigned long flags; void __iomem *addr; int ret = 0; u64 start; void __iomem *mem_ptr = NULL; unsigned long mem_base; unsigned long mem_page; write_lock_irqsave(&ha->hw_lock, flags); /* * If attempting to access unknown address or straddle hw windows, * do not access. */ start = qla4_82xx_pci_set_window(ha, off); if ((start == -1UL) || (qla4_82xx_pci_is_same_window(ha, off + size - 1) == 0)) { write_unlock_irqrestore(&ha->hw_lock, flags); printk(KERN_ERR"%s out of bound pci memory access. " "offset is 0x%llx\n", DRIVER_NAME, off); return -1; } addr = qla4_8xxx_pci_base_offsetfset(ha, start); if (!addr) { write_unlock_irqrestore(&ha->hw_lock, flags); mem_base = pci_resource_start(ha->pdev, 0); mem_page = start & PAGE_MASK; /* Map two pages whenever user tries to access addresses in two consecutive pages. 
*/ if (mem_page != ((start + size - 1) & PAGE_MASK)) mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE*2); else mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE); if (mem_ptr == NULL) return -1; addr = mem_ptr; addr += start & (PAGE_SIZE - 1); write_lock_irqsave(&ha->hw_lock, flags); } switch (size) { case 1: writeb(*(u8 *)data, addr); break; case 2: writew(*(u16 *)data, addr); break; case 4: writel(*(u32 *)data, addr); break; case 8: writeq(*(u64 *)data, addr); break; default: ret = -1; break; } write_unlock_irqrestore(&ha->hw_lock, flags); if (mem_ptr) iounmap(mem_ptr); return ret; } #define MTU_FUDGE_FACTOR 100 static unsigned long qla4_82xx_decode_crb_addr(unsigned long addr) { int i; unsigned long base_addr, offset, pci_base; if (!qla4_8xxx_crb_table_initialized) qla4_82xx_crb_addr_transform_setup(); pci_base = ADDR_ERROR; base_addr = addr & 0xfff00000; offset = addr & 0x000fffff; for (i = 0; i < MAX_CRB_XFORM; i++) { if (crb_addr_xform[i] == base_addr) { pci_base = i << 20; break; } } if (pci_base == ADDR_ERROR) return pci_base; else return pci_base + offset; } static long rom_max_timeout = 100; static long qla4_82xx_rom_lock_timeout = 100; /* * Context: task, can_sleep */ static int qla4_82xx_rom_lock(struct scsi_qla_host *ha) { int done = 0, timeout = 0; might_sleep(); while (!done) { /* acquire semaphore2 from PCI HW block */ done = qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_LOCK)); if (done == 1) break; if (timeout >= qla4_82xx_rom_lock_timeout) return -1; timeout++; msleep(20); } qla4_82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ROM_LOCK_DRIVER); return 0; } static void qla4_82xx_rom_unlock(struct scsi_qla_host *ha) { qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK)); } static int qla4_82xx_wait_rom_done(struct scsi_qla_host *ha) { long timeout = 0; long done = 0 ; while (done == 0) { done = qla4_82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS); done &= 2; timeout++; if (timeout >= rom_max_timeout) { printk("%s: Timeout reached waiting for rom done", DRIVER_NAME); return -1; } } return 0; } static int qla4_82xx_do_rom_fast_read(struct scsi_qla_host *ha, int addr, int *valp) { qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr); qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0); qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3); qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0xb); if (qla4_82xx_wait_rom_done(ha)) { printk("%s: Error waiting for rom done\n", DRIVER_NAME); return -1; } /* reset abyte_cnt and dummy_byte_cnt */ qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0); udelay(10); qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0); *valp = qla4_82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA); return 0; } static int qla4_82xx_rom_fast_read(struct scsi_qla_host *ha, int addr, int *valp) { int ret, loops = 0; while ((qla4_82xx_rom_lock(ha) != 0) && (loops < 50000)) { udelay(100); loops++; } if (loops >= 50000) { ql4_printk(KERN_WARNING, ha, "%s: qla4_82xx_rom_lock failed\n", DRIVER_NAME); return -1; } ret = qla4_82xx_do_rom_fast_read(ha, addr, valp); qla4_82xx_rom_unlock(ha); return ret; } /* * This routine does CRB initialize sequence * to put the ISP into operational state */ static int qla4_82xx_pinit_from_rom(struct scsi_qla_host *ha, int verbose) { int addr, val; int i ; struct crb_addr_pair *buf; unsigned long off; unsigned offset, n; struct crb_addr_pair { long addr; long data; }; /* Halt all the indiviual PEGs and other blocks of the ISP */ qla4_82xx_rom_lock(ha); /* disable all I2Q */ qla4_82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x10, 0x0); qla4_82xx_wr_32(ha, 
QLA82XX_CRB_I2Q + 0x14, 0x0); qla4_82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x18, 0x0); qla4_82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x1c, 0x0); qla4_82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x20, 0x0); qla4_82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x24, 0x0); /* disable all niu interrupts */ qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x40, 0xff); /* disable xge rx/tx */ qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x70000, 0x00); /* disable xg1 rx/tx */ qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x80000, 0x00); /* disable sideband mac */ qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x90000, 0x00); /* disable ap0 mac */ qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0xa0000, 0x00); /* disable ap1 mac */ qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0xb0000, 0x00); /* halt sre */ val = qla4_82xx_rd_32(ha, QLA82XX_CRB_SRE + 0x1000); qla4_82xx_wr_32(ha, QLA82XX_CRB_SRE + 0x1000, val & (~(0x1))); /* halt epg */ qla4_82xx_wr_32(ha, QLA82XX_CRB_EPG + 0x1300, 0x1); /* halt timers */ qla4_82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x0, 0x0); qla4_82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x8, 0x0); qla4_82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x10, 0x0); qla4_82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x18, 0x0); qla4_82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x100, 0x0); qla4_82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x200, 0x0); /* halt pegs */ qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c, 1); qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1 + 0x3c, 1); qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c, 1); qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c, 1); qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c, 1); msleep(5); /* big hammer */ if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) /* don't reset CAM block on reset */ qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff); else qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xffffffff); qla4_82xx_rom_unlock(ha); /* Read the signature value from the flash. 
* Offset 0: Contain signature (0xcafecafe) * Offset 4: Offset and number of addr/value pairs * that present in CRB initialize sequence */ if (qla4_82xx_rom_fast_read(ha, 0, &n) != 0 || n != 0xcafecafeUL || qla4_82xx_rom_fast_read(ha, 4, &n) != 0) { ql4_printk(KERN_WARNING, ha, "[ERROR] Reading crb_init area: n: %08x\n", n); return -1; } /* Offset in flash = lower 16 bits * Number of enteries = upper 16 bits */ offset = n & 0xffffU; n = (n >> 16) & 0xffffU; /* number of addr/value pair should not exceed 1024 enteries */ if (n >= 1024) { ql4_printk(KERN_WARNING, ha, "%s: %s:n=0x%x [ERROR] Card flash not initialized.\n", DRIVER_NAME, __func__, n); return -1; } ql4_printk(KERN_INFO, ha, "%s: %d CRB init values found in ROM.\n", DRIVER_NAME, n); buf = kmalloc_array(n, sizeof(struct crb_addr_pair), GFP_KERNEL); if (buf == NULL) { ql4_printk(KERN_WARNING, ha, "%s: [ERROR] Unable to malloc memory.\n", DRIVER_NAME); return -1; } for (i = 0; i < n; i++) { if (qla4_82xx_rom_fast_read(ha, 8*i + 4*offset, &val) != 0 || qla4_82xx_rom_fast_read(ha, 8*i + 4*offset + 4, &addr) != 0) { kfree(buf); return -1; } buf[i].addr = addr; buf[i].data = val; } for (i = 0; i < n; i++) { /* Translate internal CRB initialization * address to PCI bus address */ off = qla4_82xx_decode_crb_addr((unsigned long)buf[i].addr) + QLA82XX_PCI_CRBSPACE; /* Not all CRB addr/value pair to be written, * some of them are skipped */ /* skip if LS bit is set*/ if (off & 0x1) { DEBUG2(ql4_printk(KERN_WARNING, ha, "Skip CRB init replay for offset = 0x%lx\n", off)); continue; } /* skipping cold reboot MAGIC */ if (off == QLA82XX_CAM_RAM(0x1fc)) continue; /* do not reset PCI */ if (off == (ROMUSB_GLB + 0xbc)) continue; /* skip core clock, so that firmware can increase the clock */ if (off == (ROMUSB_GLB + 0xc8)) continue; /* skip the function enable register */ if (off == QLA82XX_PCIE_REG(PCIE_SETUP_FUNCTION)) continue; if (off == QLA82XX_PCIE_REG(PCIE_SETUP_FUNCTION2)) continue; if ((off & 0x0ff00000) == QLA82XX_CRB_SMB) continue; if ((off & 0x0ff00000) == QLA82XX_CRB_DDR_NET) continue; if (off == ADDR_ERROR) { ql4_printk(KERN_WARNING, ha, "%s: [ERROR] Unknown addr: 0x%08lx\n", DRIVER_NAME, buf[i].addr); continue; } qla4_82xx_wr_32(ha, off, buf[i].data); /* ISP requires much bigger delay to settle down, * else crb_window returns 0xffffffff */ if (off == QLA82XX_ROMUSB_GLB_SW_RESET) msleep(1000); /* ISP requires millisec delay between * successive CRB register updation */ msleep(1); } kfree(buf); /* Resetting the data and instruction cache */ qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0xec, 0x1e); qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0x4c, 8); qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_I+0x4c, 8); /* Clear all protocol processing engines */ qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0x8, 0); qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0xc, 0); qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0x8, 0); qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0xc, 0); qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0x8, 0); qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0xc, 0); qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0x8, 0); qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0xc, 0); return 0; } /** * qla4_8xxx_ms_mem_write_128b - Writes data to MS/off-chip memory * @ha: Pointer to adapter structure * @addr: Flash address to write to * @data: Data to be written * @count: word_count to be written * * Return: On success return QLA_SUCCESS * On error return QLA_ERROR **/ int qla4_8xxx_ms_mem_write_128b(struct scsi_qla_host *ha, uint64_t addr, uint32_t *data, uint32_t count) { int i, j; 
uint32_t agt_ctrl; unsigned long flags; int ret_val = QLA_SUCCESS; /* Only 128-bit aligned access */ if (addr & 0xF) { ret_val = QLA_ERROR; goto exit_ms_mem_write; } write_lock_irqsave(&ha->hw_lock, flags); /* Write address */ ret_val = ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_HI, 0); if (ret_val == QLA_ERROR) { ql4_printk(KERN_ERR, ha, "%s: write to AGT_ADDR_HI failed\n", __func__); goto exit_ms_mem_write_unlock; } for (i = 0; i < count; i++, addr += 16) { if (!((QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_QDR_NET, QLA8XXX_ADDR_QDR_NET_MAX)) || (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_DDR_NET, QLA8XXX_ADDR_DDR_NET_MAX)))) { ret_val = QLA_ERROR; goto exit_ms_mem_write_unlock; } ret_val = ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_LO, addr); /* Write data */ ret_val |= ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_WRDATA_LO, *data++); ret_val |= ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_WRDATA_HI, *data++); ret_val |= ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_WRDATA_ULO, *data++); ret_val |= ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_WRDATA_UHI, *data++); if (ret_val == QLA_ERROR) { ql4_printk(KERN_ERR, ha, "%s: write to AGT_WRDATA failed\n", __func__); goto exit_ms_mem_write_unlock; } /* Check write status */ ret_val = ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL, MIU_TA_CTL_WRITE_ENABLE); ret_val |= ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL, MIU_TA_CTL_WRITE_START); if (ret_val == QLA_ERROR) { ql4_printk(KERN_ERR, ha, "%s: write to AGT_CTRL failed\n", __func__); goto exit_ms_mem_write_unlock; } for (j = 0; j < MAX_CTL_CHECK; j++) { ret_val = ha->isp_ops->rd_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL, &agt_ctrl); if (ret_val == QLA_ERROR) { ql4_printk(KERN_ERR, ha, "%s: failed to read MD_MIU_TEST_AGT_CTRL\n", __func__); goto exit_ms_mem_write_unlock; } if ((agt_ctrl & MIU_TA_CTL_BUSY) == 0) break; } /* Status check failed */ if (j >= MAX_CTL_CHECK) { printk_ratelimited(KERN_ERR "%s: MS memory write failed!\n", __func__); ret_val = QLA_ERROR; goto exit_ms_mem_write_unlock; } } exit_ms_mem_write_unlock: write_unlock_irqrestore(&ha->hw_lock, flags); exit_ms_mem_write: return ret_val; } static int qla4_82xx_load_from_flash(struct scsi_qla_host *ha, uint32_t image_start) { int i, rval = 0; long size = 0; long flashaddr, memaddr; u64 data; u32 high, low; flashaddr = memaddr = ha->hw.flt_region_bootload; size = (image_start - flashaddr) / 8; DEBUG2(printk("scsi%ld: %s: bootldr=0x%lx, fw_image=0x%x\n", ha->host_no, __func__, flashaddr, image_start)); for (i = 0; i < size; i++) { if ((qla4_82xx_rom_fast_read(ha, flashaddr, (int *)&low)) || (qla4_82xx_rom_fast_read(ha, flashaddr + 4, (int *)&high))) { rval = -1; goto exit_load_from_flash; } data = ((u64)high << 32) | low ; rval = qla4_82xx_pci_mem_write_2M(ha, memaddr, &data, 8); if (rval) goto exit_load_from_flash; flashaddr += 8; memaddr += 8; if (i % 0x1000 == 0) msleep(1); } udelay(100); read_lock(&ha->hw_lock); qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020); qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e); read_unlock(&ha->hw_lock); exit_load_from_flash: return rval; } static int qla4_82xx_load_fw(struct scsi_qla_host *ha, uint32_t image_start) { u32 rst; qla4_82xx_wr_32(ha, CRB_CMDPEG_STATE, 0); if (qla4_82xx_pinit_from_rom(ha, 0) != QLA_SUCCESS) { printk(KERN_WARNING "%s: Error during CRB Initialization\n", __func__); return QLA_ERROR; } udelay(500); /* at this point, QM is in reset. This could be a problem if there are * incoming d* transition queue messages. 
QM/PCIE could wedge. * To get around this, QM is brought out of reset. */ rst = qla4_82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET); /* unreset qm */ rst &= ~(1 << 28); qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, rst); if (qla4_82xx_load_from_flash(ha, image_start)) { printk("%s: Error trying to load fw from flash!\n", __func__); return QLA_ERROR; } return QLA_SUCCESS; } int qla4_82xx_pci_mem_read_2M(struct scsi_qla_host *ha, u64 off, void *data, int size) { int i, j = 0, k, start, end, loop, sz[2], off0[2]; int shift_amount; uint32_t temp; uint64_t off8, val, mem_crb, word[2] = {0, 0}; /* * If not MN, go check for MS or invalid. */ if (off >= QLA8XXX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX) mem_crb = QLA82XX_CRB_QDR_NET; else { mem_crb = QLA82XX_CRB_DDR_NET; if (qla4_82xx_pci_mem_bound_check(ha, off, size) == 0) return qla4_82xx_pci_mem_read_direct(ha, off, data, size); } off8 = off & 0xfffffff0; off0[0] = off & 0xf; sz[0] = (size < (16 - off0[0])) ? size : (16 - off0[0]); shift_amount = 4; loop = ((off0[0] + size - 1) >> shift_amount) + 1; off0[1] = 0; sz[1] = size - sz[0]; for (i = 0; i < loop; i++) { temp = off8 + (i << shift_amount); qla4_82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_LO, temp); temp = 0; qla4_82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_HI, temp); temp = MIU_TA_CTL_ENABLE; qla4_82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp); temp = MIU_TA_CTL_START_ENABLE; qla4_82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp); for (j = 0; j < MAX_CTL_CHECK; j++) { temp = qla4_82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL); if ((temp & MIU_TA_CTL_BUSY) == 0) break; } if (j >= MAX_CTL_CHECK) { printk_ratelimited(KERN_ERR "%s: failed to read through agent\n", __func__); break; } start = off0[i] >> 2; end = (off0[i] + sz[i] - 1) >> 2; for (k = start; k <= end; k++) { temp = qla4_82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_RDDATA(k)); word[i] |= ((uint64_t)temp << (32 * (k & 1))); } } if (j >= MAX_CTL_CHECK) return -1; if ((off0[0] & 7) == 0) { val = word[0]; } else { val = ((word[0] >> (off0[0] * 8)) & (~(~0ULL << (sz[0] * 8)))) | ((word[1] & (~(~0ULL << (sz[1] * 8)))) << (sz[0] * 8)); } switch (size) { case 1: *(uint8_t *)data = val; break; case 2: *(uint16_t *)data = val; break; case 4: *(uint32_t *)data = val; break; case 8: *(uint64_t *)data = val; break; } return 0; } int qla4_82xx_pci_mem_write_2M(struct scsi_qla_host *ha, u64 off, void *data, int size) { int i, j, ret = 0, loop, sz[2], off0; int scale, shift_amount, startword; uint32_t temp; uint64_t off8, mem_crb, tmpw, word[2] = {0, 0}; /* * If not MN, go check for MS or invalid. */ if (off >= QLA8XXX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX) mem_crb = QLA82XX_CRB_QDR_NET; else { mem_crb = QLA82XX_CRB_DDR_NET; if (qla4_82xx_pci_mem_bound_check(ha, off, size) == 0) return qla4_82xx_pci_mem_write_direct(ha, off, data, size); } off0 = off & 0x7; sz[0] = (size < (8 - off0)) ? 
size : (8 - off0); sz[1] = size - sz[0]; off8 = off & 0xfffffff0; loop = (((off & 0xf) + size - 1) >> 4) + 1; shift_amount = 4; scale = 2; startword = (off & 0xf)/8; for (i = 0; i < loop; i++) { if (qla4_82xx_pci_mem_read_2M(ha, off8 + (i << shift_amount), &word[i * scale], 8)) return -1; } switch (size) { case 1: tmpw = *((uint8_t *)data); break; case 2: tmpw = *((uint16_t *)data); break; case 4: tmpw = *((uint32_t *)data); break; case 8: default: tmpw = *((uint64_t *)data); break; } if (sz[0] == 8) word[startword] = tmpw; else { word[startword] &= ~((~(~0ULL << (sz[0] * 8))) << (off0 * 8)); word[startword] |= tmpw << (off0 * 8); } if (sz[1] != 0) { word[startword+1] &= ~(~0ULL << (sz[1] * 8)); word[startword+1] |= tmpw >> (sz[0] * 8); } for (i = 0; i < loop; i++) { temp = off8 + (i << shift_amount); qla4_82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_LO, temp); temp = 0; qla4_82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_HI, temp); temp = word[i * scale] & 0xffffffff; qla4_82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_LO, temp); temp = (word[i * scale] >> 32) & 0xffffffff; qla4_82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_HI, temp); temp = word[i*scale + 1] & 0xffffffff; qla4_82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_WRDATA_UPPER_LO, temp); temp = (word[i*scale + 1] >> 32) & 0xffffffff; qla4_82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_WRDATA_UPPER_HI, temp); temp = MIU_TA_CTL_WRITE_ENABLE; qla4_82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_CTRL, temp); temp = MIU_TA_CTL_WRITE_START; qla4_82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_CTRL, temp); for (j = 0; j < MAX_CTL_CHECK; j++) { temp = qla4_82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL); if ((temp & MIU_TA_CTL_BUSY) == 0) break; } if (j >= MAX_CTL_CHECK) { if (printk_ratelimit()) ql4_printk(KERN_ERR, ha, "%s: failed to read through agent\n", __func__); ret = -1; break; } } return ret; } static int qla4_82xx_cmdpeg_ready(struct scsi_qla_host *ha, int pegtune_val) { u32 val = 0; int retries = 60; if (!pegtune_val) { do { val = qla4_82xx_rd_32(ha, CRB_CMDPEG_STATE); if ((val == PHAN_INITIALIZE_COMPLETE) || (val == PHAN_INITIALIZE_ACK)) return 0; set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout(500); } while (--retries); if (!retries) { pegtune_val = qla4_82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_PEGTUNE_DONE); printk(KERN_WARNING "%s: init failed, " "pegtune_val = %x\n", __func__, pegtune_val); return -1; } } return 0; } static int qla4_82xx_rcvpeg_ready(struct scsi_qla_host *ha) { uint32_t state = 0; int loops = 0; /* Window 1 call */ read_lock(&ha->hw_lock); state = qla4_82xx_rd_32(ha, CRB_RCVPEG_STATE); read_unlock(&ha->hw_lock); while ((state != PHAN_PEG_RCV_INITIALIZED) && (loops < 30000)) { udelay(100); /* Window 1 call */ read_lock(&ha->hw_lock); state = qla4_82xx_rd_32(ha, CRB_RCVPEG_STATE); read_unlock(&ha->hw_lock); loops++; } if (loops >= 30000) { DEBUG2(ql4_printk(KERN_INFO, ha, "Receive Peg initialization not complete: 0x%x.\n", state)); return QLA_ERROR; } return QLA_SUCCESS; } void qla4_8xxx_set_drv_active(struct scsi_qla_host *ha) { uint32_t drv_active; drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE); /* * For ISP8324 and ISP8042, drv_active register has 1 bit per function, * shift 1 by func_num to set a bit for the function. 
* For ISP8022, drv_active has 4 bits per function */ if (is_qla8032(ha) || is_qla8042(ha)) drv_active |= (1 << ha->func_num); else drv_active |= (1 << (ha->func_num * 4)); ql4_printk(KERN_INFO, ha, "%s(%ld): drv_active: 0x%08x\n", __func__, ha->host_no, drv_active); qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_ACTIVE, drv_active); } void qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha) { uint32_t drv_active; drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE); /* * For ISP8324 and ISP8042, drv_active register has 1 bit per function, * shift 1 by func_num to set a bit for the function. * For ISP8022, drv_active has 4 bits per function */ if (is_qla8032(ha) || is_qla8042(ha)) drv_active &= ~(1 << (ha->func_num)); else drv_active &= ~(1 << (ha->func_num * 4)); ql4_printk(KERN_INFO, ha, "%s(%ld): drv_active: 0x%08x\n", __func__, ha->host_no, drv_active); qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_ACTIVE, drv_active); } inline int qla4_8xxx_need_reset(struct scsi_qla_host *ha) { uint32_t drv_state, drv_active; int rval; drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE); drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE); /* * For ISP8324 and ISP8042, drv_active register has 1 bit per function, * shift 1 by func_num to set a bit for the function. * For ISP8022, drv_active has 4 bits per function */ if (is_qla8032(ha) || is_qla8042(ha)) rval = drv_state & (1 << ha->func_num); else rval = drv_state & (1 << (ha->func_num * 4)); if ((test_bit(AF_EEH_BUSY, &ha->flags)) && drv_active) rval = 1; return rval; } void qla4_8xxx_set_rst_ready(struct scsi_qla_host *ha) { uint32_t drv_state; drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE); /* * For ISP8324 and ISP8042, drv_active register has 1 bit per function, * shift 1 by func_num to set a bit for the function. * For ISP8022, drv_active has 4 bits per function */ if (is_qla8032(ha) || is_qla8042(ha)) drv_state |= (1 << ha->func_num); else drv_state |= (1 << (ha->func_num * 4)); ql4_printk(KERN_INFO, ha, "%s(%ld): drv_state: 0x%08x\n", __func__, ha->host_no, drv_state); qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, drv_state); } void qla4_8xxx_clear_rst_ready(struct scsi_qla_host *ha) { uint32_t drv_state; drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE); /* * For ISP8324 and ISP8042, drv_active register has 1 bit per function, * shift 1 by func_num to set a bit for the function. * For ISP8022, drv_active has 4 bits per function */ if (is_qla8032(ha) || is_qla8042(ha)) drv_state &= ~(1 << ha->func_num); else drv_state &= ~(1 << (ha->func_num * 4)); ql4_printk(KERN_INFO, ha, "%s(%ld): drv_state: 0x%08x\n", __func__, ha->host_no, drv_state); qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, drv_state); } static inline void qla4_8xxx_set_qsnt_ready(struct scsi_qla_host *ha) { uint32_t qsnt_state; qsnt_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE); /* * For ISP8324 and ISP8042, drv_active register has 1 bit per function, * shift 1 by func_num to set a bit for the function. * For ISP8022, drv_active has 4 bits per function. 
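 * The quiescent-ready indication uses the second bit of that 4-bit
 * group on ISP8022, which is why the else branch below shifts 2
 * rather than 1.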
*/ if (is_qla8032(ha) || is_qla8042(ha)) qsnt_state |= (1 << ha->func_num); else qsnt_state |= (2 << (ha->func_num * 4)); qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, qsnt_state); } static int qla4_82xx_start_firmware(struct scsi_qla_host *ha, uint32_t image_start) { uint16_t lnk; /* scrub dma mask expansion register */ qla4_82xx_wr_32(ha, CRB_DMA_SHIFT, 0x55555555); /* Overwrite stale initialization register values */ qla4_82xx_wr_32(ha, CRB_CMDPEG_STATE, 0); qla4_82xx_wr_32(ha, CRB_RCVPEG_STATE, 0); qla4_82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS1, 0); qla4_82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS2, 0); if (qla4_82xx_load_fw(ha, image_start) != QLA_SUCCESS) { printk("%s: Error trying to start fw!\n", __func__); return QLA_ERROR; } /* Handshake with the card before we register the devices. */ if (qla4_82xx_cmdpeg_ready(ha, 0) != QLA_SUCCESS) { printk("%s: Error during card handshake!\n", __func__); return QLA_ERROR; } /* Negotiated Link width */ pcie_capability_read_word(ha->pdev, PCI_EXP_LNKSTA, &lnk); ha->link_width = (lnk >> 4) & 0x3f; /* Synchronize with Receive peg */ return qla4_82xx_rcvpeg_ready(ha); } int qla4_82xx_try_start_fw(struct scsi_qla_host *ha) { int rval; /* * FW Load priority: * 1) Operational firmware residing in flash. * 2) Fail */ ql4_printk(KERN_INFO, ha, "FW: Retrieving flash offsets from FLT/FDT ...\n"); rval = qla4_8xxx_get_flash_info(ha); if (rval != QLA_SUCCESS) return rval; ql4_printk(KERN_INFO, ha, "FW: Attempting to load firmware from flash...\n"); rval = qla4_82xx_start_firmware(ha, ha->hw.flt_region_fw); if (rval != QLA_SUCCESS) { ql4_printk(KERN_ERR, ha, "FW: Load firmware from flash" " FAILED...\n"); return rval; } return rval; } void qla4_82xx_rom_lock_recovery(struct scsi_qla_host *ha) { if (qla4_82xx_rom_lock(ha)) { /* Someone else is holding the lock. */ dev_info(&ha->pdev->dev, "Resetting rom_lock\n"); } /* * Either we got the lock, or someone * else died while holding it. * In either case, unlock. 
*/ qla4_82xx_rom_unlock(ha); } static uint32_t ql4_84xx_poll_wait_for_ready(struct scsi_qla_host *ha, uint32_t addr1, uint32_t mask) { unsigned long timeout; uint32_t rval = QLA_SUCCESS; uint32_t temp; timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS); do { ha->isp_ops->rd_reg_indirect(ha, addr1, &temp); if ((temp & mask) != 0) break; if (time_after_eq(jiffies, timeout)) { ql4_printk(KERN_INFO, ha, "Error in processing rdmdio entry\n"); return QLA_ERROR; } } while (1); return rval; } static uint32_t ql4_84xx_ipmdio_rd_reg(struct scsi_qla_host *ha, uint32_t addr1, uint32_t addr3, uint32_t mask, uint32_t addr, uint32_t *data_ptr) { int rval = QLA_SUCCESS; uint32_t temp; uint32_t data; rval = ql4_84xx_poll_wait_for_ready(ha, addr1, mask); if (rval) goto exit_ipmdio_rd_reg; temp = (0x40000000 | addr); ha->isp_ops->wr_reg_indirect(ha, addr1, temp); rval = ql4_84xx_poll_wait_for_ready(ha, addr1, mask); if (rval) goto exit_ipmdio_rd_reg; ha->isp_ops->rd_reg_indirect(ha, addr3, &data); *data_ptr = data; exit_ipmdio_rd_reg: return rval; } static uint32_t ql4_84xx_poll_wait_ipmdio_bus_idle(struct scsi_qla_host *ha, uint32_t addr1, uint32_t addr2, uint32_t addr3, uint32_t mask) { unsigned long timeout; uint32_t temp; uint32_t rval = QLA_SUCCESS; timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS); do { ql4_84xx_ipmdio_rd_reg(ha, addr1, addr3, mask, addr2, &temp); if ((temp & 0x1) != 1) break; if (time_after_eq(jiffies, timeout)) { ql4_printk(KERN_INFO, ha, "Error in processing mdiobus idle\n"); return QLA_ERROR; } } while (1); return rval; } static int ql4_84xx_ipmdio_wr_reg(struct scsi_qla_host *ha, uint32_t addr1, uint32_t addr3, uint32_t mask, uint32_t addr, uint32_t value) { int rval = QLA_SUCCESS; rval = ql4_84xx_poll_wait_for_ready(ha, addr1, mask); if (rval) goto exit_ipmdio_wr_reg; ha->isp_ops->wr_reg_indirect(ha, addr3, value); ha->isp_ops->wr_reg_indirect(ha, addr1, addr); rval = ql4_84xx_poll_wait_for_ready(ha, addr1, mask); if (rval) goto exit_ipmdio_wr_reg; exit_ipmdio_wr_reg: return rval; } static void qla4_8xxx_minidump_process_rdcrb(struct scsi_qla_host *ha, struct qla8xxx_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) { uint32_t r_addr, r_stride, loop_cnt, i, r_value; struct qla8xxx_minidump_entry_crb *crb_hdr; uint32_t *data_ptr = *d_ptr; DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__)); crb_hdr = (struct qla8xxx_minidump_entry_crb *)entry_hdr; r_addr = crb_hdr->addr; r_stride = crb_hdr->crb_strd.addr_stride; loop_cnt = crb_hdr->op_count; for (i = 0; i < loop_cnt; i++) { ha->isp_ops->rd_reg_indirect(ha, r_addr, &r_value); *data_ptr++ = cpu_to_le32(r_addr); *data_ptr++ = cpu_to_le32(r_value); r_addr += r_stride; } *d_ptr = data_ptr; } static int qla4_83xx_check_dma_engine_state(struct scsi_qla_host *ha) { int rval = QLA_SUCCESS; uint32_t dma_eng_num = 0, cmd_sts_and_cntrl = 0; uint64_t dma_base_addr = 0; struct qla4_8xxx_minidump_template_hdr *tmplt_hdr = NULL; tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *) ha->fw_dump_tmplt_hdr; dma_eng_num = tmplt_hdr->saved_state_array[QLA83XX_PEX_DMA_ENGINE_INDEX]; dma_base_addr = QLA83XX_PEX_DMA_BASE_ADDRESS + (dma_eng_num * QLA83XX_PEX_DMA_NUM_OFFSET); /* Read the pex-dma's command-status-and-control register. */ rval = ha->isp_ops->rd_reg_indirect(ha, (dma_base_addr + QLA83XX_PEX_DMA_CMD_STS_AND_CNTRL), &cmd_sts_and_cntrl); if (rval) return QLA_ERROR; /* Check if requested pex-dma engine is available. 
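 * BIT_31 set in that register means the engine can be used for the
 * pex-dma minidump read; otherwise the caller falls back to the
 * register-based rdmem read.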
*/ if (cmd_sts_and_cntrl & BIT_31) return QLA_SUCCESS; else return QLA_ERROR; } static int qla4_83xx_start_pex_dma(struct scsi_qla_host *ha, struct qla4_83xx_minidump_entry_rdmem_pex_dma *m_hdr) { int rval = QLA_SUCCESS, wait = 0; uint32_t dma_eng_num = 0, cmd_sts_and_cntrl = 0; uint64_t dma_base_addr = 0; struct qla4_8xxx_minidump_template_hdr *tmplt_hdr = NULL; tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *) ha->fw_dump_tmplt_hdr; dma_eng_num = tmplt_hdr->saved_state_array[QLA83XX_PEX_DMA_ENGINE_INDEX]; dma_base_addr = QLA83XX_PEX_DMA_BASE_ADDRESS + (dma_eng_num * QLA83XX_PEX_DMA_NUM_OFFSET); rval = ha->isp_ops->wr_reg_indirect(ha, dma_base_addr + QLA83XX_PEX_DMA_CMD_ADDR_LOW, m_hdr->desc_card_addr); if (rval) goto error_exit; rval = ha->isp_ops->wr_reg_indirect(ha, dma_base_addr + QLA83XX_PEX_DMA_CMD_ADDR_HIGH, 0); if (rval) goto error_exit; rval = ha->isp_ops->wr_reg_indirect(ha, dma_base_addr + QLA83XX_PEX_DMA_CMD_STS_AND_CNTRL, m_hdr->start_dma_cmd); if (rval) goto error_exit; /* Wait for dma operation to complete. */ for (wait = 0; wait < QLA83XX_PEX_DMA_MAX_WAIT; wait++) { rval = ha->isp_ops->rd_reg_indirect(ha, (dma_base_addr + QLA83XX_PEX_DMA_CMD_STS_AND_CNTRL), &cmd_sts_and_cntrl); if (rval) goto error_exit; if ((cmd_sts_and_cntrl & BIT_1) == 0) break; else udelay(10); } /* Wait a max of 100 ms, otherwise fallback to rdmem entry read */ if (wait >= QLA83XX_PEX_DMA_MAX_WAIT) { rval = QLA_ERROR; goto error_exit; } error_exit: return rval; } static int qla4_8xxx_minidump_pex_dma_read(struct scsi_qla_host *ha, struct qla8xxx_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) { int rval = QLA_SUCCESS; struct qla4_83xx_minidump_entry_rdmem_pex_dma *m_hdr = NULL; uint32_t size, read_size; uint8_t *data_ptr = (uint8_t *)*d_ptr; void *rdmem_buffer = NULL; dma_addr_t rdmem_dma; struct qla4_83xx_pex_dma_descriptor dma_desc; DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__)); rval = qla4_83xx_check_dma_engine_state(ha); if (rval != QLA_SUCCESS) { DEBUG2(ql4_printk(KERN_INFO, ha, "%s: DMA engine not available. Fallback to rdmem-read.\n", __func__)); return QLA_ERROR; } m_hdr = (struct qla4_83xx_minidump_entry_rdmem_pex_dma *)entry_hdr; rdmem_buffer = dma_alloc_coherent(&ha->pdev->dev, QLA83XX_PEX_DMA_READ_SIZE, &rdmem_dma, GFP_KERNEL); if (!rdmem_buffer) { DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Unable to allocate rdmem dma buffer\n", __func__)); return QLA_ERROR; } /* Prepare pex-dma descriptor to be written to MS memory. */ /* dma-desc-cmd layout: * 0-3: dma-desc-cmd 0-3 * 4-7: pcid function number * 8-15: dma-desc-cmd 8-15 */ dma_desc.cmd.dma_desc_cmd = (m_hdr->dma_desc_cmd & 0xff0f); dma_desc.cmd.dma_desc_cmd |= ((PCI_FUNC(ha->pdev->devfn) & 0xf) << 0x4); dma_desc.dma_bus_addr = rdmem_dma; size = 0; read_size = 0; /* * Perform rdmem operation using pex-dma. * Prepare dma in chunks of QLA83XX_PEX_DMA_READ_SIZE. 
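 * The final chunk may be shorter than QLA83XX_PEX_DMA_READ_SIZE; in
 * that case the coherent buffer is reallocated to the remaining size
 * before the last descriptor is written out.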
*/ while (read_size < m_hdr->read_data_size) { if (m_hdr->read_data_size - read_size >= QLA83XX_PEX_DMA_READ_SIZE) size = QLA83XX_PEX_DMA_READ_SIZE; else { size = (m_hdr->read_data_size - read_size); if (rdmem_buffer) dma_free_coherent(&ha->pdev->dev, QLA83XX_PEX_DMA_READ_SIZE, rdmem_buffer, rdmem_dma); rdmem_buffer = dma_alloc_coherent(&ha->pdev->dev, size, &rdmem_dma, GFP_KERNEL); if (!rdmem_buffer) { DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Unable to allocate rdmem dma buffer\n", __func__)); return QLA_ERROR; } dma_desc.dma_bus_addr = rdmem_dma; } dma_desc.src_addr = m_hdr->read_addr + read_size; dma_desc.cmd.read_data_size = size; /* Prepare: Write pex-dma descriptor to MS memory. */ rval = qla4_8xxx_ms_mem_write_128b(ha, (uint64_t)m_hdr->desc_card_addr, (uint32_t *)&dma_desc, (sizeof(struct qla4_83xx_pex_dma_descriptor)/16)); if (rval != QLA_SUCCESS) { ql4_printk(KERN_INFO, ha, "%s: Error writing rdmem-dma-init to MS !!!\n", __func__); goto error_exit; } DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Dma-desc: Instruct for rdmem dma (size 0x%x).\n", __func__, size)); /* Execute: Start pex-dma operation. */ rval = qla4_83xx_start_pex_dma(ha, m_hdr); if (rval != QLA_SUCCESS) { DEBUG2(ql4_printk(KERN_INFO, ha, "scsi(%ld): start-pex-dma failed rval=0x%x\n", ha->host_no, rval)); goto error_exit; } memcpy(data_ptr, rdmem_buffer, size); data_ptr += size; read_size += size; } DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s\n", __func__)); *d_ptr = (uint32_t *)data_ptr; error_exit: if (rdmem_buffer) dma_free_coherent(&ha->pdev->dev, size, rdmem_buffer, rdmem_dma); return rval; } static int qla4_8xxx_minidump_process_l2tag(struct scsi_qla_host *ha, struct qla8xxx_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) { uint32_t addr, r_addr, c_addr, t_r_addr; uint32_t i, k, loop_count, t_value, r_cnt, r_value; unsigned long p_wait, w_time, p_mask; uint32_t c_value_w, c_value_r; struct qla8xxx_minidump_entry_cache *cache_hdr; int rval = QLA_ERROR; uint32_t *data_ptr = *d_ptr; DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__)); cache_hdr = (struct qla8xxx_minidump_entry_cache *)entry_hdr; loop_count = cache_hdr->op_count; r_addr = cache_hdr->read_addr; c_addr = cache_hdr->control_addr; c_value_w = cache_hdr->cache_ctrl.write_value; t_r_addr = cache_hdr->tag_reg_addr; t_value = cache_hdr->addr_ctrl.init_tag_value; r_cnt = cache_hdr->read_ctrl.read_addr_cnt; p_wait = cache_hdr->cache_ctrl.poll_wait; p_mask = cache_hdr->cache_ctrl.poll_mask; for (i = 0; i < loop_count; i++) { ha->isp_ops->wr_reg_indirect(ha, t_r_addr, t_value); if (c_value_w) ha->isp_ops->wr_reg_indirect(ha, c_addr, c_value_w); if (p_mask) { w_time = jiffies + p_wait; do { ha->isp_ops->rd_reg_indirect(ha, c_addr, &c_value_r); if ((c_value_r & p_mask) == 0) { break; } else if (time_after_eq(jiffies, w_time)) { /* capturing dump failed */ return rval; } } while (1); } addr = r_addr; for (k = 0; k < r_cnt; k++) { ha->isp_ops->rd_reg_indirect(ha, addr, &r_value); *data_ptr++ = cpu_to_le32(r_value); addr += cache_hdr->read_ctrl.read_addr_stride; } t_value += cache_hdr->addr_ctrl.tag_value_stride; } *d_ptr = data_ptr; return QLA_SUCCESS; } static int qla4_8xxx_minidump_process_control(struct scsi_qla_host *ha, struct qla8xxx_minidump_entry_hdr *entry_hdr) { struct qla8xxx_minidump_entry_crb *crb_entry; uint32_t read_value, opcode, poll_time, addr, index, rval = QLA_SUCCESS; uint32_t crb_addr; unsigned long wtime; struct qla4_8xxx_minidump_template_hdr *tmplt_hdr; int i; DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__)); 
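	/*
	 * Each control entry packs one or more opcodes (write, read/write-back,
	 * AND/OR, poll, and saved-state read/write/modify operations) and is
	 * applied op_count times, advancing crb_addr by the configured stride
	 * after every iteration.
	 */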
tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *) ha->fw_dump_tmplt_hdr; crb_entry = (struct qla8xxx_minidump_entry_crb *)entry_hdr; crb_addr = crb_entry->addr; for (i = 0; i < crb_entry->op_count; i++) { opcode = crb_entry->crb_ctrl.opcode; if (opcode & QLA8XXX_DBG_OPCODE_WR) { ha->isp_ops->wr_reg_indirect(ha, crb_addr, crb_entry->value_1); opcode &= ~QLA8XXX_DBG_OPCODE_WR; } if (opcode & QLA8XXX_DBG_OPCODE_RW) { ha->isp_ops->rd_reg_indirect(ha, crb_addr, &read_value); ha->isp_ops->wr_reg_indirect(ha, crb_addr, read_value); opcode &= ~QLA8XXX_DBG_OPCODE_RW; } if (opcode & QLA8XXX_DBG_OPCODE_AND) { ha->isp_ops->rd_reg_indirect(ha, crb_addr, &read_value); read_value &= crb_entry->value_2; opcode &= ~QLA8XXX_DBG_OPCODE_AND; if (opcode & QLA8XXX_DBG_OPCODE_OR) { read_value |= crb_entry->value_3; opcode &= ~QLA8XXX_DBG_OPCODE_OR; } ha->isp_ops->wr_reg_indirect(ha, crb_addr, read_value); } if (opcode & QLA8XXX_DBG_OPCODE_OR) { ha->isp_ops->rd_reg_indirect(ha, crb_addr, &read_value); read_value |= crb_entry->value_3; ha->isp_ops->wr_reg_indirect(ha, crb_addr, read_value); opcode &= ~QLA8XXX_DBG_OPCODE_OR; } if (opcode & QLA8XXX_DBG_OPCODE_POLL) { poll_time = crb_entry->crb_strd.poll_timeout; wtime = jiffies + poll_time; ha->isp_ops->rd_reg_indirect(ha, crb_addr, &read_value); do { if ((read_value & crb_entry->value_2) == crb_entry->value_1) { break; } else if (time_after_eq(jiffies, wtime)) { /* capturing dump failed */ rval = QLA_ERROR; break; } else { ha->isp_ops->rd_reg_indirect(ha, crb_addr, &read_value); } } while (1); opcode &= ~QLA8XXX_DBG_OPCODE_POLL; } if (opcode & QLA8XXX_DBG_OPCODE_RDSTATE) { if (crb_entry->crb_strd.state_index_a) { index = crb_entry->crb_strd.state_index_a; addr = tmplt_hdr->saved_state_array[index]; } else { addr = crb_addr; } ha->isp_ops->rd_reg_indirect(ha, addr, &read_value); index = crb_entry->crb_ctrl.state_index_v; tmplt_hdr->saved_state_array[index] = read_value; opcode &= ~QLA8XXX_DBG_OPCODE_RDSTATE; } if (opcode & QLA8XXX_DBG_OPCODE_WRSTATE) { if (crb_entry->crb_strd.state_index_a) { index = crb_entry->crb_strd.state_index_a; addr = tmplt_hdr->saved_state_array[index]; } else { addr = crb_addr; } if (crb_entry->crb_ctrl.state_index_v) { index = crb_entry->crb_ctrl.state_index_v; read_value = tmplt_hdr->saved_state_array[index]; } else { read_value = crb_entry->value_1; } ha->isp_ops->wr_reg_indirect(ha, addr, read_value); opcode &= ~QLA8XXX_DBG_OPCODE_WRSTATE; } if (opcode & QLA8XXX_DBG_OPCODE_MDSTATE) { index = crb_entry->crb_ctrl.state_index_v; read_value = tmplt_hdr->saved_state_array[index]; read_value <<= crb_entry->crb_ctrl.shl; read_value >>= crb_entry->crb_ctrl.shr; if (crb_entry->value_2) read_value &= crb_entry->value_2; read_value |= crb_entry->value_3; read_value += crb_entry->value_1; tmplt_hdr->saved_state_array[index] = read_value; opcode &= ~QLA8XXX_DBG_OPCODE_MDSTATE; } crb_addr += crb_entry->crb_strd.addr_stride; } DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s\n", __func__)); return rval; } static void qla4_8xxx_minidump_process_rdocm(struct scsi_qla_host *ha, struct qla8xxx_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) { uint32_t r_addr, r_stride, loop_cnt, i, r_value; struct qla8xxx_minidump_entry_rdocm *ocm_hdr; uint32_t *data_ptr = *d_ptr; DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__)); ocm_hdr = (struct qla8xxx_minidump_entry_rdocm *)entry_hdr; r_addr = ocm_hdr->read_addr; r_stride = ocm_hdr->read_addr_stride; loop_cnt = ocm_hdr->op_count; DEBUG2(ql4_printk(KERN_INFO, ha, "[%s]: r_addr: 0x%x, r_stride: 
0x%x, loop_cnt: 0x%x\n", __func__, r_addr, r_stride, loop_cnt)); for (i = 0; i < loop_cnt; i++) { r_value = readl((void __iomem *)(r_addr + ha->nx_pcibase)); *data_ptr++ = cpu_to_le32(r_value); r_addr += r_stride; } DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s datacount: 0x%lx\n", __func__, (long unsigned int) (loop_cnt * sizeof(uint32_t)))); *d_ptr = data_ptr; } static void qla4_8xxx_minidump_process_rdmux(struct scsi_qla_host *ha, struct qla8xxx_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) { uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value; struct qla8xxx_minidump_entry_mux *mux_hdr; uint32_t *data_ptr = *d_ptr; DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__)); mux_hdr = (struct qla8xxx_minidump_entry_mux *)entry_hdr; r_addr = mux_hdr->read_addr; s_addr = mux_hdr->select_addr; s_stride = mux_hdr->select_value_stride; s_value = mux_hdr->select_value; loop_cnt = mux_hdr->op_count; for (i = 0; i < loop_cnt; i++) { ha->isp_ops->wr_reg_indirect(ha, s_addr, s_value); ha->isp_ops->rd_reg_indirect(ha, r_addr, &r_value); *data_ptr++ = cpu_to_le32(s_value); *data_ptr++ = cpu_to_le32(r_value); s_value += s_stride; } *d_ptr = data_ptr; } static void qla4_8xxx_minidump_process_l1cache(struct scsi_qla_host *ha, struct qla8xxx_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) { uint32_t addr, r_addr, c_addr, t_r_addr; uint32_t i, k, loop_count, t_value, r_cnt, r_value; uint32_t c_value_w; struct qla8xxx_minidump_entry_cache *cache_hdr; uint32_t *data_ptr = *d_ptr; cache_hdr = (struct qla8xxx_minidump_entry_cache *)entry_hdr; loop_count = cache_hdr->op_count; r_addr = cache_hdr->read_addr; c_addr = cache_hdr->control_addr; c_value_w = cache_hdr->cache_ctrl.write_value; t_r_addr = cache_hdr->tag_reg_addr; t_value = cache_hdr->addr_ctrl.init_tag_value; r_cnt = cache_hdr->read_ctrl.read_addr_cnt; for (i = 0; i < loop_count; i++) { ha->isp_ops->wr_reg_indirect(ha, t_r_addr, t_value); ha->isp_ops->wr_reg_indirect(ha, c_addr, c_value_w); addr = r_addr; for (k = 0; k < r_cnt; k++) { ha->isp_ops->rd_reg_indirect(ha, addr, &r_value); *data_ptr++ = cpu_to_le32(r_value); addr += cache_hdr->read_ctrl.read_addr_stride; } t_value += cache_hdr->addr_ctrl.tag_value_stride; } *d_ptr = data_ptr; } static void qla4_8xxx_minidump_process_queue(struct scsi_qla_host *ha, struct qla8xxx_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) { uint32_t s_addr, r_addr; uint32_t r_stride, r_value, r_cnt, qid = 0; uint32_t i, k, loop_cnt; struct qla8xxx_minidump_entry_queue *q_hdr; uint32_t *data_ptr = *d_ptr; DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__)); q_hdr = (struct qla8xxx_minidump_entry_queue *)entry_hdr; s_addr = q_hdr->select_addr; r_cnt = q_hdr->rd_strd.read_addr_cnt; r_stride = q_hdr->rd_strd.read_addr_stride; loop_cnt = q_hdr->op_count; for (i = 0; i < loop_cnt; i++) { ha->isp_ops->wr_reg_indirect(ha, s_addr, qid); r_addr = q_hdr->read_addr; for (k = 0; k < r_cnt; k++) { ha->isp_ops->rd_reg_indirect(ha, r_addr, &r_value); *data_ptr++ = cpu_to_le32(r_value); r_addr += r_stride; } qid += q_hdr->q_strd.queue_id_stride; } *d_ptr = data_ptr; } #define MD_DIRECT_ROM_WINDOW 0x42110030 #define MD_DIRECT_ROM_READ_BASE 0x42150000 static void qla4_82xx_minidump_process_rdrom(struct scsi_qla_host *ha, struct qla8xxx_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) { uint32_t r_addr, r_value; uint32_t i, loop_cnt; struct qla8xxx_minidump_entry_rdrom *rom_hdr; uint32_t *data_ptr = *d_ptr; DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__)); rom_hdr = (struct 
qla8xxx_minidump_entry_rdrom *)entry_hdr; r_addr = rom_hdr->read_addr; loop_cnt = rom_hdr->read_data_size/sizeof(uint32_t); DEBUG2(ql4_printk(KERN_INFO, ha, "[%s]: flash_addr: 0x%x, read_data_size: 0x%x\n", __func__, r_addr, loop_cnt)); for (i = 0; i < loop_cnt; i++) { ha->isp_ops->wr_reg_indirect(ha, MD_DIRECT_ROM_WINDOW, (r_addr & 0xFFFF0000)); ha->isp_ops->rd_reg_indirect(ha, MD_DIRECT_ROM_READ_BASE + (r_addr & 0x0000FFFF), &r_value); *data_ptr++ = cpu_to_le32(r_value); r_addr += sizeof(uint32_t); } *d_ptr = data_ptr; } #define MD_MIU_TEST_AGT_CTRL 0x41000090 #define MD_MIU_TEST_AGT_ADDR_LO 0x41000094 #define MD_MIU_TEST_AGT_ADDR_HI 0x41000098 static int __qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha, struct qla8xxx_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) { uint32_t r_addr, r_value, r_data; uint32_t i, j, loop_cnt; struct qla8xxx_minidump_entry_rdmem *m_hdr; unsigned long flags; uint32_t *data_ptr = *d_ptr; DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__)); m_hdr = (struct qla8xxx_minidump_entry_rdmem *)entry_hdr; r_addr = m_hdr->read_addr; loop_cnt = m_hdr->read_data_size/16; DEBUG2(ql4_printk(KERN_INFO, ha, "[%s]: Read addr: 0x%x, read_data_size: 0x%x\n", __func__, r_addr, m_hdr->read_data_size)); if (r_addr & 0xf) { DEBUG2(ql4_printk(KERN_INFO, ha, "[%s]: Read addr 0x%x not 16 bytes aligned\n", __func__, r_addr)); return QLA_ERROR; } if (m_hdr->read_data_size % 16) { DEBUG2(ql4_printk(KERN_INFO, ha, "[%s]: Read data[0x%x] not multiple of 16 bytes\n", __func__, m_hdr->read_data_size)); return QLA_ERROR; } DEBUG2(ql4_printk(KERN_INFO, ha, "[%s]: rdmem_addr: 0x%x, read_data_size: 0x%x, loop_cnt: 0x%x\n", __func__, r_addr, m_hdr->read_data_size, loop_cnt)); write_lock_irqsave(&ha->hw_lock, flags); for (i = 0; i < loop_cnt; i++) { ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_LO, r_addr); r_value = 0; ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_HI, r_value); r_value = MIU_TA_CTL_ENABLE; ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL, r_value); r_value = MIU_TA_CTL_START_ENABLE; ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL, r_value); for (j = 0; j < MAX_CTL_CHECK; j++) { ha->isp_ops->rd_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL, &r_value); if ((r_value & MIU_TA_CTL_BUSY) == 0) break; } if (j >= MAX_CTL_CHECK) { printk_ratelimited(KERN_ERR "%s: failed to read through agent\n", __func__); write_unlock_irqrestore(&ha->hw_lock, flags); return QLA_SUCCESS; } for (j = 0; j < 4; j++) { ha->isp_ops->rd_reg_indirect(ha, MD_MIU_TEST_AGT_RDDATA[j], &r_data); *data_ptr++ = cpu_to_le32(r_data); } r_addr += 16; } write_unlock_irqrestore(&ha->hw_lock, flags); DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s datacount: 0x%x\n", __func__, (loop_cnt * 16))); *d_ptr = data_ptr; return QLA_SUCCESS; } static int qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha, struct qla8xxx_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) { uint32_t *data_ptr = *d_ptr; int rval = QLA_SUCCESS; rval = qla4_8xxx_minidump_pex_dma_read(ha, entry_hdr, &data_ptr); if (rval != QLA_SUCCESS) rval = __qla4_8xxx_minidump_process_rdmem(ha, entry_hdr, &data_ptr); *d_ptr = data_ptr; return rval; } static void qla4_8xxx_mark_entry_skipped(struct scsi_qla_host *ha, struct qla8xxx_minidump_entry_hdr *entry_hdr, int index) { entry_hdr->d_ctrl.driver_flags |= QLA8XXX_DBG_SKIPPED_FLAG; DEBUG2(ql4_printk(KERN_INFO, ha, "scsi(%ld): Skipping entry[%d]: ETYPE[0x%x]-ELEVEL[0x%x]\n", ha->host_no, index, entry_hdr->entry_type, entry_hdr->d_ctrl.entry_capture_mask)); /* If 
driver encounters a new entry type that it cannot process, * it should just skip the entry and adjust the total buffer size by * from subtracting the skipped bytes from it */ ha->fw_dump_skip_size += entry_hdr->entry_capture_size; } /* ISP83xx functions to process new minidump entries... */ static uint32_t qla83xx_minidump_process_pollrd(struct scsi_qla_host *ha, struct qla8xxx_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) { uint32_t r_addr, s_addr, s_value, r_value, poll_wait, poll_mask; uint16_t s_stride, i; uint32_t *data_ptr = *d_ptr; uint32_t rval = QLA_SUCCESS; struct qla83xx_minidump_entry_pollrd *pollrd_hdr; pollrd_hdr = (struct qla83xx_minidump_entry_pollrd *)entry_hdr; s_addr = le32_to_cpu(pollrd_hdr->select_addr); r_addr = le32_to_cpu(pollrd_hdr->read_addr); s_value = le32_to_cpu(pollrd_hdr->select_value); s_stride = le32_to_cpu(pollrd_hdr->select_value_stride); poll_wait = le32_to_cpu(pollrd_hdr->poll_wait); poll_mask = le32_to_cpu(pollrd_hdr->poll_mask); for (i = 0; i < le32_to_cpu(pollrd_hdr->op_count); i++) { ha->isp_ops->wr_reg_indirect(ha, s_addr, s_value); poll_wait = le32_to_cpu(pollrd_hdr->poll_wait); while (1) { ha->isp_ops->rd_reg_indirect(ha, s_addr, &r_value); if ((r_value & poll_mask) != 0) { break; } else { msleep(1); if (--poll_wait == 0) { ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n", __func__); rval = QLA_ERROR; goto exit_process_pollrd; } } } ha->isp_ops->rd_reg_indirect(ha, r_addr, &r_value); *data_ptr++ = cpu_to_le32(s_value); *data_ptr++ = cpu_to_le32(r_value); s_value += s_stride; } *d_ptr = data_ptr; exit_process_pollrd: return rval; } static uint32_t qla4_84xx_minidump_process_rddfe(struct scsi_qla_host *ha, struct qla8xxx_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) { int loop_cnt; uint32_t addr1, addr2, value, data, temp, wrval; uint8_t stride, stride2; uint16_t count; uint32_t poll, mask, modify_mask; uint32_t wait_count = 0; uint32_t *data_ptr = *d_ptr; struct qla8044_minidump_entry_rddfe *rddfe; uint32_t rval = QLA_SUCCESS; rddfe = (struct qla8044_minidump_entry_rddfe *)entry_hdr; addr1 = le32_to_cpu(rddfe->addr_1); value = le32_to_cpu(rddfe->value); stride = le32_to_cpu(rddfe->stride); stride2 = le32_to_cpu(rddfe->stride2); count = le32_to_cpu(rddfe->count); poll = le32_to_cpu(rddfe->poll); mask = le32_to_cpu(rddfe->mask); modify_mask = le32_to_cpu(rddfe->modify_mask); addr2 = addr1 + stride; for (loop_cnt = 0x0; loop_cnt < count; loop_cnt++) { ha->isp_ops->wr_reg_indirect(ha, addr1, (0x40000000 | value)); wait_count = 0; while (wait_count < poll) { ha->isp_ops->rd_reg_indirect(ha, addr1, &temp); if ((temp & mask) != 0) break; wait_count++; } if (wait_count == poll) { ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n", __func__); rval = QLA_ERROR; goto exit_process_rddfe; } else { ha->isp_ops->rd_reg_indirect(ha, addr2, &temp); temp = temp & modify_mask; temp = (temp | ((loop_cnt << 16) | loop_cnt)); wrval = ((temp << 16) | temp); ha->isp_ops->wr_reg_indirect(ha, addr2, wrval); ha->isp_ops->wr_reg_indirect(ha, addr1, value); wait_count = 0; while (wait_count < poll) { ha->isp_ops->rd_reg_indirect(ha, addr1, &temp); if ((temp & mask) != 0) break; wait_count++; } if (wait_count == poll) { ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n", __func__); rval = QLA_ERROR; goto exit_process_rddfe; } ha->isp_ops->wr_reg_indirect(ha, addr1, ((0x40000000 | value) + stride2)); wait_count = 0; while (wait_count < poll) { ha->isp_ops->rd_reg_indirect(ha, addr1, &temp); if ((temp & mask) != 0) break; wait_count++; } if (wait_count == poll) { ql4_printk(KERN_ERR, ha, "%s: 
TIMEOUT\n", __func__); rval = QLA_ERROR; goto exit_process_rddfe; } ha->isp_ops->rd_reg_indirect(ha, addr2, &data); *data_ptr++ = cpu_to_le32(wrval); *data_ptr++ = cpu_to_le32(data); } } *d_ptr = data_ptr; exit_process_rddfe: return rval; } static uint32_t qla4_84xx_minidump_process_rdmdio(struct scsi_qla_host *ha, struct qla8xxx_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) { int rval = QLA_SUCCESS; uint32_t addr1, addr2, value1, value2, data, selval; uint8_t stride1, stride2; uint32_t addr3, addr4, addr5, addr6, addr7; uint16_t count, loop_cnt; uint32_t mask; uint32_t *data_ptr = *d_ptr; struct qla8044_minidump_entry_rdmdio *rdmdio; rdmdio = (struct qla8044_minidump_entry_rdmdio *)entry_hdr; addr1 = le32_to_cpu(rdmdio->addr_1); addr2 = le32_to_cpu(rdmdio->addr_2); value1 = le32_to_cpu(rdmdio->value_1); stride1 = le32_to_cpu(rdmdio->stride_1); stride2 = le32_to_cpu(rdmdio->stride_2); count = le32_to_cpu(rdmdio->count); mask = le32_to_cpu(rdmdio->mask); value2 = le32_to_cpu(rdmdio->value_2); addr3 = addr1 + stride1; for (loop_cnt = 0; loop_cnt < count; loop_cnt++) { rval = ql4_84xx_poll_wait_ipmdio_bus_idle(ha, addr1, addr2, addr3, mask); if (rval) goto exit_process_rdmdio; addr4 = addr2 - stride1; rval = ql4_84xx_ipmdio_wr_reg(ha, addr1, addr3, mask, addr4, value2); if (rval) goto exit_process_rdmdio; addr5 = addr2 - (2 * stride1); rval = ql4_84xx_ipmdio_wr_reg(ha, addr1, addr3, mask, addr5, value1); if (rval) goto exit_process_rdmdio; addr6 = addr2 - (3 * stride1); rval = ql4_84xx_ipmdio_wr_reg(ha, addr1, addr3, mask, addr6, 0x2); if (rval) goto exit_process_rdmdio; rval = ql4_84xx_poll_wait_ipmdio_bus_idle(ha, addr1, addr2, addr3, mask); if (rval) goto exit_process_rdmdio; addr7 = addr2 - (4 * stride1); rval = ql4_84xx_ipmdio_rd_reg(ha, addr1, addr3, mask, addr7, &data); if (rval) goto exit_process_rdmdio; selval = (value2 << 18) | (value1 << 2) | 2; stride2 = le32_to_cpu(rdmdio->stride_2); *data_ptr++ = cpu_to_le32(selval); *data_ptr++ = cpu_to_le32(data); value1 = value1 + stride2; *d_ptr = data_ptr; } exit_process_rdmdio: return rval; } static uint32_t qla4_84xx_minidump_process_pollwr(struct scsi_qla_host *ha, struct qla8xxx_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) { uint32_t addr1, addr2, value1, value2, poll, r_value; struct qla8044_minidump_entry_pollwr *pollwr_hdr; uint32_t wait_count = 0; uint32_t rval = QLA_SUCCESS; pollwr_hdr = (struct qla8044_minidump_entry_pollwr *)entry_hdr; addr1 = le32_to_cpu(pollwr_hdr->addr_1); addr2 = le32_to_cpu(pollwr_hdr->addr_2); value1 = le32_to_cpu(pollwr_hdr->value_1); value2 = le32_to_cpu(pollwr_hdr->value_2); poll = le32_to_cpu(pollwr_hdr->poll); while (wait_count < poll) { ha->isp_ops->rd_reg_indirect(ha, addr1, &r_value); if ((r_value & poll) != 0) break; wait_count++; } if (wait_count == poll) { ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n", __func__); rval = QLA_ERROR; goto exit_process_pollwr; } ha->isp_ops->wr_reg_indirect(ha, addr2, value2); ha->isp_ops->wr_reg_indirect(ha, addr1, value1); wait_count = 0; while (wait_count < poll) { ha->isp_ops->rd_reg_indirect(ha, addr1, &r_value); if ((r_value & poll) != 0) break; wait_count++; } exit_process_pollwr: return rval; } static void qla83xx_minidump_process_rdmux2(struct scsi_qla_host *ha, struct qla8xxx_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) { uint32_t sel_val1, sel_val2, t_sel_val, data, i; uint32_t sel_addr1, sel_addr2, sel_val_mask, read_addr; struct qla83xx_minidump_entry_rdmux2 *rdmux2_hdr; uint32_t *data_ptr = *d_ptr; rdmux2_hdr = (struct 
qla83xx_minidump_entry_rdmux2 *)entry_hdr; sel_val1 = le32_to_cpu(rdmux2_hdr->select_value_1); sel_val2 = le32_to_cpu(rdmux2_hdr->select_value_2); sel_addr1 = le32_to_cpu(rdmux2_hdr->select_addr_1); sel_addr2 = le32_to_cpu(rdmux2_hdr->select_addr_2); sel_val_mask = le32_to_cpu(rdmux2_hdr->select_value_mask); read_addr = le32_to_cpu(rdmux2_hdr->read_addr); for (i = 0; i < rdmux2_hdr->op_count; i++) { ha->isp_ops->wr_reg_indirect(ha, sel_addr1, sel_val1); t_sel_val = sel_val1 & sel_val_mask; *data_ptr++ = cpu_to_le32(t_sel_val); ha->isp_ops->wr_reg_indirect(ha, sel_addr2, t_sel_val); ha->isp_ops->rd_reg_indirect(ha, read_addr, &data); *data_ptr++ = cpu_to_le32(data); ha->isp_ops->wr_reg_indirect(ha, sel_addr1, sel_val2); t_sel_val = sel_val2 & sel_val_mask; *data_ptr++ = cpu_to_le32(t_sel_val); ha->isp_ops->wr_reg_indirect(ha, sel_addr2, t_sel_val); ha->isp_ops->rd_reg_indirect(ha, read_addr, &data); *data_ptr++ = cpu_to_le32(data); sel_val1 += rdmux2_hdr->select_value_stride; sel_val2 += rdmux2_hdr->select_value_stride; } *d_ptr = data_ptr; } static uint32_t qla83xx_minidump_process_pollrdmwr(struct scsi_qla_host *ha, struct qla8xxx_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) { uint32_t poll_wait, poll_mask, r_value, data; uint32_t addr_1, addr_2, value_1, value_2; uint32_t *data_ptr = *d_ptr; uint32_t rval = QLA_SUCCESS; struct qla83xx_minidump_entry_pollrdmwr *poll_hdr; poll_hdr = (struct qla83xx_minidump_entry_pollrdmwr *)entry_hdr; addr_1 = le32_to_cpu(poll_hdr->addr_1); addr_2 = le32_to_cpu(poll_hdr->addr_2); value_1 = le32_to_cpu(poll_hdr->value_1); value_2 = le32_to_cpu(poll_hdr->value_2); poll_mask = le32_to_cpu(poll_hdr->poll_mask); ha->isp_ops->wr_reg_indirect(ha, addr_1, value_1); poll_wait = le32_to_cpu(poll_hdr->poll_wait); while (1) { ha->isp_ops->rd_reg_indirect(ha, addr_1, &r_value); if ((r_value & poll_mask) != 0) { break; } else { msleep(1); if (--poll_wait == 0) { ql4_printk(KERN_ERR, ha, "%s: TIMEOUT_1\n", __func__); rval = QLA_ERROR; goto exit_process_pollrdmwr; } } } ha->isp_ops->rd_reg_indirect(ha, addr_2, &data); data &= le32_to_cpu(poll_hdr->modify_mask); ha->isp_ops->wr_reg_indirect(ha, addr_2, data); ha->isp_ops->wr_reg_indirect(ha, addr_1, value_2); poll_wait = le32_to_cpu(poll_hdr->poll_wait); while (1) { ha->isp_ops->rd_reg_indirect(ha, addr_1, &r_value); if ((r_value & poll_mask) != 0) { break; } else { msleep(1); if (--poll_wait == 0) { ql4_printk(KERN_ERR, ha, "%s: TIMEOUT_2\n", __func__); rval = QLA_ERROR; goto exit_process_pollrdmwr; } } } *data_ptr++ = cpu_to_le32(addr_2); *data_ptr++ = cpu_to_le32(data); *d_ptr = data_ptr; exit_process_pollrdmwr: return rval; } static uint32_t qla4_83xx_minidump_process_rdrom(struct scsi_qla_host *ha, struct qla8xxx_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) { uint32_t fl_addr, u32_count, rval; struct qla8xxx_minidump_entry_rdrom *rom_hdr; uint32_t *data_ptr = *d_ptr; rom_hdr = (struct qla8xxx_minidump_entry_rdrom *)entry_hdr; fl_addr = le32_to_cpu(rom_hdr->read_addr); u32_count = le32_to_cpu(rom_hdr->read_data_size)/sizeof(uint32_t); DEBUG2(ql4_printk(KERN_INFO, ha, "[%s]: fl_addr: 0x%x, count: 0x%x\n", __func__, fl_addr, u32_count)); rval = qla4_83xx_lockless_flash_read_u32(ha, fl_addr, (u8 *)(data_ptr), u32_count); if (rval == QLA_ERROR) { ql4_printk(KERN_ERR, ha, "%s: Flash Read Error,Count=%d\n", __func__, u32_count); goto exit_process_rdrom; } data_ptr += u32_count; *d_ptr = data_ptr; exit_process_rdrom: return rval; } /** * qla4_8xxx_collect_md_data - Retrieve firmware minidump data. 
* @ha: pointer to adapter structure **/ static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha) { int num_entry_hdr = 0; struct qla8xxx_minidump_entry_hdr *entry_hdr; struct qla4_8xxx_minidump_template_hdr *tmplt_hdr; uint32_t *data_ptr; uint32_t data_collected = 0; int i, rval = QLA_ERROR; uint64_t now; uint32_t timestamp; ha->fw_dump_skip_size = 0; if (!ha->fw_dump) { ql4_printk(KERN_INFO, ha, "%s(%ld) No buffer to dump\n", __func__, ha->host_no); return rval; } tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *) ha->fw_dump_tmplt_hdr; data_ptr = (uint32_t *)((uint8_t *)ha->fw_dump + ha->fw_dump_tmplt_size); data_collected += ha->fw_dump_tmplt_size; num_entry_hdr = tmplt_hdr->num_of_entries; ql4_printk(KERN_INFO, ha, "[%s]: starting data ptr: %p\n", __func__, data_ptr); ql4_printk(KERN_INFO, ha, "[%s]: no of entry headers in Template: 0x%x\n", __func__, num_entry_hdr); ql4_printk(KERN_INFO, ha, "[%s]: Capture Mask obtained: 0x%x\n", __func__, ha->fw_dump_capture_mask); ql4_printk(KERN_INFO, ha, "[%s]: Total_data_size 0x%x, %d obtained\n", __func__, ha->fw_dump_size, ha->fw_dump_size); /* Update current timestamp before taking dump */ now = get_jiffies_64(); timestamp = (u32)(jiffies_to_msecs(now) / 1000); tmplt_hdr->driver_timestamp = timestamp; entry_hdr = (struct qla8xxx_minidump_entry_hdr *) (((uint8_t *)ha->fw_dump_tmplt_hdr) + tmplt_hdr->first_entry_offset); if (is_qla8032(ha) || is_qla8042(ha)) tmplt_hdr->saved_state_array[QLA83XX_SS_OCM_WNDREG_INDEX] = tmplt_hdr->ocm_window_reg[ha->func_num]; /* Walk through the entry headers - validate/perform required action */ for (i = 0; i < num_entry_hdr; i++) { if (data_collected > ha->fw_dump_size) { ql4_printk(KERN_INFO, ha, "Data collected: [0x%x], Total Dump size: [0x%x]\n", data_collected, ha->fw_dump_size); return rval; } if (!(entry_hdr->d_ctrl.entry_capture_mask & ha->fw_dump_capture_mask)) { entry_hdr->d_ctrl.driver_flags |= QLA8XXX_DBG_SKIPPED_FLAG; goto skip_nxt_entry; } DEBUG2(ql4_printk(KERN_INFO, ha, "Data collected: [0x%x], Dump size left:[0x%x]\n", data_collected, (ha->fw_dump_size - data_collected))); /* Decode the entry type and take required action to capture * debug data */ switch (entry_hdr->entry_type) { case QLA8XXX_RDEND: qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); break; case QLA8XXX_CNTRL: rval = qla4_8xxx_minidump_process_control(ha, entry_hdr); if (rval != QLA_SUCCESS) { qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); goto md_failed; } break; case QLA8XXX_RDCRB: qla4_8xxx_minidump_process_rdcrb(ha, entry_hdr, &data_ptr); break; case QLA8XXX_RDMEM: rval = qla4_8xxx_minidump_process_rdmem(ha, entry_hdr, &data_ptr); if (rval != QLA_SUCCESS) { qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); goto md_failed; } break; case QLA8XXX_BOARD: case QLA8XXX_RDROM: if (is_qla8022(ha)) { qla4_82xx_minidump_process_rdrom(ha, entry_hdr, &data_ptr); } else if (is_qla8032(ha) || is_qla8042(ha)) { rval = qla4_83xx_minidump_process_rdrom(ha, entry_hdr, &data_ptr); if (rval != QLA_SUCCESS) qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); } break; case QLA8XXX_L2DTG: case QLA8XXX_L2ITG: case QLA8XXX_L2DAT: case QLA8XXX_L2INS: rval = qla4_8xxx_minidump_process_l2tag(ha, entry_hdr, &data_ptr); if (rval != QLA_SUCCESS) { qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); goto md_failed; } break; case QLA8XXX_L1DTG: case QLA8XXX_L1ITG: case QLA8XXX_L1DAT: case QLA8XXX_L1INS: qla4_8xxx_minidump_process_l1cache(ha, entry_hdr, &data_ptr); break; case QLA8XXX_RDOCM: qla4_8xxx_minidump_process_rdocm(ha, entry_hdr, &data_ptr); break; 
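		/*
		 * Mux and queue entries program a select register, then
		 * capture one or more data registers for each selection.
		 */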
case QLA8XXX_RDMUX: qla4_8xxx_minidump_process_rdmux(ha, entry_hdr, &data_ptr); break; case QLA8XXX_QUEUE: qla4_8xxx_minidump_process_queue(ha, entry_hdr, &data_ptr); break; case QLA83XX_POLLRD: if (is_qla8022(ha)) { qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); break; } rval = qla83xx_minidump_process_pollrd(ha, entry_hdr, &data_ptr); if (rval != QLA_SUCCESS) qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); break; case QLA83XX_RDMUX2: if (is_qla8022(ha)) { qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); break; } qla83xx_minidump_process_rdmux2(ha, entry_hdr, &data_ptr); break; case QLA83XX_POLLRDMWR: if (is_qla8022(ha)) { qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); break; } rval = qla83xx_minidump_process_pollrdmwr(ha, entry_hdr, &data_ptr); if (rval != QLA_SUCCESS) qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); break; case QLA8044_RDDFE: rval = qla4_84xx_minidump_process_rddfe(ha, entry_hdr, &data_ptr); if (rval != QLA_SUCCESS) qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); break; case QLA8044_RDMDIO: rval = qla4_84xx_minidump_process_rdmdio(ha, entry_hdr, &data_ptr); if (rval != QLA_SUCCESS) qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); break; case QLA8044_POLLWR: rval = qla4_84xx_minidump_process_pollwr(ha, entry_hdr, &data_ptr); if (rval != QLA_SUCCESS) qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); break; case QLA8XXX_RDNOP: default: qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); break; } data_collected = (uint8_t *)data_ptr - (uint8_t *)ha->fw_dump; skip_nxt_entry: /* next entry in the template */ entry_hdr = (struct qla8xxx_minidump_entry_hdr *) (((uint8_t *)entry_hdr) + entry_hdr->entry_size); } if ((data_collected + ha->fw_dump_skip_size) != ha->fw_dump_size) { ql4_printk(KERN_INFO, ha, "Dump data mismatch: Data collected: [0x%x], total_data_size:[0x%x]\n", data_collected, ha->fw_dump_size); rval = QLA_ERROR; goto md_failed; } DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s Last entry: 0x%x\n", __func__, i)); md_failed: return rval; } /** * qla4_8xxx_uevent_emit - Send uevent when the firmware dump is ready. * @ha: pointer to adapter structure * @code: uevent code to act upon **/ static void qla4_8xxx_uevent_emit(struct scsi_qla_host *ha, u32 code) { char event_string[40]; char *envp[] = { event_string, NULL }; switch (code) { case QL4_UEVENT_CODE_FW_DUMP: snprintf(event_string, sizeof(event_string), "FW_DUMP=%lu", ha->host_no); break; default: /*do nothing*/ break; } kobject_uevent_env(&(&ha->pdev->dev)->kobj, KOBJ_CHANGE, envp); } void qla4_8xxx_get_minidump(struct scsi_qla_host *ha) { if (ql4xenablemd && test_bit(AF_FW_RECOVERY, &ha->flags) && !test_bit(AF_82XX_FW_DUMPED, &ha->flags)) { if (!qla4_8xxx_collect_md_data(ha)) { qla4_8xxx_uevent_emit(ha, QL4_UEVENT_CODE_FW_DUMP); set_bit(AF_82XX_FW_DUMPED, &ha->flags); } else { ql4_printk(KERN_INFO, ha, "%s: Unable to collect minidump\n", __func__); } } } /** * qla4_8xxx_device_bootstrap - Initialize device, set DEV_READY, start fw * @ha: pointer to adapter structure * * Note: IDC lock must be held upon entry **/ int qla4_8xxx_device_bootstrap(struct scsi_qla_host *ha) { int rval = QLA_ERROR; int i; uint32_t old_count, count; int need_reset = 0; need_reset = ha->isp_ops->need_reset(ha); if (need_reset) { /* We are trying to perform a recovery here. 
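 * Recover the ROM lock only when firmware recovery is in progress;
 * the else branch below instead samples the peg-alive counter and
 * treats a changing count as firmware already being up.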
*/ if (test_bit(AF_FW_RECOVERY, &ha->flags)) ha->isp_ops->rom_lock_recovery(ha); } else { old_count = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_ALIVE_COUNTER); for (i = 0; i < 10; i++) { msleep(200); count = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_ALIVE_COUNTER); if (count != old_count) { rval = QLA_SUCCESS; goto dev_ready; } } ha->isp_ops->rom_lock_recovery(ha); } /* set to DEV_INITIALIZING */ ql4_printk(KERN_INFO, ha, "HW State: INITIALIZING\n"); qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, QLA8XXX_DEV_INITIALIZING); ha->isp_ops->idc_unlock(ha); if (is_qla8022(ha)) qla4_8xxx_get_minidump(ha); rval = ha->isp_ops->restart_firmware(ha); ha->isp_ops->idc_lock(ha); if (rval != QLA_SUCCESS) { ql4_printk(KERN_INFO, ha, "HW State: FAILED\n"); qla4_8xxx_clear_drv_active(ha); qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, QLA8XXX_DEV_FAILED); return rval; } dev_ready: ql4_printk(KERN_INFO, ha, "HW State: READY\n"); qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, QLA8XXX_DEV_READY); return rval; } /** * qla4_82xx_need_reset_handler - Code to start reset sequence * @ha: pointer to adapter structure * * Note: IDC lock must be held upon entry **/ static void qla4_82xx_need_reset_handler(struct scsi_qla_host *ha) { uint32_t dev_state, drv_state, drv_active; uint32_t active_mask = 0xFFFFFFFF; unsigned long reset_timeout; ql4_printk(KERN_INFO, ha, "Performing ISP error recovery\n"); if (test_and_clear_bit(AF_ONLINE, &ha->flags)) { qla4_82xx_idc_unlock(ha); ha->isp_ops->disable_intrs(ha); qla4_82xx_idc_lock(ha); } if (!test_bit(AF_8XXX_RST_OWNER, &ha->flags)) { DEBUG2(ql4_printk(KERN_INFO, ha, "%s(%ld): reset acknowledged\n", __func__, ha->host_no)); qla4_8xxx_set_rst_ready(ha); } else { active_mask = (~(1 << (ha->func_num * 4))); } /* wait for 10 seconds for reset ack from all functions */ reset_timeout = jiffies + (ha->nx_reset_timeout * HZ); drv_state = qla4_82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); drv_active = qla4_82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); ql4_printk(KERN_INFO, ha, "%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n", __func__, ha->host_no, drv_state, drv_active); while (drv_state != (drv_active & active_mask)) { if (time_after_eq(jiffies, reset_timeout)) { ql4_printk(KERN_INFO, ha, "%s: RESET TIMEOUT! drv_state: 0x%08x, drv_active: 0x%08x\n", DRIVER_NAME, drv_state, drv_active); break; } /* * When reset_owner times out, check which functions * acked/did not ack */ if (test_bit(AF_8XXX_RST_OWNER, &ha->flags)) { ql4_printk(KERN_INFO, ha, "%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n", __func__, ha->host_no, drv_state, drv_active); } qla4_82xx_idc_unlock(ha); msleep(1000); qla4_82xx_idc_lock(ha); drv_state = qla4_82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); drv_active = qla4_82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); } /* Clear RESET OWNER as we are not going to use it any further */ clear_bit(AF_8XXX_RST_OWNER, &ha->flags); dev_state = qla4_82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n", dev_state, dev_state < MAX_STATES ? 
qdev_state[dev_state] : "Unknown"); /* Force to DEV_COLD unless someone else is starting a reset */ if (dev_state != QLA8XXX_DEV_INITIALIZING) { ql4_printk(KERN_INFO, ha, "HW State: COLD/RE-INIT\n"); qla4_82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_COLD); qla4_8xxx_set_rst_ready(ha); } } /** * qla4_8xxx_need_qsnt_handler - Code to start qsnt * @ha: pointer to adapter structure **/ void qla4_8xxx_need_qsnt_handler(struct scsi_qla_host *ha) { ha->isp_ops->idc_lock(ha); qla4_8xxx_set_qsnt_ready(ha); ha->isp_ops->idc_unlock(ha); } static void qla4_82xx_set_idc_ver(struct scsi_qla_host *ha) { int idc_ver; uint32_t drv_active; drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE); if (drv_active == (1 << (ha->func_num * 4))) { qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_IDC_VERSION, QLA82XX_IDC_VERSION); ql4_printk(KERN_INFO, ha, "%s: IDC version updated to %d\n", __func__, QLA82XX_IDC_VERSION); } else { idc_ver = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_IDC_VERSION); if (QLA82XX_IDC_VERSION != idc_ver) { ql4_printk(KERN_INFO, ha, "%s: qla4xxx driver IDC version %d is not compatible with IDC version %d of other drivers!\n", __func__, QLA82XX_IDC_VERSION, idc_ver); } } } static int qla4_83xx_set_idc_ver(struct scsi_qla_host *ha) { int idc_ver; uint32_t drv_active; int rval = QLA_SUCCESS; drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE); if (drv_active == (1 << ha->func_num)) { idc_ver = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_IDC_VERSION); idc_ver &= (~0xFF); idc_ver |= QLA83XX_IDC_VER_MAJ_VALUE; qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_IDC_VERSION, idc_ver); ql4_printk(KERN_INFO, ha, "%s: IDC version updated to %d\n", __func__, idc_ver); } else { idc_ver = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_IDC_VERSION); idc_ver &= 0xFF; if (QLA83XX_IDC_VER_MAJ_VALUE != idc_ver) { ql4_printk(KERN_INFO, ha, "%s: qla4xxx driver IDC version %d is not compatible with IDC version %d of other drivers!\n", __func__, QLA83XX_IDC_VER_MAJ_VALUE, idc_ver); rval = QLA_ERROR; goto exit_set_idc_ver; } } /* Update IDC_MINOR_VERSION */ idc_ver = qla4_83xx_rd_reg(ha, QLA83XX_CRB_IDC_VER_MINOR); idc_ver &= ~(0x03 << (ha->func_num * 2)); idc_ver |= (QLA83XX_IDC_VER_MIN_VALUE << (ha->func_num * 2)); qla4_83xx_wr_reg(ha, QLA83XX_CRB_IDC_VER_MINOR, idc_ver); exit_set_idc_ver: return rval; } int qla4_8xxx_update_idc_reg(struct scsi_qla_host *ha) { uint32_t drv_active; int rval = QLA_SUCCESS; if (test_bit(AF_INIT_DONE, &ha->flags)) goto exit_update_idc_reg; ha->isp_ops->idc_lock(ha); qla4_8xxx_set_drv_active(ha); /* * If we are the first driver to load and * ql4xdontresethba is not set, clear IDC_CTRL BIT0. */ if (is_qla8032(ha) || is_qla8042(ha)) { drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE); if ((drv_active == (1 << ha->func_num)) && !ql4xdontresethba) qla4_83xx_clear_idc_dontreset(ha); } if (is_qla8022(ha)) { qla4_82xx_set_idc_ver(ha); } else if (is_qla8032(ha) || is_qla8042(ha)) { rval = qla4_83xx_set_idc_ver(ha); if (rval == QLA_ERROR) qla4_8xxx_clear_drv_active(ha); } ha->isp_ops->idc_unlock(ha); exit_update_idc_reg: return rval; } /** * qla4_8xxx_device_state_handler - Adapter state machine * @ha: pointer to host adapter structure. 
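 *
 * Polls QLA8XXX_CRB_DEV_STATE and drives the device toward READY:
 * bootstraps it from COLD, waits out INITIALIZING/QUIESCENT, and
 * invokes the need-reset/need-quiescent handlers when requested.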
* * Note: IDC lock must be UNLOCKED upon entry **/ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha) { uint32_t dev_state; int rval = QLA_SUCCESS; unsigned long dev_init_timeout; rval = qla4_8xxx_update_idc_reg(ha); if (rval == QLA_ERROR) goto exit_state_handler; dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE); DEBUG2(ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n", dev_state, dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown")); /* wait for 30 seconds for device to go ready */ dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ); ha->isp_ops->idc_lock(ha); while (1) { if (time_after_eq(jiffies, dev_init_timeout)) { ql4_printk(KERN_WARNING, ha, "%s: Device Init Failed 0x%x = %s\n", DRIVER_NAME, dev_state, dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown"); qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, QLA8XXX_DEV_FAILED); } dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE); ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n", dev_state, dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown"); /* NOTE: Make sure idc unlocked upon exit of switch statement */ switch (dev_state) { case QLA8XXX_DEV_READY: goto exit; case QLA8XXX_DEV_COLD: rval = qla4_8xxx_device_bootstrap(ha); goto exit; case QLA8XXX_DEV_INITIALIZING: ha->isp_ops->idc_unlock(ha); msleep(1000); ha->isp_ops->idc_lock(ha); break; case QLA8XXX_DEV_NEED_RESET: /* * For ISP8324 and ISP8042, if NEED_RESET is set by any * driver, it should be honored, irrespective of * IDC_CTRL DONTRESET_BIT0 */ if (is_qla8032(ha) || is_qla8042(ha)) { qla4_83xx_need_reset_handler(ha); } else if (is_qla8022(ha)) { if (!ql4xdontresethba) { qla4_82xx_need_reset_handler(ha); /* Update timeout value after need * reset handler */ dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ); } else { ha->isp_ops->idc_unlock(ha); msleep(1000); ha->isp_ops->idc_lock(ha); } } break; case QLA8XXX_DEV_NEED_QUIESCENT: /* idc locked/unlocked in handler */ qla4_8xxx_need_qsnt_handler(ha); break; case QLA8XXX_DEV_QUIESCENT: ha->isp_ops->idc_unlock(ha); msleep(1000); ha->isp_ops->idc_lock(ha); break; case QLA8XXX_DEV_FAILED: ha->isp_ops->idc_unlock(ha); qla4xxx_dead_adapter_cleanup(ha); rval = QLA_ERROR; ha->isp_ops->idc_lock(ha); goto exit; default: ha->isp_ops->idc_unlock(ha); qla4xxx_dead_adapter_cleanup(ha); rval = QLA_ERROR; ha->isp_ops->idc_lock(ha); goto exit; } } exit: ha->isp_ops->idc_unlock(ha); exit_state_handler: return rval; } int qla4_8xxx_load_risc(struct scsi_qla_host *ha) { int retval; /* clear the interrupt */ if (is_qla8032(ha) || is_qla8042(ha)) { writel(0, &ha->qla4_83xx_reg->risc_intr); readl(&ha->qla4_83xx_reg->risc_intr); } else if (is_qla8022(ha)) { writel(0, &ha->qla4_82xx_reg->host_int); readl(&ha->qla4_82xx_reg->host_int); } retval = qla4_8xxx_device_state_handler(ha); /* Initialize request and response queues. 
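 * This is only done if the device state handler above returned
 * QLA_SUCCESS.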
*/ if (retval == QLA_SUCCESS) qla4xxx_init_rings(ha); if (retval == QLA_SUCCESS && !test_bit(AF_IRQ_ATTACHED, &ha->flags)) retval = qla4xxx_request_irqs(ha); return retval; } /*****************************************************************************/ /* Flash Manipulation Routines */ /*****************************************************************************/ #define OPTROM_BURST_SIZE 0x1000 #define OPTROM_BURST_DWORDS (OPTROM_BURST_SIZE / 4) #define FARX_DATA_FLAG BIT_31 #define FARX_ACCESS_FLASH_CONF 0x7FFD0000 #define FARX_ACCESS_FLASH_DATA 0x7FF00000 static inline uint32_t flash_conf_addr(struct ql82xx_hw_data *hw, uint32_t faddr) { return hw->flash_conf_off | faddr; } static uint32_t * qla4_82xx_read_flash_data(struct scsi_qla_host *ha, uint32_t *dwptr, uint32_t faddr, uint32_t length) { uint32_t i; uint32_t val; int loops = 0; while ((qla4_82xx_rom_lock(ha) != 0) && (loops < 50000)) { udelay(100); cond_resched(); loops++; } if (loops >= 50000) { ql4_printk(KERN_WARNING, ha, "ROM lock failed\n"); return dwptr; } /* Dword reads to flash. */ for (i = 0; i < length/4; i++, faddr += 4) { if (qla4_82xx_do_rom_fast_read(ha, faddr, &val)) { ql4_printk(KERN_WARNING, ha, "Do ROM fast read failed\n"); goto done_read; } dwptr[i] = cpu_to_le32(val); } done_read: qla4_82xx_rom_unlock(ha); return dwptr; } /* * Address and length are byte address */ static uint8_t * qla4_82xx_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf, uint32_t offset, uint32_t length) { qla4_82xx_read_flash_data(ha, (uint32_t *)buf, offset, length); return buf; } static int qla4_8xxx_find_flt_start(struct scsi_qla_host *ha, uint32_t *start) { const char *loc, *locations[] = { "DEF", "PCI" }; /* * FLT-location structure resides after the last PCI region. */ /* Begin with sane defaults. */ loc = locations[0]; *start = FA_FLASH_LAYOUT_ADDR_82; DEBUG2(ql4_printk(KERN_INFO, ha, "FLTL[%s] = 0x%x.\n", loc, *start)); return QLA_SUCCESS; } static void qla4_8xxx_get_flt_info(struct scsi_qla_host *ha, uint32_t flt_addr) { const char *loc, *locations[] = { "DEF", "FLT" }; uint16_t *wptr; uint16_t cnt, chksum; uint32_t start, status; struct qla_flt_header *flt; struct qla_flt_region *region; struct ql82xx_hw_data *hw = &ha->hw; hw->flt_region_flt = flt_addr; wptr = (uint16_t *)ha->request_ring; flt = (struct qla_flt_header *)ha->request_ring; region = (struct qla_flt_region *)&flt[1]; if (is_qla8022(ha)) { qla4_82xx_read_optrom_data(ha, (uint8_t *)ha->request_ring, flt_addr << 2, OPTROM_BURST_SIZE); } else if (is_qla8032(ha) || is_qla8042(ha)) { status = qla4_83xx_flash_read_u32(ha, flt_addr << 2, (uint8_t *)ha->request_ring, 0x400); if (status != QLA_SUCCESS) goto no_flash_data; } if (*wptr == cpu_to_le16(0xffff)) goto no_flash_data; if (flt->version != cpu_to_le16(1)) { DEBUG2(ql4_printk(KERN_INFO, ha, "Unsupported FLT detected: " "version=0x%x length=0x%x checksum=0x%x.\n", le16_to_cpu(flt->version), le16_to_cpu(flt->length), le16_to_cpu(flt->checksum))); goto no_flash_data; } cnt = (sizeof(struct qla_flt_header) + le16_to_cpu(flt->length)) >> 1; for (chksum = 0; cnt; cnt--) chksum += le16_to_cpu(*wptr++); if (chksum) { DEBUG2(ql4_printk(KERN_INFO, ha, "Inconsistent FLT detected: " "version=0x%x length=0x%x checksum=0x%x.\n", le16_to_cpu(flt->version), le16_to_cpu(flt->length), chksum)); goto no_flash_data; } loc = locations[1]; cnt = le16_to_cpu(flt->length) / sizeof(struct qla_flt_region); for ( ; cnt; cnt--, region++) { /* Store addresses as DWORD offsets. 
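 * The layout table stores byte addresses; the ">> 2" here converts
 * them to dword offsets, matching the "<< 2" used when regions are
 * read back from flash.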
*/ start = le32_to_cpu(region->start) >> 2; DEBUG3(ql4_printk(KERN_DEBUG, ha, "FLT[%02x]: start=0x%x " "end=0x%x size=0x%x.\n", le32_to_cpu(region->code), start, le32_to_cpu(region->end) >> 2, le32_to_cpu(region->size))); switch (le32_to_cpu(region->code) & 0xff) { case FLT_REG_FDT: hw->flt_region_fdt = start; break; case FLT_REG_BOOT_CODE_82: hw->flt_region_boot = start; break; case FLT_REG_FW_82: case FLT_REG_FW_82_1: hw->flt_region_fw = start; break; case FLT_REG_BOOTLOAD_82: hw->flt_region_bootload = start; break; case FLT_REG_ISCSI_PARAM: hw->flt_iscsi_param = start; break; case FLT_REG_ISCSI_CHAP: hw->flt_region_chap = start; hw->flt_chap_size = le32_to_cpu(region->size); break; case FLT_REG_ISCSI_DDB: hw->flt_region_ddb = start; hw->flt_ddb_size = le32_to_cpu(region->size); break; } } goto done; no_flash_data: /* Use hardcoded defaults. */ loc = locations[0]; hw->flt_region_fdt = FA_FLASH_DESCR_ADDR_82; hw->flt_region_boot = FA_BOOT_CODE_ADDR_82; hw->flt_region_bootload = FA_BOOT_LOAD_ADDR_82; hw->flt_region_fw = FA_RISC_CODE_ADDR_82; hw->flt_region_chap = FA_FLASH_ISCSI_CHAP >> 2; hw->flt_chap_size = FA_FLASH_CHAP_SIZE; hw->flt_region_ddb = FA_FLASH_ISCSI_DDB >> 2; hw->flt_ddb_size = FA_FLASH_DDB_SIZE; done: DEBUG2(ql4_printk(KERN_INFO, ha, "FLT[%s]: flt=0x%x fdt=0x%x boot=0x%x bootload=0x%x fw=0x%x chap=0x%x chap_size=0x%x ddb=0x%x ddb_size=0x%x\n", loc, hw->flt_region_flt, hw->flt_region_fdt, hw->flt_region_boot, hw->flt_region_bootload, hw->flt_region_fw, hw->flt_region_chap, hw->flt_chap_size, hw->flt_region_ddb, hw->flt_ddb_size)); } static void qla4_82xx_get_fdt_info(struct scsi_qla_host *ha) { #define FLASH_BLK_SIZE_4K 0x1000 #define FLASH_BLK_SIZE_32K 0x8000 #define FLASH_BLK_SIZE_64K 0x10000 const char *loc, *locations[] = { "MID", "FDT" }; uint16_t cnt, chksum; uint16_t *wptr; struct qla_fdt_layout *fdt; uint16_t mid = 0; uint16_t fid = 0; struct ql82xx_hw_data *hw = &ha->hw; hw->flash_conf_off = FARX_ACCESS_FLASH_CONF; hw->flash_data_off = FARX_ACCESS_FLASH_DATA; wptr = (uint16_t *)ha->request_ring; fdt = (struct qla_fdt_layout *)ha->request_ring; qla4_82xx_read_optrom_data(ha, (uint8_t *)ha->request_ring, hw->flt_region_fdt << 2, OPTROM_BURST_SIZE); if (*wptr == cpu_to_le16(0xffff)) goto no_flash_data; if (fdt->sig[0] != 'Q' || fdt->sig[1] != 'L' || fdt->sig[2] != 'I' || fdt->sig[3] != 'D') goto no_flash_data; for (cnt = 0, chksum = 0; cnt < sizeof(struct qla_fdt_layout) >> 1; cnt++) chksum += le16_to_cpu(*wptr++); if (chksum) { DEBUG2(ql4_printk(KERN_INFO, ha, "Inconsistent FDT detected: " "checksum=0x%x id=%c version=0x%x.\n", chksum, fdt->sig[0], le16_to_cpu(fdt->version))); goto no_flash_data; } loc = locations[1]; mid = le16_to_cpu(fdt->man_id); fid = le16_to_cpu(fdt->id); hw->fdt_wrt_disable = fdt->wrt_disable_bits; hw->fdt_erase_cmd = flash_conf_addr(hw, 0x0300 | fdt->erase_cmd); hw->fdt_block_size = le32_to_cpu(fdt->block_size); if (fdt->unprotect_sec_cmd) { hw->fdt_unprotect_sec_cmd = flash_conf_addr(hw, 0x0300 | fdt->unprotect_sec_cmd); hw->fdt_protect_sec_cmd = fdt->protect_sec_cmd ? 
flash_conf_addr(hw, 0x0300 | fdt->protect_sec_cmd) : flash_conf_addr(hw, 0x0336); } goto done; no_flash_data: loc = locations[0]; hw->fdt_block_size = FLASH_BLK_SIZE_64K; done: DEBUG2(ql4_printk(KERN_INFO, ha, "FDT[%s]: (0x%x/0x%x) erase=0x%x " "pro=%x upro=%x wrtd=0x%x blk=0x%x.\n", loc, mid, fid, hw->fdt_erase_cmd, hw->fdt_protect_sec_cmd, hw->fdt_unprotect_sec_cmd, hw->fdt_wrt_disable, hw->fdt_block_size)); } static void qla4_82xx_get_idc_param(struct scsi_qla_host *ha) { #define QLA82XX_IDC_PARAM_ADDR 0x003e885c uint32_t *wptr; if (!is_qla8022(ha)) return; wptr = (uint32_t *)ha->request_ring; qla4_82xx_read_optrom_data(ha, (uint8_t *)ha->request_ring, QLA82XX_IDC_PARAM_ADDR , 8); if (*wptr == cpu_to_le32(0xffffffff)) { ha->nx_dev_init_timeout = ROM_DEV_INIT_TIMEOUT; ha->nx_reset_timeout = ROM_DRV_RESET_ACK_TIMEOUT; } else { ha->nx_dev_init_timeout = le32_to_cpu(*wptr++); ha->nx_reset_timeout = le32_to_cpu(*wptr); } DEBUG2(ql4_printk(KERN_DEBUG, ha, "ha->nx_dev_init_timeout = %d\n", ha->nx_dev_init_timeout)); DEBUG2(ql4_printk(KERN_DEBUG, ha, "ha->nx_reset_timeout = %d\n", ha->nx_reset_timeout)); return; } void qla4_82xx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd, int in_count) { int i; /* Load all mailbox registers, except mailbox 0. */ for (i = 1; i < in_count; i++) writel(mbx_cmd[i], &ha->qla4_82xx_reg->mailbox_in[i]); /* Wakeup firmware */ writel(mbx_cmd[0], &ha->qla4_82xx_reg->mailbox_in[0]); readl(&ha->qla4_82xx_reg->mailbox_in[0]); writel(HINT_MBX_INT_PENDING, &ha->qla4_82xx_reg->hint); readl(&ha->qla4_82xx_reg->hint); } void qla4_82xx_process_mbox_intr(struct scsi_qla_host *ha, int out_count) { int intr_status; intr_status = readl(&ha->qla4_82xx_reg->host_int); if (intr_status & ISRX_82XX_RISC_INT) { ha->mbox_status_count = out_count; intr_status = readl(&ha->qla4_82xx_reg->host_status); ha->isp_ops->interrupt_service_routine(ha, intr_status); if (test_bit(AF_INTERRUPTS_ON, &ha->flags) && (!ha->pdev->msi_enabled && !ha->pdev->msix_enabled)) qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff); } } int qla4_8xxx_get_flash_info(struct scsi_qla_host *ha) { int ret; uint32_t flt_addr; ret = qla4_8xxx_find_flt_start(ha, &flt_addr); if (ret != QLA_SUCCESS) return ret; qla4_8xxx_get_flt_info(ha, flt_addr); if (is_qla8022(ha)) { qla4_82xx_get_fdt_info(ha); qla4_82xx_get_idc_param(ha); } else if (is_qla8032(ha) || is_qla8042(ha)) { qla4_83xx_get_idc_param(ha); } return QLA_SUCCESS; } /** * qla4_8xxx_stop_firmware - stops firmware on specified adapter instance * @ha: pointer to host adapter structure. * * Remarks: * For iSCSI, throws away all I/O and AENs into bit bucket, so they will * not be available after successful return. Driver must clean up potential * outstanding I/Os after calling this function. **/ int qla4_8xxx_stop_firmware(struct scsi_qla_host *ha) { int status; uint32_t mbox_cmd[MBOX_REG_COUNT]; uint32_t mbox_sts[MBOX_REG_COUNT]; memset(&mbox_cmd, 0, sizeof(mbox_cmd)); memset(&mbox_sts, 0, sizeof(mbox_sts)); mbox_cmd[0] = MBOX_CMD_STOP_FW; status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]); DEBUG2(printk("scsi%ld: %s: status = %d\n", ha->host_no, __func__, status)); return status; } /** * qla4_82xx_isp_reset - Resets ISP and aborts all outstanding commands. * @ha: pointer to host adapter structure. 
**/ int qla4_82xx_isp_reset(struct scsi_qla_host *ha) { int rval; uint32_t dev_state; qla4_82xx_idc_lock(ha); dev_state = qla4_82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); if (dev_state == QLA8XXX_DEV_READY) { ql4_printk(KERN_INFO, ha, "HW State: NEED RESET\n"); qla4_82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_NEED_RESET); set_bit(AF_8XXX_RST_OWNER, &ha->flags); } else ql4_printk(KERN_INFO, ha, "HW State: DEVICE INITIALIZING\n"); qla4_82xx_idc_unlock(ha); rval = qla4_8xxx_device_state_handler(ha); qla4_82xx_idc_lock(ha); qla4_8xxx_clear_rst_ready(ha); qla4_82xx_idc_unlock(ha); if (rval == QLA_SUCCESS) { ql4_printk(KERN_INFO, ha, "Clearing AF_RECOVERY in qla4_82xx_isp_reset\n"); clear_bit(AF_FW_RECOVERY, &ha->flags); } return rval; } /** * qla4_8xxx_get_sys_info - get adapter MAC address(es) and serial number * @ha: pointer to host adapter structure. * **/ int qla4_8xxx_get_sys_info(struct scsi_qla_host *ha) { uint32_t mbox_cmd[MBOX_REG_COUNT]; uint32_t mbox_sts[MBOX_REG_COUNT]; struct mbx_sys_info *sys_info; dma_addr_t sys_info_dma; int status = QLA_ERROR; sys_info = dma_alloc_coherent(&ha->pdev->dev, sizeof(*sys_info), &sys_info_dma, GFP_KERNEL); if (sys_info == NULL) { DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n", ha->host_no, __func__)); return status; } memset(&mbox_cmd, 0, sizeof(mbox_cmd)); memset(&mbox_sts, 0, sizeof(mbox_sts)); mbox_cmd[0] = MBOX_CMD_GET_SYS_INFO; mbox_cmd[1] = LSDW(sys_info_dma); mbox_cmd[2] = MSDW(sys_info_dma); mbox_cmd[4] = sizeof(*sys_info); if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 6, &mbox_cmd[0], &mbox_sts[0]) != QLA_SUCCESS) { DEBUG2(printk("scsi%ld: %s: GET_SYS_INFO failed\n", ha->host_no, __func__)); goto exit_validate_mac82; } /* Make sure we receive the minimum required data to cache internally */ if (((is_qla8032(ha) || is_qla8042(ha)) ? mbox_sts[3] : mbox_sts[4]) < offsetof(struct mbx_sys_info, reserved)) { DEBUG2(printk("scsi%ld: %s: GET_SYS_INFO data receive" " error (%x)\n", ha->host_no, __func__, mbox_sts[4])); goto exit_validate_mac82; } /* Save M.A.C. address & serial_number */ ha->port_num = sys_info->port_num; memcpy(ha->my_mac, &sys_info->mac_addr[0], min(sizeof(ha->my_mac), sizeof(sys_info->mac_addr))); memcpy(ha->serial_number, &sys_info->serial_number, min(sizeof(ha->serial_number), sizeof(sys_info->serial_number))); memcpy(ha->model_name, &sys_info->board_id_str, min(sizeof(ha->model_name), sizeof(sys_info->board_id_str))); ha->phy_port_cnt = sys_info->phys_port_cnt; ha->phy_port_num = sys_info->port_num; ha->iscsi_pci_func_cnt = sys_info->iscsi_pci_func_cnt; DEBUG2(printk("scsi%ld: %s: mac %pM serial %s\n", ha->host_no, __func__, ha->my_mac, ha->serial_number)); status = QLA_SUCCESS; exit_validate_mac82: dma_free_coherent(&ha->pdev->dev, sizeof(*sys_info), sys_info, sys_info_dma); return status; } /* Interrupt handling helpers. 
*/ int qla4_8xxx_intr_enable(struct scsi_qla_host *ha) { uint32_t mbox_cmd[MBOX_REG_COUNT]; uint32_t mbox_sts[MBOX_REG_COUNT]; DEBUG2(ql4_printk(KERN_INFO, ha, "%s\n", __func__)); memset(&mbox_cmd, 0, sizeof(mbox_cmd)); memset(&mbox_sts, 0, sizeof(mbox_sts)); mbox_cmd[0] = MBOX_CMD_ENABLE_INTRS; mbox_cmd[1] = INTR_ENABLE; if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]) != QLA_SUCCESS) { DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MBOX_CMD_ENABLE_INTRS failed (0x%04x)\n", __func__, mbox_sts[0])); return QLA_ERROR; } return QLA_SUCCESS; } int qla4_8xxx_intr_disable(struct scsi_qla_host *ha) { uint32_t mbox_cmd[MBOX_REG_COUNT]; uint32_t mbox_sts[MBOX_REG_COUNT]; DEBUG2(ql4_printk(KERN_INFO, ha, "%s\n", __func__)); memset(&mbox_cmd, 0, sizeof(mbox_cmd)); memset(&mbox_sts, 0, sizeof(mbox_sts)); mbox_cmd[0] = MBOX_CMD_ENABLE_INTRS; mbox_cmd[1] = INTR_DISABLE; if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]) != QLA_SUCCESS) { DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MBOX_CMD_ENABLE_INTRS failed (0x%04x)\n", __func__, mbox_sts[0])); return QLA_ERROR; } return QLA_SUCCESS; } void qla4_82xx_enable_intrs(struct scsi_qla_host *ha) { qla4_8xxx_intr_enable(ha); spin_lock_irq(&ha->hardware_lock); /* BIT 10 - reset */ qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff); spin_unlock_irq(&ha->hardware_lock); set_bit(AF_INTERRUPTS_ON, &ha->flags); } void qla4_82xx_disable_intrs(struct scsi_qla_host *ha) { if (test_and_clear_bit(AF_INTERRUPTS_ON, &ha->flags)) qla4_8xxx_intr_disable(ha); spin_lock_irq(&ha->hardware_lock); /* BIT 10 - set */ qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400); spin_unlock_irq(&ha->hardware_lock); } int qla4_8xxx_enable_msix(struct scsi_qla_host *ha) { int ret; ret = pci_alloc_irq_vectors(ha->pdev, QLA_MSIX_ENTRIES, QLA_MSIX_ENTRIES, PCI_IRQ_MSIX); if (ret < 0) { ql4_printk(KERN_WARNING, ha, "MSI-X: Failed to enable support -- %d/%d\n", QLA_MSIX_ENTRIES, ret); return ret; } ret = request_irq(pci_irq_vector(ha->pdev, 0), qla4_8xxx_default_intr_handler, 0, "qla4xxx (default)", ha); if (ret) goto out_free_vectors; ret = request_irq(pci_irq_vector(ha->pdev, 1), qla4_8xxx_msix_rsp_q, 0, "qla4xxx (rsp_q)", ha); if (ret) goto out_free_default_irq; return 0; out_free_default_irq: free_irq(pci_irq_vector(ha->pdev, 0), ha); out_free_vectors: pci_free_irq_vectors(ha->pdev); return ret; } int qla4_8xxx_check_init_adapter_retry(struct scsi_qla_host *ha) { int status = QLA_SUCCESS; /* Don't retry adapter initialization if IRQ allocation failed */ if (!test_bit(AF_IRQ_ATTACHED, &ha->flags)) { ql4_printk(KERN_WARNING, ha, "%s: Skipping retry of adapter initialization as IRQs are not attached\n", __func__); status = QLA_ERROR; goto exit_init_adapter_failure; } /* Since interrupts are registered in start_firmware for * 8xxx, release them here if initialize_adapter fails * and retry adapter initialization */ qla4xxx_free_irqs(ha); exit_init_adapter_failure: return status; }
linux-master
drivers/scsi/qla4xxx/ql4_nx.c
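The flash-layout code in ql4_nx.c above leans on two conventions: qla4_8xxx_get_flt_info() stores every region address as a DWORD offset (le32_to_cpu(region->start) >> 2, read back at flt_addr << 2), and it rejects any table whose little-endian 16-bit words do not sum to zero ("Inconsistent FLT detected"). The standalone C sketch below exercises both conventions in isolation; it is illustrative only, the helper names are invented, and it is not driver code.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Byte address <-> DWORD offset, mirroring flt_addr << 2 and start >> 2. */
static uint32_t to_dword_offset(uint32_t byte_addr) { return byte_addr >> 2; }
static uint32_t to_byte_address(uint32_t dword_off) { return dword_off << 2; }

/*
 * Zero-sum test over little-endian 16-bit words, mirroring the FLT
 * checksum loop above: a consistent table sums to 0 modulo 2^16.
 */
static int flt_checksum_ok(const uint8_t *buf, size_t len_bytes)
{
	uint16_t sum = 0;
	size_t i;

	for (i = 0; i + 1 < len_bytes; i += 2)
		sum += (uint16_t)(buf[i] | (buf[i + 1] << 8));
	return sum == 0;
}

int main(void)
{
	/* A region starting at byte address 0x1FC000 is stored as 0x7F000. */
	uint32_t byte_addr = 0x1FC000;
	uint32_t stored = to_dword_offset(byte_addr);

	printf("byte 0x%x -> dword offset 0x%x -> byte 0x%x\n",
	       (unsigned)byte_addr, (unsigned)stored,
	       (unsigned)to_byte_address(stored));

	/* 0x1234 + 0xEDCC == 0x10000, i.e. 0 modulo 2^16, so this passes. */
	uint8_t good[4] = { 0x34, 0x12, 0xCC, 0xED };

	printf("checksum ok: %d\n", flt_checksum_ok(good, sizeof(good)));
	return 0;
}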
// SPDX-License-Identifier: GPL-2.0-only /* * QLogic iSCSI HBA Driver * Copyright (c) 2011-2013 QLogic Corporation */ #include "ql4_def.h" #include "ql4_glbl.h" #include "ql4_bsg.h" static int qla4xxx_read_flash(struct bsg_job *bsg_job) { struct Scsi_Host *host = iscsi_job_to_shost(bsg_job); struct scsi_qla_host *ha = to_qla_host(host); struct iscsi_bsg_reply *bsg_reply = bsg_job->reply; struct iscsi_bsg_request *bsg_req = bsg_job->request; uint32_t offset = 0; uint32_t length = 0; dma_addr_t flash_dma; uint8_t *flash = NULL; int rval = -EINVAL; bsg_reply->reply_payload_rcv_len = 0; if (unlikely(pci_channel_offline(ha->pdev))) goto leave; if (ql4xxx_reset_active(ha)) { ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__); rval = -EBUSY; goto leave; } if (ha->flash_state != QLFLASH_WAITING) { ql4_printk(KERN_ERR, ha, "%s: another flash operation " "active\n", __func__); rval = -EBUSY; goto leave; } ha->flash_state = QLFLASH_READING; offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1]; length = bsg_job->reply_payload.payload_len; flash = dma_alloc_coherent(&ha->pdev->dev, length, &flash_dma, GFP_KERNEL); if (!flash) { ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for flash " "data\n", __func__); rval = -ENOMEM; goto leave; } rval = qla4xxx_get_flash(ha, flash_dma, offset, length); if (rval) { ql4_printk(KERN_ERR, ha, "%s: get flash failed\n", __func__); bsg_reply->result = DID_ERROR << 16; rval = -EIO; } else { bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, flash, length); bsg_reply->result = DID_OK << 16; } bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); dma_free_coherent(&ha->pdev->dev, length, flash, flash_dma); leave: ha->flash_state = QLFLASH_WAITING; return rval; } static int qla4xxx_update_flash(struct bsg_job *bsg_job) { struct Scsi_Host *host = iscsi_job_to_shost(bsg_job); struct scsi_qla_host *ha = to_qla_host(host); struct iscsi_bsg_reply *bsg_reply = bsg_job->reply; struct iscsi_bsg_request *bsg_req = bsg_job->request; uint32_t length = 0; uint32_t offset = 0; uint32_t options = 0; dma_addr_t flash_dma; uint8_t *flash = NULL; int rval = -EINVAL; bsg_reply->reply_payload_rcv_len = 0; if (unlikely(pci_channel_offline(ha->pdev))) goto leave; if (ql4xxx_reset_active(ha)) { ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__); rval = -EBUSY; goto leave; } if (ha->flash_state != QLFLASH_WAITING) { ql4_printk(KERN_ERR, ha, "%s: another flash operation " "active\n", __func__); rval = -EBUSY; goto leave; } ha->flash_state = QLFLASH_WRITING; length = bsg_job->request_payload.payload_len; offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1]; options = bsg_req->rqst_data.h_vendor.vendor_cmd[2]; flash = dma_alloc_coherent(&ha->pdev->dev, length, &flash_dma, GFP_KERNEL); if (!flash) { ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for flash " "data\n", __func__); rval = -ENOMEM; goto leave; } sg_copy_to_buffer(bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, flash, length); rval = qla4xxx_set_flash(ha, flash_dma, offset, length, options); if (rval) { ql4_printk(KERN_ERR, ha, "%s: set flash failed\n", __func__); bsg_reply->result = DID_ERROR << 16; rval = -EIO; } else bsg_reply->result = DID_OK << 16; bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); dma_free_coherent(&ha->pdev->dev, length, flash, flash_dma); leave: ha->flash_state = QLFLASH_WAITING; return rval; } static int qla4xxx_get_acb_state(struct bsg_job *bsg_job) { struct Scsi_Host *host 
= iscsi_job_to_shost(bsg_job); struct scsi_qla_host *ha = to_qla_host(host); struct iscsi_bsg_request *bsg_req = bsg_job->request; struct iscsi_bsg_reply *bsg_reply = bsg_job->reply; uint32_t status[MBOX_REG_COUNT]; uint32_t acb_idx; uint32_t ip_idx; int rval = -EINVAL; bsg_reply->reply_payload_rcv_len = 0; if (unlikely(pci_channel_offline(ha->pdev))) goto leave; /* Only 4022 and above adapters are supported */ if (is_qla4010(ha)) goto leave; if (ql4xxx_reset_active(ha)) { ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__); rval = -EBUSY; goto leave; } if (bsg_job->reply_payload.payload_len < sizeof(status)) { ql4_printk(KERN_ERR, ha, "%s: invalid payload len %d\n", __func__, bsg_job->reply_payload.payload_len); rval = -EINVAL; goto leave; } acb_idx = bsg_req->rqst_data.h_vendor.vendor_cmd[1]; ip_idx = bsg_req->rqst_data.h_vendor.vendor_cmd[2]; rval = qla4xxx_get_ip_state(ha, acb_idx, ip_idx, status); if (rval) { ql4_printk(KERN_ERR, ha, "%s: get ip state failed\n", __func__); bsg_reply->result = DID_ERROR << 16; rval = -EIO; } else { bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, status, sizeof(status)); bsg_reply->result = DID_OK << 16; } bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); leave: return rval; } static int qla4xxx_read_nvram(struct bsg_job *bsg_job) { struct Scsi_Host *host = iscsi_job_to_shost(bsg_job); struct scsi_qla_host *ha = to_qla_host(host); struct iscsi_bsg_request *bsg_req = bsg_job->request; struct iscsi_bsg_reply *bsg_reply = bsg_job->reply; uint32_t offset = 0; uint32_t len = 0; uint32_t total_len = 0; dma_addr_t nvram_dma; uint8_t *nvram = NULL; int rval = -EINVAL; bsg_reply->reply_payload_rcv_len = 0; if (unlikely(pci_channel_offline(ha->pdev))) goto leave; /* Only 40xx adapters are supported */ if (!(is_qla4010(ha) || is_qla4022(ha) || is_qla4032(ha))) goto leave; if (ql4xxx_reset_active(ha)) { ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__); rval = -EBUSY; goto leave; } offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1]; len = bsg_job->reply_payload.payload_len; total_len = offset + len; /* total len should not be greater than max NVRAM size */ if ((is_qla4010(ha) && total_len > QL4010_NVRAM_SIZE) || ((is_qla4022(ha) || is_qla4032(ha)) && total_len > QL40X2_NVRAM_SIZE)) { ql4_printk(KERN_ERR, ha, "%s: offset+len greater than max" " nvram size, offset=%d len=%d\n", __func__, offset, len); goto leave; } nvram = dma_alloc_coherent(&ha->pdev->dev, len, &nvram_dma, GFP_KERNEL); if (!nvram) { ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for nvram " "data\n", __func__); rval = -ENOMEM; goto leave; } rval = qla4xxx_get_nvram(ha, nvram_dma, offset, len); if (rval) { ql4_printk(KERN_ERR, ha, "%s: get nvram failed\n", __func__); bsg_reply->result = DID_ERROR << 16; rval = -EIO; } else { bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, nvram, len); bsg_reply->result = DID_OK << 16; } bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); dma_free_coherent(&ha->pdev->dev, len, nvram, nvram_dma); leave: return rval; } static int qla4xxx_update_nvram(struct bsg_job *bsg_job) { struct Scsi_Host *host = iscsi_job_to_shost(bsg_job); struct scsi_qla_host *ha = to_qla_host(host); struct iscsi_bsg_request *bsg_req = bsg_job->request; struct iscsi_bsg_reply *bsg_reply = bsg_job->reply; uint32_t offset = 0; uint32_t len = 0; uint32_t total_len = 0; dma_addr_t nvram_dma; uint8_t 
*nvram = NULL; int rval = -EINVAL; bsg_reply->reply_payload_rcv_len = 0; if (unlikely(pci_channel_offline(ha->pdev))) goto leave; if (!(is_qla4010(ha) || is_qla4022(ha) || is_qla4032(ha))) goto leave; if (ql4xxx_reset_active(ha)) { ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__); rval = -EBUSY; goto leave; } offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1]; len = bsg_job->request_payload.payload_len; total_len = offset + len; /* total len should not be greater than max NVRAM size */ if ((is_qla4010(ha) && total_len > QL4010_NVRAM_SIZE) || ((is_qla4022(ha) || is_qla4032(ha)) && total_len > QL40X2_NVRAM_SIZE)) { ql4_printk(KERN_ERR, ha, "%s: offset+len greater than max" " nvram size, offset=%d len=%d\n", __func__, offset, len); goto leave; } nvram = dma_alloc_coherent(&ha->pdev->dev, len, &nvram_dma, GFP_KERNEL); if (!nvram) { ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for flash " "data\n", __func__); rval = -ENOMEM; goto leave; } sg_copy_to_buffer(bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, nvram, len); rval = qla4xxx_set_nvram(ha, nvram_dma, offset, len); if (rval) { ql4_printk(KERN_ERR, ha, "%s: set nvram failed\n", __func__); bsg_reply->result = DID_ERROR << 16; rval = -EIO; } else bsg_reply->result = DID_OK << 16; bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); dma_free_coherent(&ha->pdev->dev, len, nvram, nvram_dma); leave: return rval; } static int qla4xxx_restore_defaults(struct bsg_job *bsg_job) { struct Scsi_Host *host = iscsi_job_to_shost(bsg_job); struct scsi_qla_host *ha = to_qla_host(host); struct iscsi_bsg_request *bsg_req = bsg_job->request; struct iscsi_bsg_reply *bsg_reply = bsg_job->reply; uint32_t region = 0; uint32_t field0 = 0; uint32_t field1 = 0; int rval = -EINVAL; bsg_reply->reply_payload_rcv_len = 0; if (unlikely(pci_channel_offline(ha->pdev))) goto leave; if (is_qla4010(ha)) goto leave; if (ql4xxx_reset_active(ha)) { ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__); rval = -EBUSY; goto leave; } region = bsg_req->rqst_data.h_vendor.vendor_cmd[1]; field0 = bsg_req->rqst_data.h_vendor.vendor_cmd[2]; field1 = bsg_req->rqst_data.h_vendor.vendor_cmd[3]; rval = qla4xxx_restore_factory_defaults(ha, region, field0, field1); if (rval) { ql4_printk(KERN_ERR, ha, "%s: set nvram failed\n", __func__); bsg_reply->result = DID_ERROR << 16; rval = -EIO; } else bsg_reply->result = DID_OK << 16; bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); leave: return rval; } static int qla4xxx_bsg_get_acb(struct bsg_job *bsg_job) { struct Scsi_Host *host = iscsi_job_to_shost(bsg_job); struct scsi_qla_host *ha = to_qla_host(host); struct iscsi_bsg_request *bsg_req = bsg_job->request; struct iscsi_bsg_reply *bsg_reply = bsg_job->reply; uint32_t acb_type = 0; uint32_t len = 0; dma_addr_t acb_dma; uint8_t *acb = NULL; int rval = -EINVAL; bsg_reply->reply_payload_rcv_len = 0; if (unlikely(pci_channel_offline(ha->pdev))) goto leave; /* Only 4022 and above adapters are supported */ if (is_qla4010(ha)) goto leave; if (ql4xxx_reset_active(ha)) { ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__); rval = -EBUSY; goto leave; } acb_type = bsg_req->rqst_data.h_vendor.vendor_cmd[1]; len = bsg_job->reply_payload.payload_len; if (len < sizeof(struct addr_ctrl_blk)) { ql4_printk(KERN_ERR, ha, "%s: invalid acb len %d\n", __func__, len); rval = -EINVAL; goto leave; } acb = dma_alloc_coherent(&ha->pdev->dev, len, &acb_dma, GFP_KERNEL); if (!acb) { ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for acb " 
"data\n", __func__); rval = -ENOMEM; goto leave; } rval = qla4xxx_get_acb(ha, acb_dma, acb_type, len); if (rval) { ql4_printk(KERN_ERR, ha, "%s: get acb failed\n", __func__); bsg_reply->result = DID_ERROR << 16; rval = -EIO; } else { bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, acb, len); bsg_reply->result = DID_OK << 16; } bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); dma_free_coherent(&ha->pdev->dev, len, acb, acb_dma); leave: return rval; } static void ql4xxx_execute_diag_cmd(struct bsg_job *bsg_job) { struct Scsi_Host *host = iscsi_job_to_shost(bsg_job); struct scsi_qla_host *ha = to_qla_host(host); struct iscsi_bsg_request *bsg_req = bsg_job->request; struct iscsi_bsg_reply *bsg_reply = bsg_job->reply; uint8_t *rsp_ptr = NULL; uint32_t mbox_cmd[MBOX_REG_COUNT]; uint32_t mbox_sts[MBOX_REG_COUNT]; int status = QLA_ERROR; DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__)); if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) { ql4_printk(KERN_INFO, ha, "%s: Adapter reset in progress. Invalid Request\n", __func__); bsg_reply->result = DID_ERROR << 16; goto exit_diag_mem_test; } bsg_reply->reply_payload_rcv_len = 0; memcpy(mbox_cmd, &bsg_req->rqst_data.h_vendor.vendor_cmd[1], sizeof(uint32_t) * MBOX_REG_COUNT); DEBUG2(ql4_printk(KERN_INFO, ha, "%s: mbox_cmd: %08X %08X %08X %08X %08X %08X %08X %08X\n", __func__, mbox_cmd[0], mbox_cmd[1], mbox_cmd[2], mbox_cmd[3], mbox_cmd[4], mbox_cmd[5], mbox_cmd[6], mbox_cmd[7])); status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 8, &mbox_cmd[0], &mbox_sts[0]); DEBUG2(ql4_printk(KERN_INFO, ha, "%s: mbox_sts: %08X %08X %08X %08X %08X %08X %08X %08X\n", __func__, mbox_sts[0], mbox_sts[1], mbox_sts[2], mbox_sts[3], mbox_sts[4], mbox_sts[5], mbox_sts[6], mbox_sts[7])); if (status == QLA_SUCCESS) bsg_reply->result = DID_OK << 16; else bsg_reply->result = DID_ERROR << 16; /* Send mbox_sts to application */ bsg_job->reply_len = sizeof(struct iscsi_bsg_reply) + sizeof(mbox_sts); rsp_ptr = ((uint8_t *)bsg_reply) + sizeof(struct iscsi_bsg_reply); memcpy(rsp_ptr, mbox_sts, sizeof(mbox_sts)); exit_diag_mem_test: DEBUG2(ql4_printk(KERN_INFO, ha, "%s: bsg_reply->result = x%x, status = %s\n", __func__, bsg_reply->result, STATUS(status))); bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); } static int qla4_83xx_wait_for_loopback_config_comp(struct scsi_qla_host *ha, int wait_for_link) { int status = QLA_SUCCESS; if (!wait_for_completion_timeout(&ha->idc_comp, (IDC_COMP_TOV * HZ))) { ql4_printk(KERN_INFO, ha, "%s: IDC Complete notification not received, Waiting for another %d timeout", __func__, ha->idc_extend_tmo); if (ha->idc_extend_tmo) { if (!wait_for_completion_timeout(&ha->idc_comp, (ha->idc_extend_tmo * HZ))) { ha->notify_idc_comp = 0; ha->notify_link_up_comp = 0; ql4_printk(KERN_WARNING, ha, "%s: Aborting: IDC Complete notification not received", __func__); status = QLA_ERROR; goto exit_wait; } else { DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IDC Complete notification received\n", __func__)); } } } else { DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IDC Complete notification received\n", __func__)); } ha->notify_idc_comp = 0; if (wait_for_link) { if (!wait_for_completion_timeout(&ha->link_up_comp, (IDC_COMP_TOV * HZ))) { ha->notify_link_up_comp = 0; ql4_printk(KERN_WARNING, ha, "%s: Aborting: LINK UP notification not received", __func__); status = QLA_ERROR; goto exit_wait; } else { DEBUG2(ql4_printk(KERN_INFO, ha, "%s: LINK UP notification 
received\n", __func__)); } ha->notify_link_up_comp = 0; } exit_wait: return status; } static int qla4_83xx_pre_loopback_config(struct scsi_qla_host *ha, uint32_t *mbox_cmd) { uint32_t config = 0; int status = QLA_SUCCESS; DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__)); status = qla4_83xx_get_port_config(ha, &config); if (status != QLA_SUCCESS) goto exit_pre_loopback_config; DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Default port config=%08X\n", __func__, config)); if ((config & ENABLE_INTERNAL_LOOPBACK) || (config & ENABLE_EXTERNAL_LOOPBACK)) { ql4_printk(KERN_INFO, ha, "%s: Loopback diagnostics already in progress. Invalid request\n", __func__); goto exit_pre_loopback_config; } if (mbox_cmd[1] == QL_DIAG_CMD_TEST_INT_LOOPBACK) config |= ENABLE_INTERNAL_LOOPBACK; if (mbox_cmd[1] == QL_DIAG_CMD_TEST_EXT_LOOPBACK) config |= ENABLE_EXTERNAL_LOOPBACK; config &= ~ENABLE_DCBX; DEBUG2(ql4_printk(KERN_INFO, ha, "%s: New port config=%08X\n", __func__, config)); ha->notify_idc_comp = 1; ha->notify_link_up_comp = 1; /* get the link state */ qla4xxx_get_firmware_state(ha); status = qla4_83xx_set_port_config(ha, &config); if (status != QLA_SUCCESS) { ha->notify_idc_comp = 0; ha->notify_link_up_comp = 0; goto exit_pre_loopback_config; } exit_pre_loopback_config: DEBUG2(ql4_printk(KERN_INFO, ha, "%s: status = %s\n", __func__, STATUS(status))); return status; } static int qla4_83xx_post_loopback_config(struct scsi_qla_host *ha, uint32_t *mbox_cmd) { int status = QLA_SUCCESS; uint32_t config = 0; DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__)); status = qla4_83xx_get_port_config(ha, &config); if (status != QLA_SUCCESS) goto exit_post_loopback_config; DEBUG2(ql4_printk(KERN_INFO, ha, "%s: port config=%08X\n", __func__, config)); if (mbox_cmd[1] == QL_DIAG_CMD_TEST_INT_LOOPBACK) config &= ~ENABLE_INTERNAL_LOOPBACK; else if (mbox_cmd[1] == QL_DIAG_CMD_TEST_EXT_LOOPBACK) config &= ~ENABLE_EXTERNAL_LOOPBACK; config |= ENABLE_DCBX; DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Restore default port config=%08X\n", __func__, config)); ha->notify_idc_comp = 1; if (ha->addl_fw_state & FW_ADDSTATE_LINK_UP) ha->notify_link_up_comp = 1; status = qla4_83xx_set_port_config(ha, &config); if (status != QLA_SUCCESS) { ql4_printk(KERN_INFO, ha, "%s: Scheduling adapter reset\n", __func__); set_bit(DPC_RESET_HA, &ha->dpc_flags); clear_bit(AF_LOOPBACK, &ha->flags); goto exit_post_loopback_config; } exit_post_loopback_config: DEBUG2(ql4_printk(KERN_INFO, ha, "%s: status = %s\n", __func__, STATUS(status))); return status; } static void qla4xxx_execute_diag_loopback_cmd(struct bsg_job *bsg_job) { struct Scsi_Host *host = iscsi_job_to_shost(bsg_job); struct scsi_qla_host *ha = to_qla_host(host); struct iscsi_bsg_request *bsg_req = bsg_job->request; struct iscsi_bsg_reply *bsg_reply = bsg_job->reply; uint8_t *rsp_ptr = NULL; uint32_t mbox_cmd[MBOX_REG_COUNT]; uint32_t mbox_sts[MBOX_REG_COUNT]; int wait_for_link = 1; int status = QLA_ERROR; DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__)); bsg_reply->reply_payload_rcv_len = 0; if (test_bit(AF_LOOPBACK, &ha->flags)) { ql4_printk(KERN_INFO, ha, "%s: Loopback Diagnostics already in progress. Invalid Request\n", __func__); bsg_reply->result = DID_ERROR << 16; goto exit_loopback_cmd; } if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) { ql4_printk(KERN_INFO, ha, "%s: Adapter reset in progress. 
Invalid Request\n", __func__); bsg_reply->result = DID_ERROR << 16; goto exit_loopback_cmd; } memcpy(mbox_cmd, &bsg_req->rqst_data.h_vendor.vendor_cmd[1], sizeof(uint32_t) * MBOX_REG_COUNT); if (is_qla8032(ha) || is_qla8042(ha)) { status = qla4_83xx_pre_loopback_config(ha, mbox_cmd); if (status != QLA_SUCCESS) { bsg_reply->result = DID_ERROR << 16; goto exit_loopback_cmd; } status = qla4_83xx_wait_for_loopback_config_comp(ha, wait_for_link); if (status != QLA_SUCCESS) { bsg_reply->result = DID_TIME_OUT << 16; goto restore; } } DEBUG2(ql4_printk(KERN_INFO, ha, "%s: mbox_cmd: %08X %08X %08X %08X %08X %08X %08X %08X\n", __func__, mbox_cmd[0], mbox_cmd[1], mbox_cmd[2], mbox_cmd[3], mbox_cmd[4], mbox_cmd[5], mbox_cmd[6], mbox_cmd[7])); status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 8, &mbox_cmd[0], &mbox_sts[0]); if (status == QLA_SUCCESS) bsg_reply->result = DID_OK << 16; else bsg_reply->result = DID_ERROR << 16; DEBUG2(ql4_printk(KERN_INFO, ha, "%s: mbox_sts: %08X %08X %08X %08X %08X %08X %08X %08X\n", __func__, mbox_sts[0], mbox_sts[1], mbox_sts[2], mbox_sts[3], mbox_sts[4], mbox_sts[5], mbox_sts[6], mbox_sts[7])); /* Send mbox_sts to application */ bsg_job->reply_len = sizeof(struct iscsi_bsg_reply) + sizeof(mbox_sts); rsp_ptr = ((uint8_t *)bsg_reply) + sizeof(struct iscsi_bsg_reply); memcpy(rsp_ptr, mbox_sts, sizeof(mbox_sts)); restore: if (is_qla8032(ha) || is_qla8042(ha)) { status = qla4_83xx_post_loopback_config(ha, mbox_cmd); if (status != QLA_SUCCESS) { bsg_reply->result = DID_ERROR << 16; goto exit_loopback_cmd; } /* for pre_loopback_config() wait for LINK UP only * if PHY LINK is UP */ if (!(ha->addl_fw_state & FW_ADDSTATE_LINK_UP)) wait_for_link = 0; status = qla4_83xx_wait_for_loopback_config_comp(ha, wait_for_link); if (status != QLA_SUCCESS) { bsg_reply->result = DID_TIME_OUT << 16; goto exit_loopback_cmd; } } exit_loopback_cmd: DEBUG2(ql4_printk(KERN_INFO, ha, "%s: bsg_reply->result = x%x, status = %s\n", __func__, bsg_reply->result, STATUS(status))); bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); } static int qla4xxx_execute_diag_test(struct bsg_job *bsg_job) { struct Scsi_Host *host = iscsi_job_to_shost(bsg_job); struct scsi_qla_host *ha = to_qla_host(host); struct iscsi_bsg_request *bsg_req = bsg_job->request; uint32_t diag_cmd; int rval = -EINVAL; DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__)); diag_cmd = bsg_req->rqst_data.h_vendor.vendor_cmd[1]; if (diag_cmd == MBOX_CMD_DIAG_TEST) { switch (bsg_req->rqst_data.h_vendor.vendor_cmd[2]) { case QL_DIAG_CMD_TEST_DDR_SIZE: case QL_DIAG_CMD_TEST_DDR_RW: case QL_DIAG_CMD_TEST_ONCHIP_MEM_RW: case QL_DIAG_CMD_TEST_NVRAM: case QL_DIAG_CMD_TEST_FLASH_ROM: case QL_DIAG_CMD_TEST_DMA_XFER: case QL_DIAG_CMD_SELF_DDR_RW: case QL_DIAG_CMD_SELF_ONCHIP_MEM_RW: /* Execute diag test for adapter RAM/FLASH */ ql4xxx_execute_diag_cmd(bsg_job); /* Always return success as we want to send bsg_reply * to Application */ rval = QLA_SUCCESS; break; case QL_DIAG_CMD_TEST_INT_LOOPBACK: case QL_DIAG_CMD_TEST_EXT_LOOPBACK: /* Execute diag test for Network */ qla4xxx_execute_diag_loopback_cmd(bsg_job); /* Always return success as we want to send bsg_reply * to Application */ rval = QLA_SUCCESS; break; default: ql4_printk(KERN_ERR, ha, "%s: Invalid diag test: 0x%x\n", __func__, bsg_req->rqst_data.h_vendor.vendor_cmd[2]); } } else if ((diag_cmd == MBOX_CMD_SET_LED_CONFIG) || (diag_cmd == MBOX_CMD_GET_LED_CONFIG)) { ql4xxx_execute_diag_cmd(bsg_job); rval = QLA_SUCCESS; } else { ql4_printk(KERN_ERR, ha, "%s: Invalid 
diag cmd: 0x%x\n", __func__, diag_cmd); } return rval; } /** * qla4xxx_process_vendor_specific - handle vendor specific bsg request * @bsg_job: iscsi_bsg_job to handle **/ int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job) { struct iscsi_bsg_reply *bsg_reply = bsg_job->reply; struct iscsi_bsg_request *bsg_req = bsg_job->request; struct Scsi_Host *host = iscsi_job_to_shost(bsg_job); struct scsi_qla_host *ha = to_qla_host(host); switch (bsg_req->rqst_data.h_vendor.vendor_cmd[0]) { case QLISCSI_VND_READ_FLASH: return qla4xxx_read_flash(bsg_job); case QLISCSI_VND_UPDATE_FLASH: return qla4xxx_update_flash(bsg_job); case QLISCSI_VND_GET_ACB_STATE: return qla4xxx_get_acb_state(bsg_job); case QLISCSI_VND_READ_NVRAM: return qla4xxx_read_nvram(bsg_job); case QLISCSI_VND_UPDATE_NVRAM: return qla4xxx_update_nvram(bsg_job); case QLISCSI_VND_RESTORE_DEFAULTS: return qla4xxx_restore_defaults(bsg_job); case QLISCSI_VND_GET_ACB: return qla4xxx_bsg_get_acb(bsg_job); case QLISCSI_VND_DIAG_TEST: return qla4xxx_execute_diag_test(bsg_job); default: ql4_printk(KERN_ERR, ha, "%s: invalid BSG vendor command: " "0x%x\n", __func__, bsg_req->msgcode); bsg_reply->result = (DID_ERROR << 16); bsg_reply->reply_payload_rcv_len = 0; bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); return -ENOSYS; } } /** * qla4xxx_bsg_request - handle bsg request from ISCSI transport * @bsg_job: iscsi_bsg_job to handle */ int qla4xxx_bsg_request(struct bsg_job *bsg_job) { struct iscsi_bsg_request *bsg_req = bsg_job->request; struct Scsi_Host *host = iscsi_job_to_shost(bsg_job); struct scsi_qla_host *ha = to_qla_host(host); switch (bsg_req->msgcode) { case ISCSI_BSG_HST_VENDOR: return qla4xxx_process_vendor_specific(bsg_job); default: ql4_printk(KERN_ERR, ha, "%s: invalid BSG command: 0x%x\n", __func__, bsg_req->msgcode); } return -ENOSYS; }
linux-master
drivers/scsi/qla4xxx/ql4_bsg.c
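Every BSG handler in ql4_bsg.c above follows the same shape: vendor_cmd[0] selects the operation, vendor_cmd[1..] carry parameters such as a flash offset or options word, and the outcome is reported by shifting a SCSI host byte into bits 16-23 of bsg_reply->result (DID_OK << 16 on success, DID_ERROR << 16 on failure). The standalone sketch below mirrors that dispatch-and-encode pattern; the vendor command codes are invented for illustration, and the DID_* values simply mirror the usual kernel definitions.

#include <stdint.h>
#include <stdio.h>

#define DID_OK    0x00
#define DID_ERROR 0x07

enum { VND_READ_FLASH = 1, VND_UPDATE_FLASH = 2 };	/* illustrative codes */

/* Pack/unpack the host byte the way bsg_reply->result is used above. */
static int host_byte_result(int did)    { return did << 16; }
static int result_host_byte(int result) { return (result >> 16) & 0xff; }

static int dispatch_vendor_cmd(const uint32_t *vendor_cmd)
{
	switch (vendor_cmd[0]) {
	case VND_READ_FLASH:
		printf("read flash at offset 0x%x\n", (unsigned)vendor_cmd[1]);
		return host_byte_result(DID_OK);
	case VND_UPDATE_FLASH:
		printf("write flash at offset 0x%x, options 0x%x\n",
		       (unsigned)vendor_cmd[1], (unsigned)vendor_cmd[2]);
		return host_byte_result(DID_OK);
	default:
		return host_byte_result(DID_ERROR);
	}
}

int main(void)
{
	uint32_t cmd[3] = { VND_READ_FLASH, 0x40000, 0 };
	int result = dispatch_vendor_cmd(cmd);

	printf("result=0x%x host_byte=0x%x\n", result, result_host_byte(result));
	return 0;
}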
// SPDX-License-Identifier: GPL-2.0-only /* * QLogic iSCSI HBA Driver * Copyright (c) 2003-2013 QLogic Corporation */ #include "ql4_def.h" #include "ql4_glbl.h" #include "ql4_dbg.h" #include "ql4_inline.h" static inline void eeprom_cmd(uint32_t cmd, struct scsi_qla_host *ha) { writel(cmd, isp_nvram(ha)); readl(isp_nvram(ha)); udelay(1); } static inline int eeprom_size(struct scsi_qla_host *ha) { return is_qla4010(ha) ? FM93C66A_SIZE_16 : FM93C86A_SIZE_16; } static inline int eeprom_no_addr_bits(struct scsi_qla_host *ha) { return is_qla4010(ha) ? FM93C56A_NO_ADDR_BITS_16 : FM93C86A_NO_ADDR_BITS_16 ; } static inline int eeprom_no_data_bits(struct scsi_qla_host *ha) { return FM93C56A_DATA_BITS_16; } static int fm93c56a_select(struct scsi_qla_host * ha) { DEBUG5(printk(KERN_ERR "fm93c56a_select:\n")); ha->eeprom_cmd_data = AUBURN_EEPROM_CS_1 | 0x000f0000; eeprom_cmd(ha->eeprom_cmd_data, ha); return 1; } static int fm93c56a_cmd(struct scsi_qla_host * ha, int cmd, int addr) { int i; int mask; int dataBit; int previousBit; /* Clock in a zero, then do the start bit. */ eeprom_cmd(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1, ha); eeprom_cmd(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_RISE, ha); eeprom_cmd(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_FALL, ha); mask = 1 << (FM93C56A_CMD_BITS - 1); /* Force the previous data bit to be different. */ previousBit = 0xffff; for (i = 0; i < FM93C56A_CMD_BITS; i++) { dataBit = (cmd & mask) ? AUBURN_EEPROM_DO_1 : AUBURN_EEPROM_DO_0; if (previousBit != dataBit) { /* * If the bit changed, then change the DO state to * match. */ eeprom_cmd(ha->eeprom_cmd_data | dataBit, ha); previousBit = dataBit; } eeprom_cmd(ha->eeprom_cmd_data | dataBit | AUBURN_EEPROM_CLK_RISE, ha); eeprom_cmd(ha->eeprom_cmd_data | dataBit | AUBURN_EEPROM_CLK_FALL, ha); cmd = cmd << 1; } mask = 1 << (eeprom_no_addr_bits(ha) - 1); /* Force the previous data bit to be different. */ previousBit = 0xffff; for (i = 0; i < eeprom_no_addr_bits(ha); i++) { dataBit = addr & mask ? AUBURN_EEPROM_DO_1 : AUBURN_EEPROM_DO_0; if (previousBit != dataBit) { /* * If the bit changed, then change the DO state to * match. */ eeprom_cmd(ha->eeprom_cmd_data | dataBit, ha); previousBit = dataBit; } eeprom_cmd(ha->eeprom_cmd_data | dataBit | AUBURN_EEPROM_CLK_RISE, ha); eeprom_cmd(ha->eeprom_cmd_data | dataBit | AUBURN_EEPROM_CLK_FALL, ha); addr = addr << 1; } return 1; } static int fm93c56a_deselect(struct scsi_qla_host * ha) { ha->eeprom_cmd_data = AUBURN_EEPROM_CS_0 | 0x000f0000; eeprom_cmd(ha->eeprom_cmd_data, ha); return 1; } static int fm93c56a_datain(struct scsi_qla_host * ha, unsigned short *value) { int i; int data = 0; int dataBit; /* Read the data bits * The first bit is a dummy. Clock right over it. */ for (i = 0; i < eeprom_no_data_bits(ha); i++) { eeprom_cmd(ha->eeprom_cmd_data | AUBURN_EEPROM_CLK_RISE, ha); eeprom_cmd(ha->eeprom_cmd_data | AUBURN_EEPROM_CLK_FALL, ha); dataBit = (readw(isp_nvram(ha)) & AUBURN_EEPROM_DI_1) ? 
1 : 0; data = (data << 1) | dataBit; } *value = data; return 1; } static int eeprom_readword(int eepromAddr, u16 * value, struct scsi_qla_host * ha) { fm93c56a_select(ha); fm93c56a_cmd(ha, FM93C56A_READ, eepromAddr); fm93c56a_datain(ha, value); fm93c56a_deselect(ha); return 1; } /* Hardware_lock must be set before calling */ u16 rd_nvram_word(struct scsi_qla_host * ha, int offset) { u16 val = 0; /* NOTE: NVRAM uses half-word addresses */ eeprom_readword(offset, &val, ha); return val; } u8 rd_nvram_byte(struct scsi_qla_host *ha, int offset) { u16 val = 0; u8 rval = 0; int index = 0; if (offset & 0x1) index = (offset - 1) / 2; else index = offset / 2; val = le16_to_cpu(rd_nvram_word(ha, index)); if (offset & 0x1) rval = (u8)((val & 0xff00) >> 8); else rval = (u8)((val & 0x00ff)); return rval; } int qla4xxx_is_nvram_configuration_valid(struct scsi_qla_host * ha) { int status = QLA_ERROR; uint16_t checksum = 0; uint32_t index; unsigned long flags; spin_lock_irqsave(&ha->hardware_lock, flags); for (index = 0; index < eeprom_size(ha); index++) checksum += rd_nvram_word(ha, index); spin_unlock_irqrestore(&ha->hardware_lock, flags); if (checksum == 0) status = QLA_SUCCESS; return status; } /************************************************************************* * * Hardware Semaphore routines * *************************************************************************/ int ql4xxx_sem_spinlock(struct scsi_qla_host * ha, u32 sem_mask, u32 sem_bits) { uint32_t value; unsigned long flags; unsigned int seconds = 30; DEBUG2(printk("scsi%ld : Trying to get SEM lock - mask= 0x%x, code = " "0x%x\n", ha->host_no, sem_mask, sem_bits)); do { spin_lock_irqsave(&ha->hardware_lock, flags); writel((sem_mask | sem_bits), isp_semaphore(ha)); value = readw(isp_semaphore(ha)); spin_unlock_irqrestore(&ha->hardware_lock, flags); if ((value & (sem_mask >> 16)) == sem_bits) { DEBUG2(printk("scsi%ld : Got SEM LOCK - mask= 0x%x, " "code = 0x%x\n", ha->host_no, sem_mask, sem_bits)); return QLA_SUCCESS; } ssleep(1); } while (--seconds); return QLA_ERROR; } void ql4xxx_sem_unlock(struct scsi_qla_host * ha, u32 sem_mask) { unsigned long flags; spin_lock_irqsave(&ha->hardware_lock, flags); writel(sem_mask, isp_semaphore(ha)); readl(isp_semaphore(ha)); spin_unlock_irqrestore(&ha->hardware_lock, flags); DEBUG2(printk("scsi%ld : UNLOCK SEM - mask= 0x%x\n", ha->host_no, sem_mask)); } int ql4xxx_sem_lock(struct scsi_qla_host * ha, u32 sem_mask, u32 sem_bits) { uint32_t value; unsigned long flags; spin_lock_irqsave(&ha->hardware_lock, flags); writel((sem_mask | sem_bits), isp_semaphore(ha)); value = readw(isp_semaphore(ha)); spin_unlock_irqrestore(&ha->hardware_lock, flags); if ((value & (sem_mask >> 16)) == sem_bits) { DEBUG2(printk("scsi%ld : Got SEM LOCK - mask= 0x%x, code = " "0x%x, sema code=0x%x\n", ha->host_no, sem_mask, sem_bits, value)); return 1; } return 0; }
linux-master
drivers/scsi/qla4xxx/ql4_nvram.c
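rd_nvram_byte() in ql4_nvram.c above addresses the serial EEPROM in 16-bit words and then selects one byte out of the word: even byte offsets read the low byte and odd offsets the high byte of word offset/2. The small standalone sketch below reproduces that half-word addressing against a plain array so the index and shift arithmetic can be checked in isolation; it is illustrative only and not driver code.

#include <stdint.h>
#include <stdio.h>

/*
 * words[] plays the role of the EEPROM contents after byte-order
 * conversion, i.e. what le16_to_cpu(rd_nvram_word()) would return.
 */
static uint8_t nvram_byte(const uint16_t *words, int offset)
{
	int index = offset / 2;		/* half-word address, as in rd_nvram_byte() */
	uint16_t val = words[index];

	return (offset & 0x1) ? (uint8_t)(val >> 8) : (uint8_t)(val & 0xff);
}

int main(void)
{
	/*
	 * Word 0 holds bytes 0x11 (low) and 0x22 (high), word 1 holds
	 * 0x33 and 0x44, so byte offsets 0..3 read 11 22 33 44.
	 */
	uint16_t words[2] = { 0x2211, 0x4433 };
	int off;

	for (off = 0; off < 4; off++)
		printf("byte %d = 0x%02x\n", off, nvram_byte(words, off));
	return 0;
}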
// SPDX-License-Identifier: GPL-2.0-only /* * QLogic iSCSI HBA Driver * Copyright (c) 2003-2013 QLogic Corporation */ #include <scsi/iscsi_if.h> #include "ql4_def.h" #include "ql4_glbl.h" #include "ql4_dbg.h" #include "ql4_inline.h" static void ql4xxx_set_mac_number(struct scsi_qla_host *ha) { uint32_t value; unsigned long flags; /* Get the function number */ spin_lock_irqsave(&ha->hardware_lock, flags); value = readw(&ha->reg->ctrl_status); spin_unlock_irqrestore(&ha->hardware_lock, flags); switch (value & ISP_CONTROL_FN_MASK) { case ISP_CONTROL_FN0_SCSI: ha->mac_index = 1; break; case ISP_CONTROL_FN1_SCSI: ha->mac_index = 3; break; default: DEBUG2(printk("scsi%ld: %s: Invalid function number, " "ispControlStatus = 0x%x\n", ha->host_no, __func__, value)); break; } DEBUG2(printk("scsi%ld: %s: mac_index %d.\n", ha->host_no, __func__, ha->mac_index)); } /** * qla4xxx_free_ddb - deallocate ddb * @ha: pointer to host adapter structure. * @ddb_entry: pointer to device database entry * * This routine marks a DDB entry INVALID **/ void qla4xxx_free_ddb(struct scsi_qla_host *ha, struct ddb_entry *ddb_entry) { /* Remove device pointer from index mapping arrays */ ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = (struct ddb_entry *) INVALID_ENTRY; ha->tot_ddbs--; } /** * qla4xxx_init_response_q_entries() - Initializes response queue entries. * @ha: HA context * * Beginning of request ring has initialization control block already built * by nvram config routine. **/ static void qla4xxx_init_response_q_entries(struct scsi_qla_host *ha) { uint16_t cnt; struct response *pkt; pkt = (struct response *)ha->response_ptr; for (cnt = 0; cnt < RESPONSE_QUEUE_DEPTH; cnt++) { pkt->signature = RESPONSE_PROCESSED; pkt++; } } /** * qla4xxx_init_rings - initialize hw queues * @ha: pointer to host adapter structure. * * This routine initializes the internal queues for the specified adapter. * The QLA4010 requires us to restart the queues at index 0. * The QLA4000 doesn't care, so just default to QLA4010's requirement. **/ int qla4xxx_init_rings(struct scsi_qla_host *ha) { unsigned long flags = 0; int i; /* Initialize request queue. */ spin_lock_irqsave(&ha->hardware_lock, flags); ha->request_out = 0; ha->request_in = 0; ha->request_ptr = &ha->request_ring[ha->request_in]; ha->req_q_count = REQUEST_QUEUE_DEPTH; /* Initialize response queue. */ ha->response_in = 0; ha->response_out = 0; ha->response_ptr = &ha->response_ring[ha->response_out]; if (is_qla8022(ha)) { writel(0, (unsigned long __iomem *)&ha->qla4_82xx_reg->req_q_out); writel(0, (unsigned long __iomem *)&ha->qla4_82xx_reg->rsp_q_in); writel(0, (unsigned long __iomem *)&ha->qla4_82xx_reg->rsp_q_out); } else if (is_qla8032(ha) || is_qla8042(ha)) { writel(0, (unsigned long __iomem *)&ha->qla4_83xx_reg->req_q_in); writel(0, (unsigned long __iomem *)&ha->qla4_83xx_reg->rsp_q_in); writel(0, (unsigned long __iomem *)&ha->qla4_83xx_reg->rsp_q_out); } else { /* * Initialize DMA Shadow registers. The firmware is really * supposed to take care of this, but on some uniprocessor * systems, the shadow registers aren't cleared-- causing * the interrupt_handler to think there are responses to be * processed when there aren't. 
*/ ha->shadow_regs->req_q_out = cpu_to_le32(0); ha->shadow_regs->rsp_q_in = cpu_to_le32(0); wmb(); writel(0, &ha->reg->req_q_in); writel(0, &ha->reg->rsp_q_out); readl(&ha->reg->rsp_q_out); } qla4xxx_init_response_q_entries(ha); /* Initialize mailbox active array */ for (i = 0; i < MAX_MRB; i++) ha->active_mrb_array[i] = NULL; spin_unlock_irqrestore(&ha->hardware_lock, flags); return QLA_SUCCESS; } /** * qla4xxx_get_sys_info - validate adapter MAC address(es) * @ha: pointer to host adapter structure. * **/ int qla4xxx_get_sys_info(struct scsi_qla_host *ha) { struct flash_sys_info *sys_info; dma_addr_t sys_info_dma; int status = QLA_ERROR; sys_info = dma_alloc_coherent(&ha->pdev->dev, sizeof(*sys_info), &sys_info_dma, GFP_KERNEL); if (sys_info == NULL) { DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n", ha->host_no, __func__)); goto exit_get_sys_info_no_free; } /* Get flash sys info */ if (qla4xxx_get_flash(ha, sys_info_dma, FLASH_OFFSET_SYS_INFO, sizeof(*sys_info)) != QLA_SUCCESS) { DEBUG2(printk("scsi%ld: %s: get_flash FLASH_OFFSET_SYS_INFO " "failed\n", ha->host_no, __func__)); goto exit_get_sys_info; } /* Save M.A.C. address & serial_number */ memcpy(ha->my_mac, &sys_info->physAddr[0].address[0], min(sizeof(ha->my_mac), sizeof(sys_info->physAddr[0].address))); memcpy(ha->serial_number, &sys_info->acSerialNumber, min(sizeof(ha->serial_number), sizeof(sys_info->acSerialNumber))); status = QLA_SUCCESS; exit_get_sys_info: dma_free_coherent(&ha->pdev->dev, sizeof(*sys_info), sys_info, sys_info_dma); exit_get_sys_info_no_free: return status; } /** * qla4xxx_init_local_data - initialize adapter specific local data * @ha: pointer to host adapter structure. * **/ static void qla4xxx_init_local_data(struct scsi_qla_host *ha) { /* Initialize aen queue */ ha->aen_q_count = MAX_AEN_ENTRIES; } static uint8_t qla4xxx_wait_for_ip_config(struct scsi_qla_host *ha) { uint8_t ipv4_wait = 0; uint8_t ipv6_wait = 0; int8_t ip_address[IPv6_ADDR_LEN] = {0} ; /* If both IPv4 & IPv6 are enabled, possibly only one * IP address may be acquired, so check to see if we * need to wait for another */ if (is_ipv4_enabled(ha) && is_ipv6_enabled(ha)) { if (((ha->addl_fw_state & FW_ADDSTATE_DHCPv4_ENABLED) != 0) && ((ha->addl_fw_state & FW_ADDSTATE_DHCPv4_LEASE_ACQUIRED) == 0)) { ipv4_wait = 1; } if (((ha->ip_config.ipv6_addl_options & IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE) != 0) && ((ha->ip_config.ipv6_link_local_state == IP_ADDRSTATE_ACQUIRING) || (ha->ip_config.ipv6_addr0_state == IP_ADDRSTATE_ACQUIRING) || (ha->ip_config.ipv6_addr1_state == IP_ADDRSTATE_ACQUIRING))) { ipv6_wait = 1; if ((ha->ip_config.ipv6_link_local_state == IP_ADDRSTATE_PREFERRED) || (ha->ip_config.ipv6_addr0_state == IP_ADDRSTATE_PREFERRED) || (ha->ip_config.ipv6_addr1_state == IP_ADDRSTATE_PREFERRED)) { DEBUG2(printk(KERN_INFO "scsi%ld: %s: " "Preferred IP configured." " Don't wait!\n", ha->host_no, __func__)); ipv6_wait = 0; } if (memcmp(&ha->ip_config.ipv6_default_router_addr, ip_address, IPv6_ADDR_LEN) == 0) { DEBUG2(printk(KERN_INFO "scsi%ld: %s: " "No Router configured. " "Don't wait!\n", ha->host_no, __func__)); ipv6_wait = 0; } if ((ha->ip_config.ipv6_default_router_state == IPV6_RTRSTATE_MANUAL) && (ha->ip_config.ipv6_link_local_state == IP_ADDRSTATE_TENTATIVE) && (memcmp(&ha->ip_config.ipv6_link_local_addr, &ha->ip_config.ipv6_default_router_addr, 4) == 0)) { DEBUG2(printk("scsi%ld: %s: LinkLocal Router & " "IP configured. 
Don't wait!\n", ha->host_no, __func__)); ipv6_wait = 0; } } if (ipv4_wait || ipv6_wait) { DEBUG2(printk("scsi%ld: %s: Wait for additional " "IP(s) \"", ha->host_no, __func__)); if (ipv4_wait) DEBUG2(printk("IPv4 ")); if (ha->ip_config.ipv6_link_local_state == IP_ADDRSTATE_ACQUIRING) DEBUG2(printk("IPv6LinkLocal ")); if (ha->ip_config.ipv6_addr0_state == IP_ADDRSTATE_ACQUIRING) DEBUG2(printk("IPv6Addr0 ")); if (ha->ip_config.ipv6_addr1_state == IP_ADDRSTATE_ACQUIRING) DEBUG2(printk("IPv6Addr1 ")); DEBUG2(printk("\"\n")); } } return ipv4_wait|ipv6_wait; } static int qla4_80xx_is_minidump_dma_capable(struct scsi_qla_host *ha, struct qla4_8xxx_minidump_template_hdr *md_hdr) { int offset = (is_qla8022(ha)) ? QLA8022_TEMPLATE_CAP_OFFSET : QLA83XX_TEMPLATE_CAP_OFFSET; int rval = 1; uint32_t *cap_offset; cap_offset = (uint32_t *)((char *)md_hdr + offset); if (!(le32_to_cpu(*cap_offset) & BIT_0)) { ql4_printk(KERN_INFO, ha, "PEX DMA Not supported %d\n", *cap_offset); rval = 0; } return rval; } /** * qla4xxx_alloc_fw_dump - Allocate memory for minidump data. * @ha: pointer to host adapter structure. **/ void qla4xxx_alloc_fw_dump(struct scsi_qla_host *ha) { int status; uint32_t capture_debug_level; int hdr_entry_bit, k; void *md_tmp; dma_addr_t md_tmp_dma; struct qla4_8xxx_minidump_template_hdr *md_hdr; int dma_capable; if (ha->fw_dump) { ql4_printk(KERN_WARNING, ha, "Firmware dump previously allocated.\n"); return; } status = qla4xxx_req_template_size(ha); if (status != QLA_SUCCESS) { ql4_printk(KERN_INFO, ha, "scsi%ld: Failed to get template size\n", ha->host_no); return; } clear_bit(AF_82XX_FW_DUMPED, &ha->flags); /* Allocate memory for saving the template */ md_tmp = dma_alloc_coherent(&ha->pdev->dev, ha->fw_dump_tmplt_size, &md_tmp_dma, GFP_KERNEL); if (!md_tmp) { ql4_printk(KERN_INFO, ha, "scsi%ld: Failed to allocate DMA memory\n", ha->host_no); return; } /* Request template */ status = qla4xxx_get_minidump_template(ha, md_tmp_dma); if (status != QLA_SUCCESS) { ql4_printk(KERN_INFO, ha, "scsi%ld: Failed to get minidump template\n", ha->host_no); goto alloc_cleanup; } md_hdr = (struct qla4_8xxx_minidump_template_hdr *)md_tmp; dma_capable = qla4_80xx_is_minidump_dma_capable(ha, md_hdr); capture_debug_level = md_hdr->capture_debug_level; /* Get capture mask based on module loadtime setting. 
*/ if ((ql4xmdcapmask >= 0x3 && ql4xmdcapmask <= 0x7F) || (ql4xmdcapmask == 0xFF && dma_capable)) { ha->fw_dump_capture_mask = ql4xmdcapmask; } else { if (ql4xmdcapmask == 0xFF) ql4_printk(KERN_INFO, ha, "Falling back to default capture mask, as PEX DMA is not supported\n"); ha->fw_dump_capture_mask = capture_debug_level; } md_hdr->driver_capture_mask = ha->fw_dump_capture_mask; DEBUG2(ql4_printk(KERN_INFO, ha, "Minimum num of entries = %d\n", md_hdr->num_of_entries)); DEBUG2(ql4_printk(KERN_INFO, ha, "Dump template size = %d\n", ha->fw_dump_tmplt_size)); DEBUG2(ql4_printk(KERN_INFO, ha, "Selected Capture mask =0x%x\n", ha->fw_dump_capture_mask)); /* Calculate fw_dump_size */ for (hdr_entry_bit = 0x2, k = 1; (hdr_entry_bit & 0xFF); hdr_entry_bit <<= 1, k++) { if (hdr_entry_bit & ha->fw_dump_capture_mask) ha->fw_dump_size += md_hdr->capture_size_array[k]; } /* Total firmware dump size including command header */ ha->fw_dump_size += ha->fw_dump_tmplt_size; ha->fw_dump = vmalloc(ha->fw_dump_size); if (!ha->fw_dump) goto alloc_cleanup; DEBUG2(ql4_printk(KERN_INFO, ha, "Minidump Template Size = 0x%x KB\n", ha->fw_dump_tmplt_size)); DEBUG2(ql4_printk(KERN_INFO, ha, "Total Minidump size = 0x%x KB\n", ha->fw_dump_size)); memcpy(ha->fw_dump, md_tmp, ha->fw_dump_tmplt_size); ha->fw_dump_tmplt_hdr = ha->fw_dump; alloc_cleanup: dma_free_coherent(&ha->pdev->dev, ha->fw_dump_tmplt_size, md_tmp, md_tmp_dma); } static int qla4xxx_fw_ready(struct scsi_qla_host *ha) { uint32_t timeout_count; int ready = 0; DEBUG2(ql4_printk(KERN_INFO, ha, "Waiting for Firmware Ready..\n")); for (timeout_count = ADAPTER_INIT_TOV; timeout_count > 0; timeout_count--) { if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags)) qla4xxx_get_dhcp_ip_address(ha); /* Get firmware state. */ if (qla4xxx_get_firmware_state(ha) != QLA_SUCCESS) { DEBUG2(printk("scsi%ld: %s: unable to get firmware " "state\n", ha->host_no, __func__)); break; } if (ha->firmware_state & FW_STATE_ERROR) { DEBUG2(printk("scsi%ld: %s: an unrecoverable error has" " occurred\n", ha->host_no, __func__)); break; } if (ha->firmware_state & FW_STATE_CONFIG_WAIT) { /* * The firmware has not yet been issued an Initialize * Firmware command, so issue it now. */ if (qla4xxx_initialize_fw_cb(ha) == QLA_ERROR) break; /* Go back and test for ready state - no wait. */ continue; } if (ha->firmware_state & FW_STATE_WAIT_AUTOCONNECT) { DEBUG2(printk(KERN_INFO "scsi%ld: %s: fwstate:" "AUTOCONNECT in progress\n", ha->host_no, __func__)); } if (ha->firmware_state & FW_STATE_CONFIGURING_IP) { DEBUG2(printk(KERN_INFO "scsi%ld: %s: fwstate:" " CONFIGURING IP\n", ha->host_no, __func__)); /* * Check for link state after 15 secs and if link is * still DOWN then, cable is unplugged. Ignore "DHCP * in Progress/CONFIGURING IP" bit to check if firmware * is in ready state or not after 15 secs. * This is applicable for both 2.x & 3.x firmware */ if (timeout_count <= (ADAPTER_INIT_TOV - 15)) { if (ha->addl_fw_state & FW_ADDSTATE_LINK_UP) { DEBUG2(printk(KERN_INFO "scsi%ld: %s:" " LINK UP (Cable plugged)\n", ha->host_no, __func__)); } else if (ha->firmware_state & (FW_STATE_CONFIGURING_IP | FW_STATE_READY)) { DEBUG2(printk(KERN_INFO "scsi%ld: %s: " "LINK DOWN (Cable unplugged)\n", ha->host_no, __func__)); ha->firmware_state = FW_STATE_READY; } } } if (ha->firmware_state == FW_STATE_READY) { /* If DHCP IP Addr is available, retrieve it now. 
*/ if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags)) qla4xxx_get_dhcp_ip_address(ha); if (!qla4xxx_wait_for_ip_config(ha) || timeout_count == 1) { DEBUG2(ql4_printk(KERN_INFO, ha, "Firmware Ready..\n")); /* The firmware is ready to process SCSI commands. */ DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld: %s: MEDIA TYPE" " - %s\n", ha->host_no, __func__, (ha->addl_fw_state & FW_ADDSTATE_OPTICAL_MEDIA) != 0 ? "OPTICAL" : "COPPER")); DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld: %s: DHCPv4 STATE" " Enabled %s\n", ha->host_no, __func__, (ha->addl_fw_state & FW_ADDSTATE_DHCPv4_ENABLED) != 0 ? "YES" : "NO")); DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld: %s: LINK %s\n", ha->host_no, __func__, (ha->addl_fw_state & FW_ADDSTATE_LINK_UP) != 0 ? "UP" : "DOWN")); DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld: %s: iSNS Service " "Started %s\n", ha->host_no, __func__, (ha->addl_fw_state & FW_ADDSTATE_ISNS_SVC_ENABLED) != 0 ? "YES" : "NO")); ready = 1; break; } } DEBUG2(printk("scsi%ld: %s: waiting on fw, state=%x:%x - " "seconds expired= %d\n", ha->host_no, __func__, ha->firmware_state, ha->addl_fw_state, timeout_count)); if (is_qla4032(ha) && !(ha->addl_fw_state & FW_ADDSTATE_LINK_UP) && (timeout_count < ADAPTER_INIT_TOV - 5)) { break; } msleep(1000); } /* end of for */ if (timeout_count <= 0) DEBUG2(printk("scsi%ld: %s: FW Initialization timed out!\n", ha->host_no, __func__)); if (ha->firmware_state & FW_STATE_CONFIGURING_IP) { DEBUG2(printk("scsi%ld: %s: FW initialized, but is reporting " "it's waiting to configure an IP address\n", ha->host_no, __func__)); ready = 1; } else if (ha->firmware_state & FW_STATE_WAIT_AUTOCONNECT) { DEBUG2(printk("scsi%ld: %s: FW initialized, but " "auto-discovery still in process\n", ha->host_no, __func__)); ready = 1; } return ready; } /** * qla4xxx_init_firmware - initializes the firmware. * @ha: pointer to host adapter structure. * **/ static int qla4xxx_init_firmware(struct scsi_qla_host *ha) { int status = QLA_ERROR; if (is_aer_supported(ha) && test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags)) return status; /* For 82xx, stop firmware before initializing because if BIOS * has previously initialized firmware, then driver's initialize * firmware will fail. 
*/ if (is_qla80XX(ha)) qla4_8xxx_stop_firmware(ha); ql4_printk(KERN_INFO, ha, "Initializing firmware..\n"); if (qla4xxx_initialize_fw_cb(ha) == QLA_ERROR) { DEBUG2(printk("scsi%ld: %s: Failed to initialize firmware " "control block\n", ha->host_no, __func__)); return status; } if (!qla4xxx_fw_ready(ha)) return status; if (is_qla80XX(ha) && !test_bit(AF_INIT_DONE, &ha->flags)) qla4xxx_alloc_fw_dump(ha); return qla4xxx_get_firmware_status(ha); } static void qla4xxx_set_model_info(struct scsi_qla_host *ha) { uint16_t board_id_string[8]; int i; int size = sizeof(ha->nvram->isp4022.boardIdStr); int offset = offsetof(struct eeprom_data, isp4022.boardIdStr) / 2; for (i = 0; i < (size / 2) ; i++) { board_id_string[i] = rd_nvram_word(ha, offset); offset += 1; } memcpy(ha->model_name, board_id_string, size); } static int qla4xxx_config_nvram(struct scsi_qla_host *ha) { unsigned long flags; union external_hw_config_reg extHwConfig; DEBUG2(printk("scsi%ld: %s: Get EEProm parameters \n", ha->host_no, __func__)); if (ql4xxx_lock_flash(ha) != QLA_SUCCESS) return QLA_ERROR; if (ql4xxx_lock_nvram(ha) != QLA_SUCCESS) { ql4xxx_unlock_flash(ha); return QLA_ERROR; } /* Get EEPRom Parameters from NVRAM and validate */ ql4_printk(KERN_INFO, ha, "Configuring NVRAM ...\n"); if (qla4xxx_is_nvram_configuration_valid(ha) == QLA_SUCCESS) { spin_lock_irqsave(&ha->hardware_lock, flags); extHwConfig.Asuint32_t = rd_nvram_word(ha, eeprom_ext_hw_conf_offset(ha)); spin_unlock_irqrestore(&ha->hardware_lock, flags); } else { ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: EEProm checksum invalid. " "Please update your EEPROM\n", ha->host_no, __func__); /* Attempt to set defaults */ if (is_qla4010(ha)) extHwConfig.Asuint32_t = 0x1912; else if (is_qla4022(ha) | is_qla4032(ha)) extHwConfig.Asuint32_t = 0x0023; else return QLA_ERROR; } if (is_qla4022(ha) || is_qla4032(ha)) qla4xxx_set_model_info(ha); else strcpy(ha->model_name, "QLA4010"); DEBUG(printk("scsi%ld: %s: Setting extHwConfig to 0xFFFF%04x\n", ha->host_no, __func__, extHwConfig.Asuint32_t)); spin_lock_irqsave(&ha->hardware_lock, flags); writel((0xFFFF << 16) | extHwConfig.Asuint32_t, isp_ext_hw_conf(ha)); readl(isp_ext_hw_conf(ha)); spin_unlock_irqrestore(&ha->hardware_lock, flags); ql4xxx_unlock_nvram(ha); ql4xxx_unlock_flash(ha); return QLA_SUCCESS; } /** * qla4_8xxx_pci_config() - Setup ISP82xx PCI configuration registers. * @ha: HA context */ void qla4_8xxx_pci_config(struct scsi_qla_host *ha) { pci_set_master(ha->pdev); } void qla4xxx_pci_config(struct scsi_qla_host *ha) { uint16_t w; int status; ql4_printk(KERN_INFO, ha, "Configuring PCI space...\n"); pci_set_master(ha->pdev); status = pci_set_mwi(ha->pdev); if (status) ql4_printk(KERN_WARNING, ha, "Failed to set MWI\n"); /* * We want to respect framework's setting of PCI configuration space * command register and also want to make sure that all bits of * interest to us are properly set in command register. */ pci_read_config_word(ha->pdev, PCI_COMMAND, &w); w |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR; w &= ~PCI_COMMAND_INTX_DISABLE; pci_write_config_word(ha->pdev, PCI_COMMAND, w); } static int qla4xxx_start_firmware_from_flash(struct scsi_qla_host *ha) { int status = QLA_ERROR; unsigned long max_wait_time; unsigned long flags; uint32_t mbox_status; ql4_printk(KERN_INFO, ha, "Starting firmware ...\n"); /* * Start firmware from flash ROM * * WORKAROUND: Stuff a non-constant value that the firmware can * use as a seed for a random number generator in MB7 prior to * setting BOOT_ENABLE. 
Fixes problem where the TCP * connections use the same TCP ports after each reboot, * causing some connections to not get re-established. */ DEBUG(printk("scsi%d: %s: Start firmware from flash ROM\n", ha->host_no, __func__)); spin_lock_irqsave(&ha->hardware_lock, flags); writel(jiffies, &ha->reg->mailbox[7]); if (is_qla4022(ha) | is_qla4032(ha)) writel(set_rmask(NVR_WRITE_ENABLE), &ha->reg->u1.isp4022.nvram); writel(2, &ha->reg->mailbox[6]); readl(&ha->reg->mailbox[6]); writel(set_rmask(CSR_BOOT_ENABLE), &ha->reg->ctrl_status); readl(&ha->reg->ctrl_status); spin_unlock_irqrestore(&ha->hardware_lock, flags); /* Wait for firmware to come UP. */ DEBUG2(printk(KERN_INFO "scsi%ld: %s: Wait up to %d seconds for " "boot firmware to complete...\n", ha->host_no, __func__, FIRMWARE_UP_TOV)); max_wait_time = jiffies + (FIRMWARE_UP_TOV * HZ); do { uint32_t ctrl_status; spin_lock_irqsave(&ha->hardware_lock, flags); ctrl_status = readw(&ha->reg->ctrl_status); mbox_status = readw(&ha->reg->mailbox[0]); spin_unlock_irqrestore(&ha->hardware_lock, flags); if (ctrl_status & set_rmask(CSR_SCSI_PROCESSOR_INTR)) break; if (mbox_status == MBOX_STS_COMMAND_COMPLETE) break; DEBUG2(printk(KERN_INFO "scsi%ld: %s: Waiting for boot " "firmware to complete... ctrl_sts=0x%x, remaining=%ld\n", ha->host_no, __func__, ctrl_status, max_wait_time)); msleep_interruptible(250); } while (!time_after_eq(jiffies, max_wait_time)); if (mbox_status == MBOX_STS_COMMAND_COMPLETE) { DEBUG(printk(KERN_INFO "scsi%ld: %s: Firmware has started\n", ha->host_no, __func__)); spin_lock_irqsave(&ha->hardware_lock, flags); writel(set_rmask(CSR_SCSI_PROCESSOR_INTR), &ha->reg->ctrl_status); readl(&ha->reg->ctrl_status); spin_unlock_irqrestore(&ha->hardware_lock, flags); status = QLA_SUCCESS; } else { printk(KERN_INFO "scsi%ld: %s: Boot firmware failed " "- mbox status 0x%x\n", ha->host_no, __func__, mbox_status); status = QLA_ERROR; } return status; } int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a) { #define QL4_LOCK_DRVR_WAIT 60 #define QL4_LOCK_DRVR_SLEEP 1 int drvr_wait = QL4_LOCK_DRVR_WAIT; while (drvr_wait) { if (ql4xxx_lock_drvr(a) == 0) { ssleep(QL4_LOCK_DRVR_SLEEP); DEBUG2(printk("scsi%ld: %s: Waiting for " "Global Init Semaphore(%d)...\n", a->host_no, __func__, drvr_wait)); drvr_wait -= QL4_LOCK_DRVR_SLEEP; } else { DEBUG2(printk("scsi%ld: %s: Global Init Semaphore " "acquired\n", a->host_no, __func__)); return QLA_SUCCESS; } } return QLA_ERROR; } /** * qla4xxx_start_firmware - starts qla4xxx firmware * @ha: Pointer to host adapter structure. * * This routine performs the necessary steps to start the firmware for * the QLA4010 adapter. **/ int qla4xxx_start_firmware(struct scsi_qla_host *ha) { unsigned long flags = 0; uint32_t mbox_status; int status = QLA_ERROR; int soft_reset = 1; int config_chip = 0; if (is_qla4022(ha) | is_qla4032(ha)) ql4xxx_set_mac_number(ha); if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS) return QLA_ERROR; spin_lock_irqsave(&ha->hardware_lock, flags); DEBUG2(printk("scsi%ld: %s: port_ctrl = 0x%08X\n", ha->host_no, __func__, readw(isp_port_ctrl(ha)))); DEBUG(printk("scsi%ld: %s: port_status = 0x%08X\n", ha->host_no, __func__, readw(isp_port_status(ha)))); /* Is Hardware already initialized? 
*/ if ((readw(isp_port_ctrl(ha)) & 0x8000) != 0) { DEBUG(printk("scsi%ld: %s: Hardware has already been " "initialized\n", ha->host_no, __func__)); /* Receive firmware boot acknowledgement */ mbox_status = readw(&ha->reg->mailbox[0]); DEBUG2(printk("scsi%ld: %s: H/W Config complete - mbox[0]= " "0x%x\n", ha->host_no, __func__, mbox_status)); /* Is firmware already booted? */ if (mbox_status == 0) { /* F/W not running, must be config by net driver */ config_chip = 1; soft_reset = 0; } else { writel(set_rmask(CSR_SCSI_PROCESSOR_INTR), &ha->reg->ctrl_status); readl(&ha->reg->ctrl_status); writel(set_rmask(CSR_SCSI_COMPLETION_INTR), &ha->reg->ctrl_status); readl(&ha->reg->ctrl_status); spin_unlock_irqrestore(&ha->hardware_lock, flags); if (qla4xxx_get_firmware_state(ha) == QLA_SUCCESS) { DEBUG2(printk("scsi%ld: %s: Get firmware " "state -- state = 0x%x\n", ha->host_no, __func__, ha->firmware_state)); /* F/W is running */ if (ha->firmware_state & FW_STATE_CONFIG_WAIT) { DEBUG2(printk("scsi%ld: %s: Firmware " "in known state -- " "config and " "boot, state = 0x%x\n", ha->host_no, __func__, ha->firmware_state)); config_chip = 1; soft_reset = 0; } } else { DEBUG2(printk("scsi%ld: %s: Firmware in " "unknown state -- resetting," " state = " "0x%x\n", ha->host_no, __func__, ha->firmware_state)); } spin_lock_irqsave(&ha->hardware_lock, flags); } } else { DEBUG(printk("scsi%ld: %s: H/W initialization hasn't been " "started - resetting\n", ha->host_no, __func__)); } spin_unlock_irqrestore(&ha->hardware_lock, flags); DEBUG(printk("scsi%ld: %s: Flags soft_rest=%d, config= %d\n ", ha->host_no, __func__, soft_reset, config_chip)); if (soft_reset) { DEBUG(printk("scsi%ld: %s: Issue Soft Reset\n", ha->host_no, __func__)); status = qla4xxx_soft_reset(ha); /* NOTE: acquires drvr * lock again, but ok */ if (status == QLA_ERROR) { DEBUG(printk("scsi%d: %s: Soft Reset failed!\n", ha->host_no, __func__)); ql4xxx_unlock_drvr(ha); return QLA_ERROR; } config_chip = 1; /* Reset clears the semaphore, so acquire again */ if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS) return QLA_ERROR; } if (config_chip) { if ((status = qla4xxx_config_nvram(ha)) == QLA_SUCCESS) status = qla4xxx_start_firmware_from_flash(ha); } ql4xxx_unlock_drvr(ha); if (status == QLA_SUCCESS) { if (test_and_clear_bit(AF_GET_CRASH_RECORD, &ha->flags)) qla4xxx_get_crash_record(ha); qla4xxx_init_rings(ha); } else { DEBUG(printk("scsi%ld: %s: Firmware has NOT started\n", ha->host_no, __func__)); } return status; } /** * qla4xxx_free_ddb_index - Free DDBs reserved by firmware * @ha: pointer to adapter structure * * Since firmware is not running in autoconnect mode the DDB indices should * be freed so that when login happens from user space there are free DDB * indices available. **/ void qla4xxx_free_ddb_index(struct scsi_qla_host *ha) { int max_ddbs; int ret; uint32_t idx = 0, next_idx = 0; uint32_t state = 0, conn_err = 0; max_ddbs = is_qla40XX(ha) ? 
MAX_DEV_DB_ENTRIES_40XX : MAX_DEV_DB_ENTRIES; for (idx = 0; idx < max_ddbs; idx = next_idx) { ret = qla4xxx_get_fwddb_entry(ha, idx, NULL, 0, NULL, &next_idx, &state, &conn_err, NULL, NULL); if (ret == QLA_ERROR) { next_idx++; continue; } if (state == DDB_DS_NO_CONNECTION_ACTIVE || state == DDB_DS_SESSION_FAILED) { DEBUG2(ql4_printk(KERN_INFO, ha, "Freeing DDB index = 0x%x\n", idx)); ret = qla4xxx_clear_ddb_entry(ha, idx); if (ret == QLA_ERROR) ql4_printk(KERN_ERR, ha, "Unable to clear DDB index = " "0x%x\n", idx); } if (next_idx == 0) break; } } /** * qla4xxx_initialize_adapter - initiailizes hba * @ha: Pointer to host adapter structure. * @is_reset: Is this init path or reset path * * This routine parforms all of the steps necessary to initialize the adapter. * **/ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha, int is_reset) { int status = QLA_ERROR; ha->eeprom_cmd_data = 0; ql4_printk(KERN_INFO, ha, "Configuring PCI space...\n"); ha->isp_ops->pci_config(ha); ha->isp_ops->disable_intrs(ha); /* Initialize the Host adapter request/response queues and firmware */ if (ha->isp_ops->start_firmware(ha) == QLA_ERROR) goto exit_init_hba; /* * For ISP83XX, mailbox and IOCB interrupts are enabled separately. * Mailbox interrupts must be enabled prior to issuing any mailbox * command in order to prevent the possibility of losing interrupts * while switching from polling to interrupt mode. IOCB interrupts are * enabled via isp_ops->enable_intrs. */ if (is_qla8032(ha) || is_qla8042(ha)) qla4_83xx_enable_mbox_intrs(ha); if (qla4xxx_about_firmware(ha) == QLA_ERROR) goto exit_init_hba; if (ha->isp_ops->get_sys_info(ha) == QLA_ERROR) goto exit_init_hba; qla4xxx_init_local_data(ha); status = qla4xxx_init_firmware(ha); if (status == QLA_ERROR) goto exit_init_hba; if (is_reset == RESET_ADAPTER) qla4xxx_build_ddb_list(ha, is_reset); set_bit(AF_ONLINE, &ha->flags); exit_init_hba: DEBUG2(printk("scsi%ld: initialize adapter: %s\n", ha->host_no, status == QLA_ERROR ? 
"FAILED" : "SUCCEEDED")); return status; } int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index, struct ddb_entry *ddb_entry, uint32_t state) { uint32_t old_fw_ddb_device_state; int status = QLA_ERROR; old_fw_ddb_device_state = ddb_entry->fw_ddb_device_state; DEBUG2(ql4_printk(KERN_INFO, ha, "%s: DDB - old state = 0x%x, new state = 0x%x for " "index [%d]\n", __func__, ddb_entry->fw_ddb_device_state, state, fw_ddb_index)); ddb_entry->fw_ddb_device_state = state; switch (old_fw_ddb_device_state) { case DDB_DS_LOGIN_IN_PROCESS: switch (state) { case DDB_DS_SESSION_ACTIVE: case DDB_DS_DISCOVERY: qla4xxx_update_session_conn_param(ha, ddb_entry); ddb_entry->unblock_sess(ddb_entry->sess); status = QLA_SUCCESS; break; case DDB_DS_SESSION_FAILED: case DDB_DS_NO_CONNECTION_ACTIVE: iscsi_conn_login_event(ddb_entry->conn, ISCSI_CONN_STATE_FREE); status = QLA_SUCCESS; break; } break; case DDB_DS_SESSION_ACTIVE: case DDB_DS_DISCOVERY: switch (state) { case DDB_DS_SESSION_FAILED: /* * iscsi_session failure will cause userspace to * stop the connection which in turn would block the * iscsi_session and start relogin */ iscsi_session_failure(ddb_entry->sess->dd_data, ISCSI_ERR_CONN_FAILED); status = QLA_SUCCESS; break; case DDB_DS_NO_CONNECTION_ACTIVE: clear_bit(fw_ddb_index, ha->ddb_idx_map); status = QLA_SUCCESS; break; } break; case DDB_DS_SESSION_FAILED: switch (state) { case DDB_DS_SESSION_ACTIVE: case DDB_DS_DISCOVERY: ddb_entry->unblock_sess(ddb_entry->sess); qla4xxx_update_session_conn_param(ha, ddb_entry); status = QLA_SUCCESS; break; case DDB_DS_SESSION_FAILED: iscsi_session_failure(ddb_entry->sess->dd_data, ISCSI_ERR_CONN_FAILED); status = QLA_SUCCESS; break; } break; default: DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Unknown Event\n", __func__)); break; } return status; } void qla4xxx_arm_relogin_timer(struct ddb_entry *ddb_entry) { /* * This triggers a relogin. After the relogin_timer * expires, the relogin gets scheduled. We must wait a * minimum amount of time since receiving an 0x8014 AEN * with failed device_state or a logout response before * we can issue another relogin. * * Firmware pads this timeout: (time2wait +1). * Driver retry to login should be longer than F/W. * Otherwise F/W will fail * set_ddb() mbx cmd with 0x4005 since it still * counting down its time2wait. 
*/ atomic_set(&ddb_entry->relogin_timer, 0); atomic_set(&ddb_entry->retry_relogin_timer, ddb_entry->default_time2wait + 4); } int qla4xxx_flash_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index, struct ddb_entry *ddb_entry, uint32_t state) { uint32_t old_fw_ddb_device_state; int status = QLA_ERROR; old_fw_ddb_device_state = ddb_entry->fw_ddb_device_state; DEBUG2(ql4_printk(KERN_INFO, ha, "%s: DDB - old state = 0x%x, new state = 0x%x for " "index [%d]\n", __func__, ddb_entry->fw_ddb_device_state, state, fw_ddb_index)); ddb_entry->fw_ddb_device_state = state; switch (old_fw_ddb_device_state) { case DDB_DS_LOGIN_IN_PROCESS: case DDB_DS_NO_CONNECTION_ACTIVE: switch (state) { case DDB_DS_SESSION_ACTIVE: ddb_entry->unblock_sess(ddb_entry->sess); qla4xxx_update_session_conn_fwddb_param(ha, ddb_entry); status = QLA_SUCCESS; break; case DDB_DS_SESSION_FAILED: iscsi_block_session(ddb_entry->sess); if (!test_bit(DF_RELOGIN, &ddb_entry->flags)) qla4xxx_arm_relogin_timer(ddb_entry); status = QLA_SUCCESS; break; } break; case DDB_DS_SESSION_ACTIVE: switch (state) { case DDB_DS_SESSION_FAILED: iscsi_block_session(ddb_entry->sess); if (!test_bit(DF_RELOGIN, &ddb_entry->flags)) qla4xxx_arm_relogin_timer(ddb_entry); status = QLA_SUCCESS; break; } break; case DDB_DS_SESSION_FAILED: switch (state) { case DDB_DS_SESSION_ACTIVE: ddb_entry->unblock_sess(ddb_entry->sess); qla4xxx_update_session_conn_fwddb_param(ha, ddb_entry); status = QLA_SUCCESS; break; case DDB_DS_SESSION_FAILED: if (!test_bit(DF_RELOGIN, &ddb_entry->flags)) qla4xxx_arm_relogin_timer(ddb_entry); status = QLA_SUCCESS; break; } break; default: DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Unknown Event\n", __func__)); break; } return status; } /** * qla4xxx_process_ddb_changed - process ddb state change * @ha: Pointer to host adapter structure. * @fw_ddb_index: Firmware's device database index * @state: Device state * @conn_err: Unused * * This routine processes a Decive Database Changed AEN Event. **/ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index, uint32_t state, uint32_t conn_err) { struct ddb_entry *ddb_entry; /* check for out of range index */ if (fw_ddb_index >= MAX_DDB_ENTRIES) goto exit_ddb_event; /* Get the corresponging ddb entry */ ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, fw_ddb_index); /* Device does not currently exist in our database. */ if (ddb_entry == NULL) { ql4_printk(KERN_ERR, ha, "%s: No ddb_entry at FW index [%d]\n", __func__, fw_ddb_index); if (state == DDB_DS_NO_CONNECTION_ACTIVE) clear_bit(fw_ddb_index, ha->ddb_idx_map); goto exit_ddb_event; } ddb_entry->ddb_change(ha, fw_ddb_index, ddb_entry, state); exit_ddb_event: return QLA_ERROR; } /** * qla4xxx_login_flash_ddb - Login to target (DDB) * @cls_session: Pointer to the session to login * * This routine logins to the target. 
* Issues setddb and conn open mbx **/ void qla4xxx_login_flash_ddb(struct iscsi_cls_session *cls_session) { struct iscsi_session *sess; struct ddb_entry *ddb_entry; struct scsi_qla_host *ha; struct dev_db_entry *fw_ddb_entry = NULL; dma_addr_t fw_ddb_dma; uint32_t mbx_sts = 0; int ret; sess = cls_session->dd_data; ddb_entry = sess->dd_data; ha = ddb_entry->ha; if (!test_bit(AF_LINK_UP, &ha->flags)) return; if (ddb_entry->ddb_type != FLASH_DDB) { DEBUG2(ql4_printk(KERN_INFO, ha, "Skipping login to non FLASH DB")); goto exit_login; } fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, &fw_ddb_dma); if (fw_ddb_entry == NULL) { DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n")); goto exit_login; } if (ddb_entry->fw_ddb_index == INVALID_ENTRY) { ret = qla4xxx_get_ddb_index(ha, &ddb_entry->fw_ddb_index); if (ret == QLA_ERROR) goto exit_login; ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry; ha->tot_ddbs++; } memcpy(fw_ddb_entry, &ddb_entry->fw_ddb_entry, sizeof(struct dev_db_entry)); ddb_entry->sess->target_id = ddb_entry->fw_ddb_index; ret = qla4xxx_set_ddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_dma, &mbx_sts); if (ret == QLA_ERROR) { DEBUG2(ql4_printk(KERN_ERR, ha, "Set DDB failed\n")); goto exit_login; } ddb_entry->fw_ddb_device_state = DDB_DS_LOGIN_IN_PROCESS; ret = qla4xxx_conn_open(ha, ddb_entry->fw_ddb_index); if (ret == QLA_ERROR) { ql4_printk(KERN_ERR, ha, "%s: Login failed: %s\n", __func__, sess->targetname); goto exit_login; } exit_login: if (fw_ddb_entry) dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma); }
linux-master
drivers/scsi/qla4xxx/ql4_init.c
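The relogin pacing explained in the qla4xxx_arm_relogin_timer() comment above (the firmware is said to hold the DDB for roughly time2wait + 1 while the driver waits time2wait + 4, so a later set_ddb() never races the firmware countdown and fails with mailbox status 0x4005) can be seen in isolation in the standalone sketch below. This is illustrative only: the helper names, the plain int arithmetic and the main() harness are assumptions for the example and are not part of the driver.

/* Hypothetical, standalone illustration of the relogin-delay arithmetic;
 * not driver code. Assumes the firmware hold-off is (time2wait + 1). */
#include <stdio.h>

static int driver_relogin_delay(int default_time2wait)
{
    return default_time2wait + 4;    /* mirrors qla4xxx_arm_relogin_timer() */
}

static int firmware_holdoff(int default_time2wait)
{
    return default_time2wait + 1;    /* firmware pads time2wait by one */
}

int main(void)
{
    int t2w;

    for (t2w = 0; t2w <= 10; t2w += 5) {
        int drv = driver_relogin_delay(t2w);
        int fw  = firmware_holdoff(t2w);

        printf("time2wait=%2d  driver waits %2d s, firmware holds %2d s -> %s\n",
               t2w, drv, fw,
               drv > fw ? "relogin after hold-off (ok)"
                        : "would race firmware (0x4005 risk)");
    }
    return 0;
}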
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. * Copyright (c) 2014- QLogic Corporation. * All rights reserved * www.qlogic.com * * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. */ /* * fcpim.c - FCP initiator mode i-t nexus state machine */ #include "bfad_drv.h" #include "bfa_fcs.h" #include "bfa_fcbuild.h" #include "bfad_im.h" BFA_TRC_FILE(FCS, FCPIM); /* * forward declarations */ static void bfa_fcs_itnim_timeout(void *arg); static void bfa_fcs_itnim_free(struct bfa_fcs_itnim_s *itnim); static void bfa_fcs_itnim_send_prli(void *itnim_cbarg, struct bfa_fcxp_s *fcxp_alloced); static void bfa_fcs_itnim_prli_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, bfa_status_t req_status, u32 rsp_len, u32 resid_len, struct fchs_s *rsp_fchs); static void bfa_fcs_itnim_aen_post(struct bfa_fcs_itnim_s *itnim, enum bfa_itnim_aen_event event); static void bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim, enum bfa_fcs_itnim_event event); static void bfa_fcs_itnim_sm_prli_send(struct bfa_fcs_itnim_s *itnim, enum bfa_fcs_itnim_event event); static void bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim, enum bfa_fcs_itnim_event event); static void bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim, enum bfa_fcs_itnim_event event); static void bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim, enum bfa_fcs_itnim_event event); static void bfa_fcs_itnim_sm_hal_rport_online(struct bfa_fcs_itnim_s *itnim, enum bfa_fcs_itnim_event event); static void bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim, enum bfa_fcs_itnim_event event); static void bfa_fcs_itnim_sm_hcb_offline(struct bfa_fcs_itnim_s *itnim, enum bfa_fcs_itnim_event event); static void bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim, enum bfa_fcs_itnim_event event); static struct bfa_sm_table_s itnim_sm_table[] = { {BFA_SM(bfa_fcs_itnim_sm_offline), BFA_ITNIM_OFFLINE}, {BFA_SM(bfa_fcs_itnim_sm_prli_send), BFA_ITNIM_PRLI_SEND}, {BFA_SM(bfa_fcs_itnim_sm_prli), BFA_ITNIM_PRLI_SENT}, {BFA_SM(bfa_fcs_itnim_sm_prli_retry), BFA_ITNIM_PRLI_RETRY}, {BFA_SM(bfa_fcs_itnim_sm_hcb_online), BFA_ITNIM_HCB_ONLINE}, {BFA_SM(bfa_fcs_itnim_sm_online), BFA_ITNIM_ONLINE}, {BFA_SM(bfa_fcs_itnim_sm_hcb_offline), BFA_ITNIM_HCB_OFFLINE}, {BFA_SM(bfa_fcs_itnim_sm_initiator), BFA_ITNIM_INITIATIOR}, }; /* * fcs_itnim_sm FCS itnim state machine */ static void bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim, enum bfa_fcs_itnim_event event) { bfa_trc(itnim->fcs, itnim->rport->pwwn); bfa_trc(itnim->fcs, event); switch (event) { case BFA_FCS_ITNIM_SM_FCS_ONLINE: bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_prli_send); itnim->prli_retries = 0; bfa_fcs_itnim_send_prli(itnim, NULL); break; case BFA_FCS_ITNIM_SM_OFFLINE: bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE); break; case BFA_FCS_ITNIM_SM_INITIATOR: bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator); break; case BFA_FCS_ITNIM_SM_DELETE: bfa_fcs_itnim_free(itnim); break; default: bfa_sm_fault(itnim->fcs, event); } } static void bfa_fcs_itnim_sm_prli_send(struct bfa_fcs_itnim_s *itnim, enum bfa_fcs_itnim_event event) { bfa_trc(itnim->fcs, itnim->rport->pwwn); bfa_trc(itnim->fcs, event); switch (event) { case BFA_FCS_ITNIM_SM_FRMSENT: bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_prli); break; case BFA_FCS_ITNIM_SM_INITIATOR: bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator); bfa_fcxp_walloc_cancel(itnim->fcs->bfa, &itnim->fcxp_wqe); bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_FCS_ONLINE); 
break; case BFA_FCS_ITNIM_SM_OFFLINE: bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); bfa_fcxp_walloc_cancel(itnim->fcs->bfa, &itnim->fcxp_wqe); bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE); break; case BFA_FCS_ITNIM_SM_DELETE: bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); bfa_fcxp_walloc_cancel(itnim->fcs->bfa, &itnim->fcxp_wqe); bfa_fcs_itnim_free(itnim); break; default: bfa_sm_fault(itnim->fcs, event); } } static void bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim, enum bfa_fcs_itnim_event event) { bfa_trc(itnim->fcs, itnim->rport->pwwn); bfa_trc(itnim->fcs, event); switch (event) { case BFA_FCS_ITNIM_SM_RSP_OK: if (itnim->rport->scsi_function == BFA_RPORT_INITIATOR) bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator); else bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_hal_rport_online); bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_FCS_ONLINE); break; case BFA_FCS_ITNIM_SM_RSP_ERROR: bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_prli_retry); bfa_timer_start(itnim->fcs->bfa, &itnim->timer, bfa_fcs_itnim_timeout, itnim, BFA_FCS_RETRY_TIMEOUT); break; case BFA_FCS_ITNIM_SM_RSP_NOT_SUPP: bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); break; case BFA_FCS_ITNIM_SM_OFFLINE: bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); bfa_fcxp_discard(itnim->fcxp); bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE); break; case BFA_FCS_ITNIM_SM_INITIATOR: bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator); bfa_fcxp_discard(itnim->fcxp); bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_FCS_ONLINE); break; case BFA_FCS_ITNIM_SM_DELETE: bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); bfa_fcxp_discard(itnim->fcxp); bfa_fcs_itnim_free(itnim); break; default: bfa_sm_fault(itnim->fcs, event); } } static void bfa_fcs_itnim_sm_hal_rport_online(struct bfa_fcs_itnim_s *itnim, enum bfa_fcs_itnim_event event) { bfa_trc(itnim->fcs, itnim->rport->pwwn); bfa_trc(itnim->fcs, event); switch (event) { case BFA_FCS_ITNIM_SM_HAL_ONLINE: if (!itnim->bfa_itnim) itnim->bfa_itnim = bfa_itnim_create(itnim->fcs->bfa, itnim->rport->bfa_rport, itnim); if (itnim->bfa_itnim) { bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_hcb_online); bfa_itnim_online(itnim->bfa_itnim, itnim->seq_rec); } else { bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); bfa_sm_send_event(itnim->rport, RPSM_EVENT_DELETE); } break; case BFA_FCS_ITNIM_SM_OFFLINE: bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE); break; case BFA_FCS_ITNIM_SM_DELETE: bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); bfa_fcs_itnim_free(itnim); break; default: bfa_sm_fault(itnim->fcs, event); } } static void bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim, enum bfa_fcs_itnim_event event) { bfa_trc(itnim->fcs, itnim->rport->pwwn); bfa_trc(itnim->fcs, event); switch (event) { case BFA_FCS_ITNIM_SM_TIMEOUT: if (itnim->prli_retries < BFA_FCS_RPORT_MAX_RETRIES) { itnim->prli_retries++; bfa_trc(itnim->fcs, itnim->prli_retries); bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_prli_send); bfa_fcs_itnim_send_prli(itnim, NULL); } else { /* invoke target offline */ bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); bfa_sm_send_event(itnim->rport, RPSM_EVENT_LOGO_IMP); } break; case BFA_FCS_ITNIM_SM_OFFLINE: bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); bfa_timer_stop(&itnim->timer); bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE); break; case BFA_FCS_ITNIM_SM_INITIATOR: bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator); bfa_timer_stop(&itnim->timer); bfa_sm_send_event(itnim->rport, 
RPSM_EVENT_FC4_FCS_ONLINE); break; case BFA_FCS_ITNIM_SM_DELETE: bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); bfa_timer_stop(&itnim->timer); bfa_fcs_itnim_free(itnim); break; default: bfa_sm_fault(itnim->fcs, event); } } static void bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim, enum bfa_fcs_itnim_event event) { struct bfad_s *bfad = (struct bfad_s *)itnim->fcs->bfad; char lpwwn_buf[BFA_STRING_32]; char rpwwn_buf[BFA_STRING_32]; bfa_trc(itnim->fcs, itnim->rport->pwwn); bfa_trc(itnim->fcs, event); switch (event) { case BFA_FCS_ITNIM_SM_HCB_ONLINE: bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_online); bfa_fcb_itnim_online(itnim->itnim_drv); wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(itnim->rport->port)); wwn2str(rpwwn_buf, itnim->rport->pwwn); BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Target (WWN = %s) is online for initiator (WWN = %s)\n", rpwwn_buf, lpwwn_buf); bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_ONLINE); break; case BFA_FCS_ITNIM_SM_OFFLINE: bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_hcb_offline); bfa_itnim_offline(itnim->bfa_itnim); break; case BFA_FCS_ITNIM_SM_DELETE: bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); bfa_fcs_itnim_free(itnim); break; default: bfa_sm_fault(itnim->fcs, event); } } static void bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim, enum bfa_fcs_itnim_event event) { struct bfad_s *bfad = (struct bfad_s *)itnim->fcs->bfad; char lpwwn_buf[BFA_STRING_32]; char rpwwn_buf[BFA_STRING_32]; bfa_trc(itnim->fcs, itnim->rport->pwwn); bfa_trc(itnim->fcs, event); switch (event) { case BFA_FCS_ITNIM_SM_OFFLINE: bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_hcb_offline); bfa_fcb_itnim_offline(itnim->itnim_drv); bfa_itnim_offline(itnim->bfa_itnim); wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(itnim->rport->port)); wwn2str(rpwwn_buf, itnim->rport->pwwn); if (bfa_fcs_lport_is_online(itnim->rport->port) == BFA_TRUE) { BFA_LOG(KERN_ERR, bfad, bfa_log_level, "Target (WWN = %s) connectivity lost for " "initiator (WWN = %s)\n", rpwwn_buf, lpwwn_buf); bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_DISCONNECT); } else { BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Target (WWN = %s) offlined by initiator (WWN = %s)\n", rpwwn_buf, lpwwn_buf); bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_OFFLINE); } break; case BFA_FCS_ITNIM_SM_DELETE: bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); bfa_fcs_itnim_free(itnim); break; default: bfa_sm_fault(itnim->fcs, event); } } static void bfa_fcs_itnim_sm_hcb_offline(struct bfa_fcs_itnim_s *itnim, enum bfa_fcs_itnim_event event) { bfa_trc(itnim->fcs, itnim->rport->pwwn); bfa_trc(itnim->fcs, event); switch (event) { case BFA_FCS_ITNIM_SM_HCB_OFFLINE: bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE); break; case BFA_FCS_ITNIM_SM_DELETE: bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); bfa_fcs_itnim_free(itnim); break; default: bfa_sm_fault(itnim->fcs, event); } } /* * This state is set when a discovered rport is also in intiator mode. * This ITN is marked as no_op and is not active and will not be truned into * online state. 
*/ static void bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim, enum bfa_fcs_itnim_event event) { bfa_trc(itnim->fcs, itnim->rport->pwwn); bfa_trc(itnim->fcs, event); switch (event) { case BFA_FCS_ITNIM_SM_OFFLINE: bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE); break; /* * fcs_online is expected here for well known initiator ports */ case BFA_FCS_ITNIM_SM_FCS_ONLINE: bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_FCS_ONLINE); break; case BFA_FCS_ITNIM_SM_RSP_ERROR: case BFA_FCS_ITNIM_SM_INITIATOR: break; case BFA_FCS_ITNIM_SM_DELETE: bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); bfa_fcs_itnim_free(itnim); break; default: bfa_sm_fault(itnim->fcs, event); } } static void bfa_fcs_itnim_aen_post(struct bfa_fcs_itnim_s *itnim, enum bfa_itnim_aen_event event) { struct bfa_fcs_rport_s *rport = itnim->rport; struct bfad_s *bfad = (struct bfad_s *)itnim->fcs->bfad; struct bfa_aen_entry_s *aen_entry; /* Don't post events for well known addresses */ if (BFA_FCS_PID_IS_WKA(rport->pid)) return; bfad_get_aen_entry(bfad, aen_entry); if (!aen_entry) return; aen_entry->aen_data.itnim.vf_id = rport->port->fabric->vf_id; aen_entry->aen_data.itnim.ppwwn = bfa_fcs_lport_get_pwwn( bfa_fcs_get_base_port(itnim->fcs)); aen_entry->aen_data.itnim.lpwwn = bfa_fcs_lport_get_pwwn(rport->port); aen_entry->aen_data.itnim.rpwwn = rport->pwwn; /* Send the AEN notification */ bfad_im_post_vendor_event(aen_entry, bfad, ++rport->fcs->fcs_aen_seq, BFA_AEN_CAT_ITNIM, event); } static void bfa_fcs_itnim_send_prli(void *itnim_cbarg, struct bfa_fcxp_s *fcxp_alloced) { struct bfa_fcs_itnim_s *itnim = itnim_cbarg; struct bfa_fcs_rport_s *rport = itnim->rport; struct bfa_fcs_lport_s *port = rport->port; struct fchs_s fchs; struct bfa_fcxp_s *fcxp; int len; bfa_trc(itnim->fcs, itnim->rport->pwwn); fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE); if (!fcxp) { itnim->stats.fcxp_alloc_wait++; bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &itnim->fcxp_wqe, bfa_fcs_itnim_send_prli, itnim, BFA_TRUE); return; } itnim->fcxp = fcxp; len = fc_prli_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), itnim->rport->pid, bfa_fcs_lport_get_fcid(port), 0); bfa_fcxp_send(fcxp, rport->bfa_rport, port->fabric->vf_id, port->lp_tag, BFA_FALSE, FC_CLASS_3, len, &fchs, bfa_fcs_itnim_prli_response, (void *)itnim, FC_MAX_PDUSZ, FC_ELS_TOV); itnim->stats.prli_sent++; bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_FRMSENT); } static void bfa_fcs_itnim_prli_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, bfa_status_t req_status, u32 rsp_len, u32 resid_len, struct fchs_s *rsp_fchs) { struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cbarg; struct fc_els_cmd_s *els_cmd; struct fc_prli_s *prli_resp; struct fc_ls_rjt_s *ls_rjt; struct fc_prli_params_s *sparams; bfa_trc(itnim->fcs, req_status); /* * Sanity Checks */ if (req_status != BFA_STATUS_OK) { itnim->stats.prli_rsp_err++; bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_RSP_ERROR); return; } els_cmd = (struct fc_els_cmd_s *) BFA_FCXP_RSP_PLD(fcxp); if (els_cmd->els_code == FC_ELS_ACC) { prli_resp = (struct fc_prli_s *) els_cmd; if (fc_prli_rsp_parse(prli_resp, rsp_len) != FC_PARSE_OK) { bfa_trc(itnim->fcs, rsp_len); /* * Check if this r-port is also in Initiator mode. * If so, we need to set this ITN as a no-op. 
*/ if (prli_resp->parampage.servparams.initiator) { bfa_trc(itnim->fcs, prli_resp->parampage.type); itnim->rport->scsi_function = BFA_RPORT_INITIATOR; itnim->stats.prli_rsp_acc++; itnim->stats.initiator++; bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_RSP_OK); return; } itnim->stats.prli_rsp_parse_err++; return; } itnim->rport->scsi_function = BFA_RPORT_TARGET; sparams = &prli_resp->parampage.servparams; itnim->seq_rec = sparams->retry; itnim->rec_support = sparams->rec_support; itnim->task_retry_id = sparams->task_retry_id; itnim->conf_comp = sparams->confirm; itnim->stats.prli_rsp_acc++; bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_RSP_OK); } else { ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp); bfa_trc(itnim->fcs, ls_rjt->reason_code); bfa_trc(itnim->fcs, ls_rjt->reason_code_expl); itnim->stats.prli_rsp_rjt++; if (ls_rjt->reason_code == FC_LS_RJT_RSN_CMD_NOT_SUPP) { bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_RSP_NOT_SUPP); return; } bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_RSP_ERROR); } } static void bfa_fcs_itnim_timeout(void *arg) { struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) arg; itnim->stats.timeout++; bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_TIMEOUT); } static void bfa_fcs_itnim_free(struct bfa_fcs_itnim_s *itnim) { if (itnim->bfa_itnim) { bfa_itnim_delete(itnim->bfa_itnim); itnim->bfa_itnim = NULL; } bfa_fcb_itnim_free(itnim->fcs->bfad, itnim->itnim_drv); } /* * itnim_public FCS ITNIM public interfaces */ /* * Called by rport when a new rport is created. * * @param[in] rport - remote port. */ struct bfa_fcs_itnim_s * bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport) { struct bfa_fcs_lport_s *port = rport->port; struct bfa_fcs_itnim_s *itnim; struct bfad_itnim_s *itnim_drv; int ret; /* * call bfad to allocate the itnim */ ret = bfa_fcb_itnim_alloc(port->fcs->bfad, &itnim, &itnim_drv); if (ret) { bfa_trc(port->fcs, rport->pwwn); return NULL; } /* * Initialize itnim */ itnim->rport = rport; itnim->fcs = rport->fcs; itnim->itnim_drv = itnim_drv; itnim->bfa_itnim = NULL; itnim->seq_rec = BFA_FALSE; itnim->rec_support = BFA_FALSE; itnim->conf_comp = BFA_FALSE; itnim->task_retry_id = BFA_FALSE; /* * Set State machine */ bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); return itnim; } /* * Called by rport to delete the instance of FCPIM. * * @param[in] rport - remote port. */ void bfa_fcs_itnim_delete(struct bfa_fcs_itnim_s *itnim) { bfa_trc(itnim->fcs, itnim->rport->pid); bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_DELETE); } /* * Notification from rport that PLOGI is complete to initiate FC-4 session. */ void bfa_fcs_itnim_brp_online(struct bfa_fcs_itnim_s *itnim) { itnim->stats.onlines++; if (!BFA_FCS_PID_IS_WKA(itnim->rport->pid)) bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HAL_ONLINE); } /* * Called by rport to handle a remote device offline. */ void bfa_fcs_itnim_rport_offline(struct bfa_fcs_itnim_s *itnim) { itnim->stats.offlines++; bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_OFFLINE); } /* * Called by rport when remote port is known to be an initiator from * PRLI received. */ void bfa_fcs_itnim_is_initiator(struct bfa_fcs_itnim_s *itnim) { bfa_trc(itnim->fcs, itnim->rport->pid); itnim->stats.initiator++; bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_INITIATOR); } /* * Called by rport to check if the itnim is online. 
*/ bfa_status_t bfa_fcs_itnim_get_online_state(struct bfa_fcs_itnim_s *itnim) { bfa_trc(itnim->fcs, itnim->rport->pid); switch (bfa_sm_to_state(itnim_sm_table, itnim->sm)) { case BFA_ITNIM_ONLINE: case BFA_ITNIM_INITIATIOR: return BFA_STATUS_OK; default: return BFA_STATUS_NO_FCPIM_NEXUS; } } /* * BFA completion callback for bfa_itnim_online(). */ void bfa_cb_itnim_online(void *cbarg) { struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cbarg; bfa_trc(itnim->fcs, itnim->rport->pwwn); bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HCB_ONLINE); } /* * BFA completion callback for bfa_itnim_offline(). */ void bfa_cb_itnim_offline(void *cb_arg) { struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cb_arg; bfa_trc(itnim->fcs, itnim->rport->pwwn); bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HCB_OFFLINE); } /* * Mark the beginning of PATH TOV handling. IO completion callbacks * are still pending. */ void bfa_cb_itnim_tov_begin(void *cb_arg) { struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cb_arg; bfa_trc(itnim->fcs, itnim->rport->pwwn); } /* * Mark the end of PATH TOV handling. All pending IOs are already cleaned up. */ void bfa_cb_itnim_tov(void *cb_arg) { struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cb_arg; struct bfad_itnim_s *itnim_drv = itnim->itnim_drv; bfa_trc(itnim->fcs, itnim->rport->pwwn); itnim_drv->state = ITNIM_STATE_TIMEOUT; } /* * BFA notification to FCS/driver for second level error recovery. * * Atleast one I/O request has timedout and target is unresponsive to * repeated abort requests. Second level error recovery should be initiated * by starting implicit logout and recovery procedures. */ void bfa_cb_itnim_sler(void *cb_arg) { struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cb_arg; itnim->stats.sler++; bfa_trc(itnim->fcs, itnim->rport->pwwn); bfa_sm_send_event(itnim->rport, RPSM_EVENT_LOGO_IMP); } struct bfa_fcs_itnim_s * bfa_fcs_itnim_lookup(struct bfa_fcs_lport_s *port, wwn_t rpwwn) { struct bfa_fcs_rport_s *rport; rport = bfa_fcs_rport_lookup(port, rpwwn); if (!rport) return NULL; WARN_ON(rport->itnim == NULL); return rport->itnim; } bfa_status_t bfa_fcs_itnim_attr_get(struct bfa_fcs_lport_s *port, wwn_t rpwwn, struct bfa_itnim_attr_s *attr) { struct bfa_fcs_itnim_s *itnim = NULL; itnim = bfa_fcs_itnim_lookup(port, rpwwn); if (itnim == NULL) return BFA_STATUS_NO_FCPIM_NEXUS; attr->state = bfa_sm_to_state(itnim_sm_table, itnim->sm); attr->retry = itnim->seq_rec; attr->rec_support = itnim->rec_support; attr->conf_comp = itnim->conf_comp; attr->task_retry_id = itnim->task_retry_id; return BFA_STATUS_OK; } bfa_status_t bfa_fcs_itnim_stats_get(struct bfa_fcs_lport_s *port, wwn_t rpwwn, struct bfa_itnim_stats_s *stats) { struct bfa_fcs_itnim_s *itnim = NULL; WARN_ON(port == NULL); itnim = bfa_fcs_itnim_lookup(port, rpwwn); if (itnim == NULL) return BFA_STATUS_NO_FCPIM_NEXUS; memcpy(stats, &itnim->stats, sizeof(struct bfa_itnim_stats_s)); return BFA_STATUS_OK; } bfa_status_t bfa_fcs_itnim_stats_clear(struct bfa_fcs_lport_s *port, wwn_t rpwwn) { struct bfa_fcs_itnim_s *itnim = NULL; WARN_ON(port == NULL); itnim = bfa_fcs_itnim_lookup(port, rpwwn); if (itnim == NULL) return BFA_STATUS_NO_FCPIM_NEXUS; memset(&itnim->stats, 0, sizeof(struct bfa_itnim_stats_s)); return BFA_STATUS_OK; } void bfa_fcs_fcpim_uf_recv(struct bfa_fcs_itnim_s *itnim, struct fchs_s *fchs, u16 len) { struct fc_els_cmd_s *els_cmd; bfa_trc(itnim->fcs, fchs->type); if (fchs->type != FC_TYPE_ELS) return; els_cmd = (struct fc_els_cmd_s *) (fchs + 1); bfa_trc(itnim->fcs, 
els_cmd->els_code); switch (els_cmd->els_code) { case FC_ELS_PRLO: bfa_fcs_rport_prlo(itnim->rport, fchs->ox_id); break; default: WARN_ON(1); } }
linux-master
drivers/scsi/bfa/bfa_fcs_fcpim.c
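bfa_fcs_fcpim.c above reports the itnim's externally visible state by matching its current handler function pointer against itnim_sm_table[] (via bfa_sm_to_state()). The standalone sketch below reproduces that table-lookup idea with made-up names; it is a sketch of the pattern, not the BFA implementation.

/* Hypothetical, standalone sketch of a handler-pointer -> enum lookup table;
 * all identifiers here are illustrative. */
#include <stdio.h>
#include <stddef.h>

enum demo_state { DEMO_OFFLINE, DEMO_ONLINE, DEMO_UNKNOWN };

typedef void (*demo_sm_t)(void *obj, int event);

static void demo_sm_offline(void *obj, int event) { (void)obj; (void)event; }
static void demo_sm_online(void *obj, int event)  { (void)obj; (void)event; }

struct demo_sm_table_s {
    demo_sm_t       sm;     /* state-handler function */
    enum demo_state state;  /* external state it represents */
};

static const struct demo_sm_table_s demo_sm_table[] = {
    { demo_sm_offline, DEMO_OFFLINE },
    { demo_sm_online,  DEMO_ONLINE  },
};

/* Analogue of bfa_sm_to_state(): map the current handler back to an enum. */
static enum demo_state demo_sm_to_state(const struct demo_sm_table_s *tbl,
                                        size_t n, demo_sm_t cur)
{
    size_t i;

    for (i = 0; i < n; i++)
        if (tbl[i].sm == cur)
            return tbl[i].state;
    return DEMO_UNKNOWN;
}

int main(void)
{
    demo_sm_t cur = demo_sm_online;   /* stands in for itnim->sm */

    printf("current state = %d\n",
           demo_sm_to_state(demo_sm_table,
                            sizeof(demo_sm_table) / sizeof(demo_sm_table[0]),
                            cur));
    return 0;
}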
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. * Copyright (c) 2014- QLogic Corporation. * All rights reserved * www.qlogic.com * * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. */ #include "bfad_drv.h" #include "bfa_ioc.h" #include "bfi_reg.h" #include "bfa_defs.h" BFA_TRC_FILE(CNA, IOC_CB); #define bfa_ioc_cb_join_pos(__ioc) ((u32) (1 << BFA_IOC_CB_JOIN_SH)) /* * forward declarations */ static bfa_boolean_t bfa_ioc_cb_firmware_lock(struct bfa_ioc_s *ioc); static void bfa_ioc_cb_firmware_unlock(struct bfa_ioc_s *ioc); static void bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc); static void bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc); static void bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix); static void bfa_ioc_cb_notify_fail(struct bfa_ioc_s *ioc); static void bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc); static bfa_boolean_t bfa_ioc_cb_sync_start(struct bfa_ioc_s *ioc); static void bfa_ioc_cb_sync_join(struct bfa_ioc_s *ioc); static void bfa_ioc_cb_sync_leave(struct bfa_ioc_s *ioc); static void bfa_ioc_cb_sync_ack(struct bfa_ioc_s *ioc); static bfa_boolean_t bfa_ioc_cb_sync_complete(struct bfa_ioc_s *ioc); static void bfa_ioc_cb_set_cur_ioc_fwstate( struct bfa_ioc_s *ioc, enum bfi_ioc_state fwstate); static enum bfi_ioc_state bfa_ioc_cb_get_cur_ioc_fwstate(struct bfa_ioc_s *ioc); static void bfa_ioc_cb_set_alt_ioc_fwstate( struct bfa_ioc_s *ioc, enum bfi_ioc_state fwstate); static enum bfi_ioc_state bfa_ioc_cb_get_alt_ioc_fwstate(struct bfa_ioc_s *ioc); static struct bfa_ioc_hwif_s hwif_cb; /* * Called from bfa_ioc_attach() to map asic specific calls. */ void bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc) { hwif_cb.ioc_pll_init = bfa_ioc_cb_pll_init; hwif_cb.ioc_firmware_lock = bfa_ioc_cb_firmware_lock; hwif_cb.ioc_firmware_unlock = bfa_ioc_cb_firmware_unlock; hwif_cb.ioc_reg_init = bfa_ioc_cb_reg_init; hwif_cb.ioc_map_port = bfa_ioc_cb_map_port; hwif_cb.ioc_isr_mode_set = bfa_ioc_cb_isr_mode_set; hwif_cb.ioc_notify_fail = bfa_ioc_cb_notify_fail; hwif_cb.ioc_ownership_reset = bfa_ioc_cb_ownership_reset; hwif_cb.ioc_sync_start = bfa_ioc_cb_sync_start; hwif_cb.ioc_sync_join = bfa_ioc_cb_sync_join; hwif_cb.ioc_sync_leave = bfa_ioc_cb_sync_leave; hwif_cb.ioc_sync_ack = bfa_ioc_cb_sync_ack; hwif_cb.ioc_sync_complete = bfa_ioc_cb_sync_complete; hwif_cb.ioc_set_fwstate = bfa_ioc_cb_set_cur_ioc_fwstate; hwif_cb.ioc_get_fwstate = bfa_ioc_cb_get_cur_ioc_fwstate; hwif_cb.ioc_set_alt_fwstate = bfa_ioc_cb_set_alt_ioc_fwstate; hwif_cb.ioc_get_alt_fwstate = bfa_ioc_cb_get_alt_ioc_fwstate; ioc->ioc_hwif = &hwif_cb; } /* * Return true if firmware of current driver matches the running firmware. */ static bfa_boolean_t bfa_ioc_cb_firmware_lock(struct bfa_ioc_s *ioc) { enum bfi_ioc_state alt_fwstate, cur_fwstate; struct bfi_ioc_image_hdr_s fwhdr; cur_fwstate = bfa_ioc_cb_get_cur_ioc_fwstate(ioc); bfa_trc(ioc, cur_fwstate); alt_fwstate = bfa_ioc_cb_get_alt_ioc_fwstate(ioc); bfa_trc(ioc, alt_fwstate); /* * Uninit implies this is the only driver as of now. */ if (cur_fwstate == BFI_IOC_UNINIT) return BFA_TRUE; /* * Check if another driver with a different firmware is active */ bfa_ioc_fwver_get(ioc, &fwhdr); if (!bfa_ioc_fwver_cmp(ioc, &fwhdr) && alt_fwstate != BFI_IOC_DISABLED) { bfa_trc(ioc, alt_fwstate); return BFA_FALSE; } return BFA_TRUE; } static void bfa_ioc_cb_firmware_unlock(struct bfa_ioc_s *ioc) { } /* * Notify other functions on HB failure. 
*/ static void bfa_ioc_cb_notify_fail(struct bfa_ioc_s *ioc) { writel(~0U, ioc->ioc_regs.err_set); readl(ioc->ioc_regs.err_set); } /* * Host to LPU mailbox message addresses */ static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = { { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 }, { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 } }; /* * Host <-> LPU mailbox command/status registers */ static struct { u32 hfn, lpu; } iocreg_mbcmd[] = { { HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT }, { HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT } }; static void bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc) { void __iomem *rb; int pcifn = bfa_ioc_pcifn(ioc); rb = bfa_ioc_bar0(ioc); ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox; ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox; ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn; if (ioc->port_id == 0) { ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG; ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG; ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG; } else { ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG); ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG); ioc->ioc_regs.alt_ioc_fwstate = (rb + BFA_IOC0_STATE_REG); } /* * Host <-> LPU mailbox command/status registers */ ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd[pcifn].hfn; ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd[pcifn].lpu; /* * PSS control registers */ ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG); ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG); ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_LCLK_CTL_REG); ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_SCLK_CTL_REG); /* * IOC semaphore registers and serialization */ ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG); ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG); /* * sram memory access */ ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START); ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CB; /* * err set reg : for notification of hb failure */ ioc->ioc_regs.err_set = (rb + ERR_SET_REG); } /* * Initialize IOC to port mapping. */ static void bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc) { /* * For crossbow, port id is same as pci function. */ ioc->port_id = bfa_ioc_pcifn(ioc); bfa_trc(ioc, ioc->port_id); } /* * Set interrupt mode for a function: INTX or MSIX */ static void bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix) { } /* * Synchronized IOC failure processing routines */ static bfa_boolean_t bfa_ioc_cb_sync_start(struct bfa_ioc_s *ioc) { u32 ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate); /** * Driver load time. If the join bit is set, * it is due to an unclean exit by the driver for this * PCI fn in the previous incarnation. Whoever comes here first * should clean it up, no matter which PCI fn. */ if (ioc_fwstate & BFA_IOC_CB_JOIN_MASK) { writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate); writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate); return BFA_TRUE; } return bfa_ioc_cb_sync_complete(ioc); } /* * Cleanup hw semaphore and usecnt registers */ static void bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc) { /* * Read the hw sem reg to make sure that it is locked * before we clear it. If it is not locked, writing 1 * will lock it instead of clearing it. 
*/ readl(ioc->ioc_regs.ioc_sem_reg); writel(1, ioc->ioc_regs.ioc_sem_reg); } /* * Synchronized IOC failure processing routines */ static void bfa_ioc_cb_sync_join(struct bfa_ioc_s *ioc) { u32 r32 = readl(ioc->ioc_regs.ioc_fwstate); u32 join_pos = bfa_ioc_cb_join_pos(ioc); writel((r32 | join_pos), ioc->ioc_regs.ioc_fwstate); } static void bfa_ioc_cb_sync_leave(struct bfa_ioc_s *ioc) { u32 r32 = readl(ioc->ioc_regs.ioc_fwstate); u32 join_pos = bfa_ioc_cb_join_pos(ioc); writel((r32 & ~join_pos), ioc->ioc_regs.ioc_fwstate); } static void bfa_ioc_cb_set_cur_ioc_fwstate(struct bfa_ioc_s *ioc, enum bfi_ioc_state fwstate) { u32 r32 = readl(ioc->ioc_regs.ioc_fwstate); writel((fwstate | (r32 & BFA_IOC_CB_JOIN_MASK)), ioc->ioc_regs.ioc_fwstate); } static enum bfi_ioc_state bfa_ioc_cb_get_cur_ioc_fwstate(struct bfa_ioc_s *ioc) { return (enum bfi_ioc_state)(readl(ioc->ioc_regs.ioc_fwstate) & BFA_IOC_CB_FWSTATE_MASK); } static void bfa_ioc_cb_set_alt_ioc_fwstate(struct bfa_ioc_s *ioc, enum bfi_ioc_state fwstate) { u32 r32 = readl(ioc->ioc_regs.alt_ioc_fwstate); writel((fwstate | (r32 & BFA_IOC_CB_JOIN_MASK)), ioc->ioc_regs.alt_ioc_fwstate); } static enum bfi_ioc_state bfa_ioc_cb_get_alt_ioc_fwstate(struct bfa_ioc_s *ioc) { return (enum bfi_ioc_state)(readl(ioc->ioc_regs.alt_ioc_fwstate) & BFA_IOC_CB_FWSTATE_MASK); } static void bfa_ioc_cb_sync_ack(struct bfa_ioc_s *ioc) { bfa_ioc_cb_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL); } static bfa_boolean_t bfa_ioc_cb_sync_complete(struct bfa_ioc_s *ioc) { u32 fwstate, alt_fwstate; fwstate = bfa_ioc_cb_get_cur_ioc_fwstate(ioc); /* * At this point, this IOC is hoding the hw sem in the * start path (fwcheck) OR in the disable/enable path * OR to check if the other IOC has acknowledged failure. * * So, this IOC can be in UNINIT, INITING, DISABLED, FAIL * or in MEMTEST states. In a normal scenario, this IOC * can not be in OP state when this function is called. * * However, this IOC could still be in OP state when * the OS driver is starting up, if the OptROM code has * left it in that state. * * If we had marked this IOC's fwstate as BFI_IOC_FAIL * in the failure case and now, if the fwstate is not * BFI_IOC_FAIL it implies that the other PCI fn have * reinitialized the ASIC or this IOC got disabled, so * return TRUE. 
*/ if (fwstate == BFI_IOC_UNINIT || fwstate == BFI_IOC_INITING || fwstate == BFI_IOC_DISABLED || fwstate == BFI_IOC_MEMTEST || fwstate == BFI_IOC_OP) return BFA_TRUE; else { alt_fwstate = bfa_ioc_cb_get_alt_ioc_fwstate(ioc); if (alt_fwstate == BFI_IOC_FAIL || alt_fwstate == BFI_IOC_DISABLED || alt_fwstate == BFI_IOC_UNINIT || alt_fwstate == BFI_IOC_INITING || alt_fwstate == BFI_IOC_MEMTEST) return BFA_TRUE; else return BFA_FALSE; } } bfa_status_t bfa_ioc_cb_pll_init(void __iomem *rb, enum bfi_asic_mode fcmode) { u32 pll_sclk, pll_fclk, join_bits; pll_sclk = __APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN | __APP_PLL_SCLK_P0_1(3U) | __APP_PLL_SCLK_JITLMT0_1(3U) | __APP_PLL_SCLK_CNTLMT0_1(3U); pll_fclk = __APP_PLL_LCLK_ENABLE | __APP_PLL_LCLK_LRESETN | __APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) | __APP_PLL_LCLK_JITLMT0_1(3U) | __APP_PLL_LCLK_CNTLMT0_1(3U); join_bits = readl(rb + BFA_IOC0_STATE_REG) & BFA_IOC_CB_JOIN_MASK; writel((BFI_IOC_UNINIT | join_bits), (rb + BFA_IOC0_STATE_REG)); join_bits = readl(rb + BFA_IOC1_STATE_REG) & BFA_IOC_CB_JOIN_MASK; writel((BFI_IOC_UNINIT | join_bits), (rb + BFA_IOC1_STATE_REG)); writel(0xffffffffU, (rb + HOSTFN0_INT_MSK)); writel(0xffffffffU, (rb + HOSTFN1_INT_MSK)); writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS)); writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS)); writel(0xffffffffU, (rb + HOSTFN0_INT_MSK)); writel(0xffffffffU, (rb + HOSTFN1_INT_MSK)); writel(__APP_PLL_SCLK_LOGIC_SOFT_RESET, rb + APP_PLL_SCLK_CTL_REG); writel(__APP_PLL_SCLK_BYPASS | __APP_PLL_SCLK_LOGIC_SOFT_RESET, rb + APP_PLL_SCLK_CTL_REG); writel(__APP_PLL_LCLK_LOGIC_SOFT_RESET, rb + APP_PLL_LCLK_CTL_REG); writel(__APP_PLL_LCLK_BYPASS | __APP_PLL_LCLK_LOGIC_SOFT_RESET, rb + APP_PLL_LCLK_CTL_REG); udelay(2); writel(__APP_PLL_SCLK_LOGIC_SOFT_RESET, rb + APP_PLL_SCLK_CTL_REG); writel(__APP_PLL_LCLK_LOGIC_SOFT_RESET, rb + APP_PLL_LCLK_CTL_REG); writel(pll_sclk | __APP_PLL_SCLK_LOGIC_SOFT_RESET, rb + APP_PLL_SCLK_CTL_REG); writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET, rb + APP_PLL_LCLK_CTL_REG); udelay(2000); writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS)); writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS)); writel(pll_sclk, (rb + APP_PLL_SCLK_CTL_REG)); writel(pll_fclk, (rb + APP_PLL_LCLK_CTL_REG)); return BFA_STATUS_OK; }
linux-master
drivers/scsi/bfa/bfa_ioc_cb.c
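The fwstate accessors in bfa_ioc_cb.c above write the firmware-state field while preserving the per-PCI-function join bits (BFA_IOC_CB_JOIN_MASK) that live in the same register. The standalone sketch below models that read-modify-write with a plain variable and invented mask values; it is illustrative only and does not use the real register layout.

/* Hypothetical, standalone sketch of state-field write with join-bit
 * preservation; mask values and names are made up for the example. */
#include <stdio.h>
#include <stdint.h>

#define DEMO_FWSTATE_MASK 0x0000ffffu   /* low bits: firmware state */
#define DEMO_JOIN_MASK    0xffff0000u   /* high bits: per-function join flags */

static uint32_t demo_reg;               /* stands in for the ioc_fwstate register */

static void demo_set_fwstate(uint32_t fwstate)
{
    uint32_t r32 = demo_reg;                       /* readl() */

    /* overwrite only the state field, keep the join bits untouched */
    demo_reg = fwstate | (r32 & DEMO_JOIN_MASK);   /* writel() */
}

static uint32_t demo_get_fwstate(void)
{
    return demo_reg & DEMO_FWSTATE_MASK;           /* strip join bits */
}

int main(void)
{
    demo_reg = 0x00010000u | 0x3u;   /* one join bit set, state = 3 */

    demo_set_fwstate(0x7u);          /* move to state 7 */

    printf("raw=0x%08x state=0x%x (join bits preserved)\n",
           (unsigned)demo_reg, (unsigned)demo_get_fwstate());
    return 0;
}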
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. * Copyright (c) 2014- QLogic Corporation. * All rights reserved * www.qlogic.com * * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. */ /* * fcbuild.c - FC link service frame building and parsing routines */ #include "bfad_drv.h" #include "bfa_fcbuild.h" /* * static build functions */ static void fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id); static void fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id); static struct fchs_s fc_els_req_tmpl; static struct fchs_s fc_els_rsp_tmpl; static struct fchs_s fc_bls_req_tmpl; static struct fchs_s fc_bls_rsp_tmpl; static struct fc_ba_acc_s ba_acc_tmpl; static struct fc_logi_s plogi_tmpl; static struct fc_prli_s prli_tmpl; static struct fc_rrq_s rrq_tmpl; static struct fchs_s fcp_fchs_tmpl; void fcbuild_init(void) { /* * fc_els_req_tmpl */ fc_els_req_tmpl.routing = FC_RTG_EXT_LINK; fc_els_req_tmpl.cat_info = FC_CAT_LD_REQUEST; fc_els_req_tmpl.type = FC_TYPE_ELS; fc_els_req_tmpl.f_ctl = bfa_hton3b(FCTL_SEQ_INI | FCTL_FS_EXCH | FCTL_END_SEQ | FCTL_SI_XFER); fc_els_req_tmpl.rx_id = FC_RXID_ANY; /* * fc_els_rsp_tmpl */ fc_els_rsp_tmpl.routing = FC_RTG_EXT_LINK; fc_els_rsp_tmpl.cat_info = FC_CAT_LD_REPLY; fc_els_rsp_tmpl.type = FC_TYPE_ELS; fc_els_rsp_tmpl.f_ctl = bfa_hton3b(FCTL_EC_RESP | FCTL_SEQ_INI | FCTL_LS_EXCH | FCTL_END_SEQ | FCTL_SI_XFER); fc_els_rsp_tmpl.rx_id = FC_RXID_ANY; /* * fc_bls_req_tmpl */ fc_bls_req_tmpl.routing = FC_RTG_BASIC_LINK; fc_bls_req_tmpl.type = FC_TYPE_BLS; fc_bls_req_tmpl.f_ctl = bfa_hton3b(FCTL_END_SEQ | FCTL_SI_XFER); fc_bls_req_tmpl.rx_id = FC_RXID_ANY; /* * fc_bls_rsp_tmpl */ fc_bls_rsp_tmpl.routing = FC_RTG_BASIC_LINK; fc_bls_rsp_tmpl.cat_info = FC_CAT_BA_ACC; fc_bls_rsp_tmpl.type = FC_TYPE_BLS; fc_bls_rsp_tmpl.f_ctl = bfa_hton3b(FCTL_EC_RESP | FCTL_SEQ_INI | FCTL_LS_EXCH | FCTL_END_SEQ | FCTL_SI_XFER); fc_bls_rsp_tmpl.rx_id = FC_RXID_ANY; /* * ba_acc_tmpl */ ba_acc_tmpl.seq_id_valid = 0; ba_acc_tmpl.low_seq_cnt = 0; ba_acc_tmpl.high_seq_cnt = 0xFFFF; /* * plogi_tmpl */ plogi_tmpl.csp.verhi = FC_PH_VER_PH_3; plogi_tmpl.csp.verlo = FC_PH_VER_4_3; plogi_tmpl.csp.ciro = 0x1; plogi_tmpl.csp.cisc = 0x0; plogi_tmpl.csp.altbbcred = 0x0; plogi_tmpl.csp.conseq = cpu_to_be16(0x00FF); plogi_tmpl.csp.ro_bitmap = cpu_to_be16(0x0002); plogi_tmpl.csp.e_d_tov = cpu_to_be32(2000); plogi_tmpl.class3.class_valid = 1; plogi_tmpl.class3.sequential = 1; plogi_tmpl.class3.conseq = 0xFF; plogi_tmpl.class3.ospx = 1; /* * prli_tmpl */ prli_tmpl.command = FC_ELS_PRLI; prli_tmpl.pglen = 0x10; prli_tmpl.pagebytes = cpu_to_be16(0x0014); prli_tmpl.parampage.type = FC_TYPE_FCP; prli_tmpl.parampage.imagepair = 1; prli_tmpl.parampage.servparams.rxrdisab = 1; /* * rrq_tmpl */ rrq_tmpl.els_cmd.els_code = FC_ELS_RRQ; /* * fcp_struct fchs_s mpl */ fcp_fchs_tmpl.routing = FC_RTG_FC4_DEV_DATA; fcp_fchs_tmpl.cat_info = FC_CAT_UNSOLICIT_CMD; fcp_fchs_tmpl.type = FC_TYPE_FCP; fcp_fchs_tmpl.f_ctl = bfa_hton3b(FCTL_FS_EXCH | FCTL_END_SEQ | FCTL_SI_XFER); fcp_fchs_tmpl.seq_id = 1; fcp_fchs_tmpl.rx_id = FC_RXID_ANY; } static void fc_gs_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u32 ox_id) { memset(fchs, 0, sizeof(struct fchs_s)); fchs->routing = FC_RTG_FC4_DEV_DATA; fchs->cat_info = FC_CAT_UNSOLICIT_CTRL; fchs->type = FC_TYPE_SERVICES; fchs->f_ctl = bfa_hton3b(FCTL_SEQ_INI | FCTL_FS_EXCH | FCTL_END_SEQ | FCTL_SI_XFER); fchs->rx_id = FC_RXID_ANY; fchs->d_id = (d_id); fchs->s_id = (s_id); 
fchs->ox_id = cpu_to_be16(ox_id); /* * @todo no need to set ox_id for request * no need to set rx_id for response */ } static void fc_gsresp_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id) { memset(fchs, 0, sizeof(struct fchs_s)); fchs->routing = FC_RTG_FC4_DEV_DATA; fchs->cat_info = FC_CAT_SOLICIT_CTRL; fchs->type = FC_TYPE_SERVICES; fchs->f_ctl = bfa_hton3b(FCTL_EC_RESP | FCTL_SEQ_INI | FCTL_LS_EXCH | FCTL_END_SEQ | FCTL_SI_XFER); fchs->d_id = d_id; fchs->s_id = s_id; fchs->ox_id = ox_id; } void fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id) { memcpy(fchs, &fc_els_req_tmpl, sizeof(struct fchs_s)); fchs->d_id = (d_id); fchs->s_id = (s_id); fchs->ox_id = cpu_to_be16(ox_id); } static void fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id) { memcpy(fchs, &fc_els_rsp_tmpl, sizeof(struct fchs_s)); fchs->d_id = d_id; fchs->s_id = s_id; fchs->ox_id = ox_id; } static void fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id) { memcpy(fchs, &fc_bls_rsp_tmpl, sizeof(struct fchs_s)); fchs->d_id = d_id; fchs->s_id = s_id; fchs->ox_id = ox_id; } static u16 fc_plogi_x_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, __be16 ox_id, wwn_t port_name, wwn_t node_name, u16 pdu_size, u16 bb_cr, u8 els_code) { struct fc_logi_s *plogi = (struct fc_logi_s *) (pld); memcpy(plogi, &plogi_tmpl, sizeof(struct fc_logi_s)); /* For FC AL bb_cr is 0 and altbbcred is 1 */ if (!bb_cr) plogi->csp.altbbcred = 1; plogi->els_cmd.els_code = els_code; if (els_code == FC_ELS_PLOGI) fc_els_req_build(fchs, d_id, s_id, ox_id); else fc_els_rsp_build(fchs, d_id, s_id, ox_id); plogi->csp.rxsz = plogi->class3.rxsz = cpu_to_be16(pdu_size); plogi->csp.bbcred = cpu_to_be16(bb_cr); memcpy(&plogi->port_name, &port_name, sizeof(wwn_t)); memcpy(&plogi->node_name, &node_name, sizeof(wwn_t)); return sizeof(struct fc_logi_s); } u16 fc_flogi_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id, u16 ox_id, wwn_t port_name, wwn_t node_name, u16 pdu_size, u8 set_npiv, u8 set_auth, u16 local_bb_credits) { u32 d_id = bfa_hton3b(FC_FABRIC_PORT); __be32 *vvl_info; memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s)); flogi->els_cmd.els_code = FC_ELS_FLOGI; fc_els_req_build(fchs, d_id, s_id, ox_id); flogi->csp.rxsz = flogi->class3.rxsz = cpu_to_be16(pdu_size); flogi->port_name = port_name; flogi->node_name = node_name; /* * Set the NPIV Capability Bit ( word 1, bit 31) of Common * Service Parameters. */ flogi->csp.ciro = set_npiv; /* set AUTH capability */ flogi->csp.security = set_auth; flogi->csp.bbcred = cpu_to_be16(local_bb_credits); /* Set brcd token in VVL */ vvl_info = (u32 *)&flogi->vvl[0]; /* set the flag to indicate the presence of VVL */ flogi->csp.npiv_supp = 1; /* @todo. 
field name is not correct */ vvl_info[0] = cpu_to_be32(FLOGI_VVL_BRCD); return sizeof(struct fc_logi_s); } u16 fc_flogi_acc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id, __be16 ox_id, wwn_t port_name, wwn_t node_name, u16 pdu_size, u16 local_bb_credits, u8 bb_scn) { u32 d_id = 0; u16 bbscn_rxsz = (bb_scn << 12) | pdu_size; memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s)); fc_els_rsp_build(fchs, d_id, s_id, ox_id); flogi->els_cmd.els_code = FC_ELS_ACC; flogi->class3.rxsz = cpu_to_be16(pdu_size); flogi->csp.rxsz = cpu_to_be16(bbscn_rxsz); /* bb_scn/rxsz */ flogi->port_name = port_name; flogi->node_name = node_name; flogi->csp.bbcred = cpu_to_be16(local_bb_credits); return sizeof(struct fc_logi_s); } u16 fc_fdisc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id, u16 ox_id, wwn_t port_name, wwn_t node_name, u16 pdu_size) { u32 d_id = bfa_hton3b(FC_FABRIC_PORT); memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s)); flogi->els_cmd.els_code = FC_ELS_FDISC; fc_els_req_build(fchs, d_id, s_id, ox_id); flogi->csp.rxsz = flogi->class3.rxsz = cpu_to_be16(pdu_size); flogi->port_name = port_name; flogi->node_name = node_name; return sizeof(struct fc_logi_s); } u16 fc_plogi_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, u16 ox_id, wwn_t port_name, wwn_t node_name, u16 pdu_size, u16 bb_cr) { return fc_plogi_x_build(fchs, pld, d_id, s_id, ox_id, port_name, node_name, pdu_size, bb_cr, FC_ELS_PLOGI); } u16 fc_plogi_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, u16 ox_id, wwn_t port_name, wwn_t node_name, u16 pdu_size, u16 bb_cr) { return fc_plogi_x_build(fchs, pld, d_id, s_id, ox_id, port_name, node_name, pdu_size, bb_cr, FC_ELS_ACC); } enum fc_parse_status fc_plogi_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name) { struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1); struct fc_logi_s *plogi; struct fc_ls_rjt_s *ls_rjt; switch (els_cmd->els_code) { case FC_ELS_LS_RJT: ls_rjt = (struct fc_ls_rjt_s *) (fchs + 1); if (ls_rjt->reason_code == FC_LS_RJT_RSN_LOGICAL_BUSY) return FC_PARSE_BUSY; else return FC_PARSE_FAILURE; case FC_ELS_ACC: plogi = (struct fc_logi_s *) (fchs + 1); if (len < sizeof(struct fc_logi_s)) return FC_PARSE_FAILURE; if (!wwn_is_equal(plogi->port_name, port_name)) return FC_PARSE_FAILURE; if (!plogi->class3.class_valid) return FC_PARSE_FAILURE; if (be16_to_cpu(plogi->class3.rxsz) < (FC_MIN_PDUSZ)) return FC_PARSE_FAILURE; return FC_PARSE_OK; default: return FC_PARSE_FAILURE; } } enum fc_parse_status fc_plogi_parse(struct fchs_s *fchs) { struct fc_logi_s *plogi = (struct fc_logi_s *) (fchs + 1); if (plogi->class3.class_valid != 1) return FC_PARSE_FAILURE; if ((be16_to_cpu(plogi->class3.rxsz) < FC_MIN_PDUSZ) || (be16_to_cpu(plogi->class3.rxsz) > FC_MAX_PDUSZ) || (plogi->class3.rxsz == 0)) return FC_PARSE_FAILURE; return FC_PARSE_OK; } u16 fc_prli_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, u16 ox_id) { struct fc_prli_s *prli = (struct fc_prli_s *) (pld); fc_els_req_build(fchs, d_id, s_id, ox_id); memcpy(prli, &prli_tmpl, sizeof(struct fc_prli_s)); prli->command = FC_ELS_PRLI; prli->parampage.servparams.initiator = 1; prli->parampage.servparams.retry = 1; prli->parampage.servparams.rec_support = 1; prli->parampage.servparams.task_retry_id = 0; prli->parampage.servparams.confirm = 1; return sizeof(struct fc_prli_s); } u16 fc_prli_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, __be16 ox_id, enum bfa_lport_role role) { struct fc_prli_s *prli = (struct fc_prli_s *) (pld); 
fc_els_rsp_build(fchs, d_id, s_id, ox_id); memcpy(prli, &prli_tmpl, sizeof(struct fc_prli_s)); prli->command = FC_ELS_ACC; prli->parampage.servparams.initiator = 1; prli->parampage.rspcode = FC_PRLI_ACC_XQTD; return sizeof(struct fc_prli_s); } enum fc_parse_status fc_prli_rsp_parse(struct fc_prli_s *prli, int len) { if (len < sizeof(struct fc_prli_s)) return FC_PARSE_FAILURE; if (prli->command != FC_ELS_ACC) return FC_PARSE_FAILURE; if ((prli->parampage.rspcode != FC_PRLI_ACC_XQTD) && (prli->parampage.rspcode != FC_PRLI_ACC_PREDEF_IMG)) return FC_PARSE_FAILURE; if (prli->parampage.servparams.target != 1) return FC_PARSE_FAILURE; return FC_PARSE_OK; } enum fc_parse_status fc_prli_parse(struct fc_prli_s *prli) { if (prli->parampage.type != FC_TYPE_FCP) return FC_PARSE_FAILURE; if (!prli->parampage.imagepair) return FC_PARSE_FAILURE; if (!prli->parampage.servparams.initiator) return FC_PARSE_FAILURE; return FC_PARSE_OK; } u16 fc_logo_build(struct fchs_s *fchs, struct fc_logo_s *logo, u32 d_id, u32 s_id, u16 ox_id, wwn_t port_name) { fc_els_req_build(fchs, d_id, s_id, ox_id); memset(logo, '\0', sizeof(struct fc_logo_s)); logo->els_cmd.els_code = FC_ELS_LOGO; logo->nport_id = (s_id); logo->orig_port_name = port_name; return sizeof(struct fc_logo_s); } static u16 fc_adisc_x_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id, u32 s_id, __be16 ox_id, wwn_t port_name, wwn_t node_name, u8 els_code) { memset(adisc, '\0', sizeof(struct fc_adisc_s)); adisc->els_cmd.els_code = els_code; if (els_code == FC_ELS_ADISC) fc_els_req_build(fchs, d_id, s_id, ox_id); else fc_els_rsp_build(fchs, d_id, s_id, ox_id); adisc->orig_HA = 0; adisc->orig_port_name = port_name; adisc->orig_node_name = node_name; adisc->nport_id = (s_id); return sizeof(struct fc_adisc_s); } u16 fc_adisc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id, u32 s_id, __be16 ox_id, wwn_t port_name, wwn_t node_name) { return fc_adisc_x_build(fchs, adisc, d_id, s_id, ox_id, port_name, node_name, FC_ELS_ADISC); } u16 fc_adisc_acc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id, u32 s_id, __be16 ox_id, wwn_t port_name, wwn_t node_name) { return fc_adisc_x_build(fchs, adisc, d_id, s_id, ox_id, port_name, node_name, FC_ELS_ACC); } enum fc_parse_status fc_adisc_rsp_parse(struct fc_adisc_s *adisc, int len, wwn_t port_name, wwn_t node_name) { if (len < sizeof(struct fc_adisc_s)) return FC_PARSE_FAILURE; if (adisc->els_cmd.els_code != FC_ELS_ACC) return FC_PARSE_FAILURE; if (!wwn_is_equal(adisc->orig_port_name, port_name)) return FC_PARSE_FAILURE; return FC_PARSE_OK; } enum fc_parse_status fc_adisc_parse(struct fchs_s *fchs, void *pld, u32 host_dap, wwn_t node_name, wwn_t port_name) { struct fc_adisc_s *adisc = (struct fc_adisc_s *) pld; if (adisc->els_cmd.els_code != FC_ELS_ACC) return FC_PARSE_FAILURE; if ((adisc->nport_id == (host_dap)) && wwn_is_equal(adisc->orig_port_name, port_name) && wwn_is_equal(adisc->orig_node_name, node_name)) return FC_PARSE_OK; return FC_PARSE_FAILURE; } enum fc_parse_status fc_pdisc_parse(struct fchs_s *fchs, wwn_t node_name, wwn_t port_name) { struct fc_logi_s *pdisc = (struct fc_logi_s *) (fchs + 1); if (pdisc->class3.class_valid != 1) return FC_PARSE_FAILURE; if ((be16_to_cpu(pdisc->class3.rxsz) < (FC_MIN_PDUSZ - sizeof(struct fchs_s))) || (pdisc->class3.rxsz == 0)) return FC_PARSE_FAILURE; if (!wwn_is_equal(pdisc->port_name, port_name)) return FC_PARSE_FAILURE; if (!wwn_is_equal(pdisc->node_name, node_name)) return FC_PARSE_FAILURE; return FC_PARSE_OK; } u16 fc_abts_build(struct 
fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id) { memcpy(fchs, &fc_bls_req_tmpl, sizeof(struct fchs_s)); fchs->cat_info = FC_CAT_ABTS; fchs->d_id = (d_id); fchs->s_id = (s_id); fchs->ox_id = cpu_to_be16(ox_id); return sizeof(struct fchs_s); } enum fc_parse_status fc_abts_rsp_parse(struct fchs_s *fchs, int len) { if ((fchs->cat_info == FC_CAT_BA_ACC) || (fchs->cat_info == FC_CAT_BA_RJT)) return FC_PARSE_OK; return FC_PARSE_FAILURE; } u16 fc_rrq_build(struct fchs_s *fchs, struct fc_rrq_s *rrq, u32 d_id, u32 s_id, u16 ox_id, u16 rrq_oxid) { fc_els_req_build(fchs, d_id, s_id, ox_id); /* * build rrq payload */ memcpy(rrq, &rrq_tmpl, sizeof(struct fc_rrq_s)); rrq->s_id = (s_id); rrq->ox_id = cpu_to_be16(rrq_oxid); rrq->rx_id = FC_RXID_ANY; return sizeof(struct fc_rrq_s); } u16 fc_logo_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, __be16 ox_id) { struct fc_els_cmd_s *acc = pld; fc_els_rsp_build(fchs, d_id, s_id, ox_id); memset(acc, 0, sizeof(struct fc_els_cmd_s)); acc->els_code = FC_ELS_ACC; return sizeof(struct fc_els_cmd_s); } u16 fc_ls_rjt_build(struct fchs_s *fchs, struct fc_ls_rjt_s *ls_rjt, u32 d_id, u32 s_id, __be16 ox_id, u8 reason_code, u8 reason_code_expl) { fc_els_rsp_build(fchs, d_id, s_id, ox_id); memset(ls_rjt, 0, sizeof(struct fc_ls_rjt_s)); ls_rjt->els_cmd.els_code = FC_ELS_LS_RJT; ls_rjt->reason_code = reason_code; ls_rjt->reason_code_expl = reason_code_expl; ls_rjt->vendor_unique = 0x00; return sizeof(struct fc_ls_rjt_s); } u16 fc_ba_acc_build(struct fchs_s *fchs, struct fc_ba_acc_s *ba_acc, u32 d_id, u32 s_id, __be16 ox_id, u16 rx_id) { fc_bls_rsp_build(fchs, d_id, s_id, ox_id); memcpy(ba_acc, &ba_acc_tmpl, sizeof(struct fc_ba_acc_s)); fchs->rx_id = rx_id; ba_acc->ox_id = fchs->ox_id; ba_acc->rx_id = fchs->rx_id; return sizeof(struct fc_ba_acc_s); } u16 fc_ls_acc_build(struct fchs_s *fchs, struct fc_els_cmd_s *els_cmd, u32 d_id, u32 s_id, __be16 ox_id) { fc_els_rsp_build(fchs, d_id, s_id, ox_id); memset(els_cmd, 0, sizeof(struct fc_els_cmd_s)); els_cmd->els_code = FC_ELS_ACC; return sizeof(struct fc_els_cmd_s); } int fc_logout_params_pages(struct fchs_s *fc_frame, u8 els_code) { int num_pages = 0; struct fc_prlo_s *prlo; struct fc_tprlo_s *tprlo; if (els_code == FC_ELS_PRLO) { prlo = (struct fc_prlo_s *) (fc_frame + 1); num_pages = (be16_to_cpu(prlo->payload_len) - 4) / 16; } else { tprlo = (struct fc_tprlo_s *) (fc_frame + 1); num_pages = (be16_to_cpu(tprlo->payload_len) - 4) / 16; } return num_pages; } u16 fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc, u32 d_id, u32 s_id, __be16 ox_id, int num_pages) { int page; fc_els_rsp_build(fchs, d_id, s_id, ox_id); memset(tprlo_acc, 0, (num_pages * 16) + 4); tprlo_acc->command = FC_ELS_ACC; tprlo_acc->page_len = 0x10; tprlo_acc->payload_len = cpu_to_be16((num_pages * 16) + 4); for (page = 0; page < num_pages; page++) { tprlo_acc->tprlo_acc_params[page].opa_valid = 0; tprlo_acc->tprlo_acc_params[page].rpa_valid = 0; tprlo_acc->tprlo_acc_params[page].fc4type_csp = FC_TYPE_FCP; tprlo_acc->tprlo_acc_params[page].orig_process_assc = 0; tprlo_acc->tprlo_acc_params[page].resp_process_assc = 0; } return be16_to_cpu(tprlo_acc->payload_len); } u16 fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc, u32 d_id, u32 s_id, __be16 ox_id, int num_pages) { int page; fc_els_rsp_build(fchs, d_id, s_id, ox_id); memset(prlo_acc, 0, (num_pages * 16) + 4); prlo_acc->command = FC_ELS_ACC; prlo_acc->page_len = 0x10; prlo_acc->payload_len = cpu_to_be16((num_pages * 16) + 4); for (page = 0; page < 
num_pages; page++) { prlo_acc->prlo_acc_params[page].opa_valid = 0; prlo_acc->prlo_acc_params[page].rpa_valid = 0; prlo_acc->prlo_acc_params[page].fc4type_csp = FC_TYPE_FCP; prlo_acc->prlo_acc_params[page].orig_process_assc = 0; prlo_acc->prlo_acc_params[page].resp_process_assc = 0; } return be16_to_cpu(prlo_acc->payload_len); } u16 fc_rnid_build(struct fchs_s *fchs, struct fc_rnid_cmd_s *rnid, u32 d_id, u32 s_id, u16 ox_id, u32 data_format) { fc_els_req_build(fchs, d_id, s_id, ox_id); memset(rnid, 0, sizeof(struct fc_rnid_cmd_s)); rnid->els_cmd.els_code = FC_ELS_RNID; rnid->node_id_data_format = data_format; return sizeof(struct fc_rnid_cmd_s); } u16 fc_rnid_acc_build(struct fchs_s *fchs, struct fc_rnid_acc_s *rnid_acc, u32 d_id, u32 s_id, __be16 ox_id, u32 data_format, struct fc_rnid_common_id_data_s *common_id_data, struct fc_rnid_general_topology_data_s *gen_topo_data) { memset(rnid_acc, 0, sizeof(struct fc_rnid_acc_s)); fc_els_rsp_build(fchs, d_id, s_id, ox_id); rnid_acc->els_cmd.els_code = FC_ELS_ACC; rnid_acc->node_id_data_format = data_format; rnid_acc->common_id_data_length = sizeof(struct fc_rnid_common_id_data_s); rnid_acc->common_id_data = *common_id_data; if (data_format == RNID_NODEID_DATA_FORMAT_DISCOVERY) { rnid_acc->specific_id_data_length = sizeof(struct fc_rnid_general_topology_data_s); rnid_acc->gen_topology_data = *gen_topo_data; return sizeof(struct fc_rnid_acc_s); } else { return sizeof(struct fc_rnid_acc_s) - sizeof(struct fc_rnid_general_topology_data_s); } } u16 fc_rpsc_build(struct fchs_s *fchs, struct fc_rpsc_cmd_s *rpsc, u32 d_id, u32 s_id, u16 ox_id) { fc_els_req_build(fchs, d_id, s_id, ox_id); memset(rpsc, 0, sizeof(struct fc_rpsc_cmd_s)); rpsc->els_cmd.els_code = FC_ELS_RPSC; return sizeof(struct fc_rpsc_cmd_s); } u16 fc_rpsc2_build(struct fchs_s *fchs, struct fc_rpsc2_cmd_s *rpsc2, u32 d_id, u32 s_id, u32 *pid_list, u16 npids) { u32 dctlr_id = FC_DOMAIN_CTRLR(bfa_hton3b(d_id)); int i = 0; fc_els_req_build(fchs, bfa_hton3b(dctlr_id), s_id, 0); memset(rpsc2, 0, sizeof(struct fc_rpsc2_cmd_s)); rpsc2->els_cmd.els_code = FC_ELS_RPSC; rpsc2->token = cpu_to_be32(FC_BRCD_TOKEN); rpsc2->num_pids = cpu_to_be16(npids); for (i = 0; i < npids; i++) rpsc2->pid_list[i].pid = pid_list[i]; return sizeof(struct fc_rpsc2_cmd_s) + ((npids - 1) * (sizeof(u32))); } u16 fc_rpsc_acc_build(struct fchs_s *fchs, struct fc_rpsc_acc_s *rpsc_acc, u32 d_id, u32 s_id, __be16 ox_id, struct fc_rpsc_speed_info_s *oper_speed) { memset(rpsc_acc, 0, sizeof(struct fc_rpsc_acc_s)); fc_els_rsp_build(fchs, d_id, s_id, ox_id); rpsc_acc->command = FC_ELS_ACC; rpsc_acc->num_entries = cpu_to_be16(1); rpsc_acc->speed_info[0].port_speed_cap = cpu_to_be16(oper_speed->port_speed_cap); rpsc_acc->speed_info[0].port_op_speed = cpu_to_be16(oper_speed->port_op_speed); return sizeof(struct fc_rpsc_acc_s); } u16 fc_pdisc_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id, wwn_t port_name, wwn_t node_name, u16 pdu_size) { struct fc_logi_s *pdisc = (struct fc_logi_s *) (fchs + 1); memcpy(pdisc, &plogi_tmpl, sizeof(struct fc_logi_s)); pdisc->els_cmd.els_code = FC_ELS_PDISC; fc_els_req_build(fchs, d_id, s_id, ox_id); pdisc->csp.rxsz = pdisc->class3.rxsz = cpu_to_be16(pdu_size); pdisc->port_name = port_name; pdisc->node_name = node_name; return sizeof(struct fc_logi_s); } u16 fc_pdisc_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name) { struct fc_logi_s *pdisc = (struct fc_logi_s *) (fchs + 1); if (len < sizeof(struct fc_logi_s)) return FC_PARSE_LEN_INVAL; if (pdisc->els_cmd.els_code != FC_ELS_ACC) return 
FC_PARSE_ACC_INVAL; if (!wwn_is_equal(pdisc->port_name, port_name)) return FC_PARSE_PWWN_NOT_EQUAL; if (!pdisc->class3.class_valid) return FC_PARSE_NWWN_NOT_EQUAL; if (be16_to_cpu(pdisc->class3.rxsz) < (FC_MIN_PDUSZ)) return FC_PARSE_RXSZ_INVAL; return FC_PARSE_OK; } u16 fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id, int num_pages) { struct fc_prlo_s *prlo = (struct fc_prlo_s *) (fchs + 1); int page; fc_els_req_build(fchs, d_id, s_id, ox_id); memset(prlo, 0, (num_pages * 16) + 4); prlo->command = FC_ELS_PRLO; prlo->page_len = 0x10; prlo->payload_len = cpu_to_be16((num_pages * 16) + 4); for (page = 0; page < num_pages; page++) { prlo->prlo_params[page].type = FC_TYPE_FCP; prlo->prlo_params[page].opa_valid = 0; prlo->prlo_params[page].rpa_valid = 0; prlo->prlo_params[page].orig_process_assc = 0; prlo->prlo_params[page].resp_process_assc = 0; } return be16_to_cpu(prlo->payload_len); } u16 fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id, int num_pages, enum fc_tprlo_type tprlo_type, u32 tpr_id) { struct fc_tprlo_s *tprlo = (struct fc_tprlo_s *) (fchs + 1); int page; fc_els_req_build(fchs, d_id, s_id, ox_id); memset(tprlo, 0, (num_pages * 16) + 4); tprlo->command = FC_ELS_TPRLO; tprlo->page_len = 0x10; tprlo->payload_len = cpu_to_be16((num_pages * 16) + 4); for (page = 0; page < num_pages; page++) { tprlo->tprlo_params[page].type = FC_TYPE_FCP; tprlo->tprlo_params[page].opa_valid = 0; tprlo->tprlo_params[page].rpa_valid = 0; tprlo->tprlo_params[page].orig_process_assc = 0; tprlo->tprlo_params[page].resp_process_assc = 0; if (tprlo_type == FC_GLOBAL_LOGO) { tprlo->tprlo_params[page].global_process_logout = 1; } else if (tprlo_type == FC_TPR_LOGO) { tprlo->tprlo_params[page].tpo_nport_valid = 1; tprlo->tprlo_params[page].tpo_nport_id = (tpr_id); } } return be16_to_cpu(tprlo->payload_len); } u16 fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id, u32 reason_code, u32 reason_expl) { struct fc_ba_rjt_s *ba_rjt = (struct fc_ba_rjt_s *) (fchs + 1); fc_bls_rsp_build(fchs, d_id, s_id, ox_id); fchs->cat_info = FC_CAT_BA_RJT; ba_rjt->reason_code = reason_code; ba_rjt->reason_expl = reason_expl; return sizeof(struct fc_ba_rjt_s); } static void fc_gs_cthdr_build(struct ct_hdr_s *cthdr, u32 s_id, u16 cmd_code) { memset(cthdr, 0, sizeof(struct ct_hdr_s)); cthdr->rev_id = CT_GS3_REVISION; cthdr->gs_type = CT_GSTYPE_DIRSERVICE; cthdr->gs_sub_type = CT_GSSUBTYPE_NAMESERVER; cthdr->cmd_rsp_code = cpu_to_be16(cmd_code); } static void fc_gs_fdmi_cthdr_build(struct ct_hdr_s *cthdr, u32 s_id, u16 cmd_code) { memset(cthdr, 0, sizeof(struct ct_hdr_s)); cthdr->rev_id = CT_GS3_REVISION; cthdr->gs_type = CT_GSTYPE_MGMTSERVICE; cthdr->gs_sub_type = CT_GSSUBTYPE_HBA_MGMTSERVER; cthdr->cmd_rsp_code = cpu_to_be16(cmd_code); } static void fc_gs_ms_cthdr_build(struct ct_hdr_s *cthdr, u32 s_id, u16 cmd_code, u8 sub_type) { memset(cthdr, 0, sizeof(struct ct_hdr_s)); cthdr->rev_id = CT_GS3_REVISION; cthdr->gs_type = CT_GSTYPE_MGMTSERVICE; cthdr->gs_sub_type = sub_type; cthdr->cmd_rsp_code = cpu_to_be16(cmd_code); } u16 fc_gidpn_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, wwn_t port_name) { struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; struct fcgs_gidpn_req_s *gidpn = (struct fcgs_gidpn_req_s *)(cthdr + 1); u32 d_id = bfa_hton3b(FC_NAME_SERVER); fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); fc_gs_cthdr_build(cthdr, s_id, GS_GID_PN); memset(gidpn, 0, sizeof(struct fcgs_gidpn_req_s)); gidpn->port_name = port_name; return sizeof(struct fcgs_gidpn_req_s) + 
sizeof(struct ct_hdr_s); } u16 fc_gpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, u32 port_id) { struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; fcgs_gpnid_req_t *gpnid = (fcgs_gpnid_req_t *) (cthdr + 1); u32 d_id = bfa_hton3b(FC_NAME_SERVER); fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); fc_gs_cthdr_build(cthdr, s_id, GS_GPN_ID); memset(gpnid, 0, sizeof(fcgs_gpnid_req_t)); gpnid->dap = port_id; return sizeof(fcgs_gpnid_req_t) + sizeof(struct ct_hdr_s); } u16 fc_gnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, u32 port_id) { struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; fcgs_gnnid_req_t *gnnid = (fcgs_gnnid_req_t *) (cthdr + 1); u32 d_id = bfa_hton3b(FC_NAME_SERVER); fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); fc_gs_cthdr_build(cthdr, s_id, GS_GNN_ID); memset(gnnid, 0, sizeof(fcgs_gnnid_req_t)); gnnid->dap = port_id; return sizeof(fcgs_gnnid_req_t) + sizeof(struct ct_hdr_s); } u16 fc_ct_rsp_parse(struct ct_hdr_s *cthdr) { if (be16_to_cpu(cthdr->cmd_rsp_code) != CT_RSP_ACCEPT) { if (cthdr->reason_code == CT_RSN_LOGICAL_BUSY) return FC_PARSE_BUSY; else return FC_PARSE_FAILURE; } return FC_PARSE_OK; } u16 fc_gs_rjt_build(struct fchs_s *fchs, struct ct_hdr_s *cthdr, u32 d_id, u32 s_id, u16 ox_id, u8 reason_code, u8 reason_code_expl) { fc_gsresp_fchdr_build(fchs, d_id, s_id, ox_id); cthdr->cmd_rsp_code = cpu_to_be16(CT_RSP_REJECT); cthdr->rev_id = CT_GS3_REVISION; cthdr->reason_code = reason_code; cthdr->exp_code = reason_code_expl; return sizeof(struct ct_hdr_s); } u16 fc_scr_build(struct fchs_s *fchs, struct fc_scr_s *scr, u8 set_br_reg, u32 s_id, u16 ox_id) { u32 d_id = bfa_hton3b(FC_FABRIC_CONTROLLER); fc_els_req_build(fchs, d_id, s_id, ox_id); memset(scr, 0, sizeof(struct fc_scr_s)); scr->command = FC_ELS_SCR; scr->reg_func = FC_SCR_REG_FUNC_FULL; if (set_br_reg) scr->vu_reg_func = FC_VU_SCR_REG_FUNC_FABRIC_NAME_CHANGE; return sizeof(struct fc_scr_s); } u16 fc_rscn_build(struct fchs_s *fchs, struct fc_rscn_pl_s *rscn, u32 s_id, u16 ox_id) { u32 d_id = bfa_hton3b(FC_FABRIC_CONTROLLER); u16 payldlen; fc_els_req_build(fchs, d_id, s_id, ox_id); rscn->command = FC_ELS_RSCN; rscn->pagelen = sizeof(rscn->event[0]); payldlen = sizeof(u32) + rscn->pagelen; rscn->payldlen = cpu_to_be16(payldlen); rscn->event[0].format = FC_RSCN_FORMAT_PORTID; rscn->event[0].portid = s_id; return struct_size(rscn, event, 1); } u16 fc_rftid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, enum bfa_lport_role roles) { struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; struct fcgs_rftid_req_s *rftid = (struct fcgs_rftid_req_s *)(cthdr + 1); u32 type_value, d_id = bfa_hton3b(FC_NAME_SERVER); u8 index; fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); fc_gs_cthdr_build(cthdr, s_id, GS_RFT_ID); memset(rftid, 0, sizeof(struct fcgs_rftid_req_s)); rftid->dap = s_id; /* By default, FCP FC4 Type is registered */ index = FC_TYPE_FCP >> 5; type_value = 1 << (FC_TYPE_FCP % 32); rftid->fc4_type[index] = cpu_to_be32(type_value); return sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s); } u16 fc_rftid_build_sol(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, u8 *fc4_bitmap, u32 bitmap_size) { struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; struct fcgs_rftid_req_s *rftid = (struct fcgs_rftid_req_s *)(cthdr + 1); u32 d_id = bfa_hton3b(FC_NAME_SERVER); fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); fc_gs_cthdr_build(cthdr, s_id, GS_RFT_ID); memset(rftid, 0, sizeof(struct fcgs_rftid_req_s)); rftid->dap = s_id; memcpy((void *)rftid->fc4_type, (void *)fc4_bitmap, (bitmap_size < 32 
? bitmap_size : 32)); return sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s); } u16 fc_rffid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, u8 fc4_type, u8 fc4_ftrs) { struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; struct fcgs_rffid_req_s *rffid = (struct fcgs_rffid_req_s *)(cthdr + 1); u32 d_id = bfa_hton3b(FC_NAME_SERVER); fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); fc_gs_cthdr_build(cthdr, s_id, GS_RFF_ID); memset(rffid, 0, sizeof(struct fcgs_rffid_req_s)); rffid->dap = s_id; rffid->fc4ftr_bits = fc4_ftrs; rffid->fc4_type = fc4_type; return sizeof(struct fcgs_rffid_req_s) + sizeof(struct ct_hdr_s); } u16 fc_rspnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, u8 *name) { struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; struct fcgs_rspnid_req_s *rspnid = (struct fcgs_rspnid_req_s *)(cthdr + 1); u32 d_id = bfa_hton3b(FC_NAME_SERVER); fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); fc_gs_cthdr_build(cthdr, s_id, GS_RSPN_ID); memset(rspnid, 0, sizeof(struct fcgs_rspnid_req_s)); rspnid->dap = s_id; strscpy(rspnid->spn, name, sizeof(rspnid->spn)); rspnid->spn_len = (u8) strlen(rspnid->spn); return sizeof(struct fcgs_rspnid_req_s) + sizeof(struct ct_hdr_s); } u16 fc_rsnn_nn_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t node_name, u8 *name) { struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; struct fcgs_rsnn_nn_req_s *rsnn_nn = (struct fcgs_rsnn_nn_req_s *) (cthdr + 1); u32 d_id = bfa_hton3b(FC_NAME_SERVER); fc_gs_fchdr_build(fchs, d_id, s_id, 0); fc_gs_cthdr_build(cthdr, s_id, GS_RSNN_NN); memset(rsnn_nn, 0, sizeof(struct fcgs_rsnn_nn_req_s)); rsnn_nn->node_name = node_name; strscpy(rsnn_nn->snn, name, sizeof(rsnn_nn->snn)); rsnn_nn->snn_len = (u8) strlen(rsnn_nn->snn); return sizeof(struct fcgs_rsnn_nn_req_s) + sizeof(struct ct_hdr_s); } u16 fc_gid_ft_build(struct fchs_s *fchs, void *pyld, u32 s_id, u8 fc4_type) { struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; struct fcgs_gidft_req_s *gidft = (struct fcgs_gidft_req_s *)(cthdr + 1); u32 d_id = bfa_hton3b(FC_NAME_SERVER); fc_gs_fchdr_build(fchs, d_id, s_id, 0); fc_gs_cthdr_build(cthdr, s_id, GS_GID_FT); memset(gidft, 0, sizeof(struct fcgs_gidft_req_s)); gidft->fc4_type = fc4_type; gidft->domain_id = 0; gidft->area_id = 0; return sizeof(struct fcgs_gidft_req_s) + sizeof(struct ct_hdr_s); } u16 fc_rpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id, wwn_t port_name) { struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; struct fcgs_rpnid_req_s *rpnid = (struct fcgs_rpnid_req_s *)(cthdr + 1); u32 d_id = bfa_hton3b(FC_NAME_SERVER); fc_gs_fchdr_build(fchs, d_id, s_id, 0); fc_gs_cthdr_build(cthdr, s_id, GS_RPN_ID); memset(rpnid, 0, sizeof(struct fcgs_rpnid_req_s)); rpnid->port_id = port_id; rpnid->port_name = port_name; return sizeof(struct fcgs_rpnid_req_s) + sizeof(struct ct_hdr_s); } u16 fc_rnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id, wwn_t node_name) { struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; struct fcgs_rnnid_req_s *rnnid = (struct fcgs_rnnid_req_s *)(cthdr + 1); u32 d_id = bfa_hton3b(FC_NAME_SERVER); fc_gs_fchdr_build(fchs, d_id, s_id, 0); fc_gs_cthdr_build(cthdr, s_id, GS_RNN_ID); memset(rnnid, 0, sizeof(struct fcgs_rnnid_req_s)); rnnid->port_id = port_id; rnnid->node_name = node_name; return sizeof(struct fcgs_rnnid_req_s) + sizeof(struct ct_hdr_s); } u16 fc_rcsid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id, u32 cos) { struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; struct fcgs_rcsid_req_s *rcsid = (struct fcgs_rcsid_req_s *) 
(cthdr + 1); u32 d_id = bfa_hton3b(FC_NAME_SERVER); fc_gs_fchdr_build(fchs, d_id, s_id, 0); fc_gs_cthdr_build(cthdr, s_id, GS_RCS_ID); memset(rcsid, 0, sizeof(struct fcgs_rcsid_req_s)); rcsid->port_id = port_id; rcsid->cos = cos; return sizeof(struct fcgs_rcsid_req_s) + sizeof(struct ct_hdr_s); } u16 fc_rptid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id, u8 port_type) { struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; struct fcgs_rptid_req_s *rptid = (struct fcgs_rptid_req_s *)(cthdr + 1); u32 d_id = bfa_hton3b(FC_NAME_SERVER); fc_gs_fchdr_build(fchs, d_id, s_id, 0); fc_gs_cthdr_build(cthdr, s_id, GS_RPT_ID); memset(rptid, 0, sizeof(struct fcgs_rptid_req_s)); rptid->port_id = port_id; rptid->port_type = port_type; return sizeof(struct fcgs_rptid_req_s) + sizeof(struct ct_hdr_s); } u16 fc_ganxt_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id) { struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; struct fcgs_ganxt_req_s *ganxt = (struct fcgs_ganxt_req_s *)(cthdr + 1); u32 d_id = bfa_hton3b(FC_NAME_SERVER); fc_gs_fchdr_build(fchs, d_id, s_id, 0); fc_gs_cthdr_build(cthdr, s_id, GS_GA_NXT); memset(ganxt, 0, sizeof(struct fcgs_ganxt_req_s)); ganxt->port_id = port_id; return sizeof(struct ct_hdr_s) + sizeof(struct fcgs_ganxt_req_s); } /* * Builds fc hdr and ct hdr for FDMI requests. */ u16 fc_fdmi_reqhdr_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 cmd_code) { struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; u32 d_id = bfa_hton3b(FC_MGMT_SERVER); fc_gs_fchdr_build(fchs, d_id, s_id, 0); fc_gs_fdmi_cthdr_build(cthdr, s_id, cmd_code); return sizeof(struct ct_hdr_s); } /* * Given a FC4 Type, this function returns a fc4 type bitmask */ void fc_get_fc4type_bitmask(u8 fc4_type, u8 *bit_mask) { u8 index; __be32 *ptr = (__be32 *) bit_mask; u32 type_value; /* * @todo : Check for bitmask size */ index = fc4_type >> 5; type_value = 1 << (fc4_type % 32); ptr[index] = cpu_to_be32(type_value); } /* * GMAL Request */ u16 fc_gmal_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn) { struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; fcgs_gmal_req_t *gmal = (fcgs_gmal_req_t *) (cthdr + 1); u32 d_id = bfa_hton3b(FC_MGMT_SERVER); fc_gs_fchdr_build(fchs, d_id, s_id, 0); fc_gs_ms_cthdr_build(cthdr, s_id, GS_FC_GMAL_CMD, CT_GSSUBTYPE_CFGSERVER); memset(gmal, 0, sizeof(fcgs_gmal_req_t)); gmal->wwn = wwn; return sizeof(struct ct_hdr_s) + sizeof(fcgs_gmal_req_t); } /* * GFN (Get Fabric Name) Request */ u16 fc_gfn_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn) { struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; fcgs_gfn_req_t *gfn = (fcgs_gfn_req_t *) (cthdr + 1); u32 d_id = bfa_hton3b(FC_MGMT_SERVER); fc_gs_fchdr_build(fchs, d_id, s_id, 0); fc_gs_ms_cthdr_build(cthdr, s_id, GS_FC_GFN_CMD, CT_GSSUBTYPE_CFGSERVER); memset(gfn, 0, sizeof(fcgs_gfn_req_t)); gfn->wwn = wwn; return sizeof(struct ct_hdr_s) + sizeof(fcgs_gfn_req_t); }
linux-master
drivers/scsi/bfa/bfa_fcbuild.c
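The builders in bfa_fcbuild.c above share one calling convention: the caller supplies a frame header (struct fchs_s) and a payload buffer, the helper fills both from a static template, and the return value is the payload length handed to the send path. The sketch below is illustrative only and not part of the driver; the destination ID, WWNs, PDU size and BB credit are hypothetical placeholders, and in the driver itself the payload buffer comes from bfa_fcxp_get_reqbuf() and the frame is sent with bfa_fcxp_send(), as in the PLOGI send path of bfa_fcs_rport.c that follows.

/*
 * Illustrative sketch only (not part of the driver): how a caller might
 * use fc_plogi_build(). All numeric IDs, WWNs and the PDU size below are
 * made-up placeholder values.
 */
#include "bfa_fcbuild.h"

static u16 example_build_plogi(struct fchs_s *fchs, void *payload)
{
	u32 d_id = 0x010200;	/* hypothetical destination N_Port ID */
	u32 s_id = 0x010100;	/* hypothetical source N_Port ID */
	wwn_t pwwn = 0;		/* placeholder port name */
	wwn_t nwwn = 0;		/* placeholder node name */
	u16 pdu_size = 2048;	/* placeholder receive data field size */
	u16 bb_cr = 0;		/* placeholder BB credit */

	/*
	 * Fills *fchs and *payload from the PLOGI template and returns the
	 * payload length that the caller passes to the transport.
	 */
	return fc_plogi_build(fchs, payload, d_id, s_id, 0 /* ox_id */,
			      pwwn, nwwn, pdu_size, bb_cr);
}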
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. * Copyright (c) 2014- QLogic Corporation. * All rights reserved * www.qlogic.com * * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. */ /* * rport.c Remote port implementation. */ #include "bfad_drv.h" #include "bfad_im.h" #include "bfa_fcs.h" #include "bfa_fcbuild.h" BFA_TRC_FILE(FCS, RPORT); static u32 bfa_fcs_rport_del_timeout = BFA_FCS_RPORT_DEF_DEL_TIMEOUT * 1000; /* In millisecs */ /* * bfa_fcs_rport_max_logins is max count of bfa_fcs_rports * whereas DEF_CFG_NUM_RPORTS is max count of bfa_rports */ static u32 bfa_fcs_rport_max_logins = BFA_FCS_MAX_RPORT_LOGINS; /* * forward declarations */ static struct bfa_fcs_rport_s *bfa_fcs_rport_alloc( struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid); static void bfa_fcs_rport_free(struct bfa_fcs_rport_s *rport); static void bfa_fcs_rport_hal_online(struct bfa_fcs_rport_s *rport); static void bfa_fcs_rport_fcs_online_action(struct bfa_fcs_rport_s *rport); static void bfa_fcs_rport_hal_online_action(struct bfa_fcs_rport_s *rport); static void bfa_fcs_rport_fcs_offline_action(struct bfa_fcs_rport_s *rport); static void bfa_fcs_rport_hal_offline_action(struct bfa_fcs_rport_s *rport); static void bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, struct fc_logi_s *plogi); static void bfa_fcs_rport_timeout(void *arg); static void bfa_fcs_rport_send_plogi(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced); static void bfa_fcs_rport_send_plogiacc(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced); static void bfa_fcs_rport_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, bfa_status_t req_status, u32 rsp_len, u32 resid_len, struct fchs_s *rsp_fchs); static void bfa_fcs_rport_send_adisc(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced); static void bfa_fcs_rport_adisc_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, bfa_status_t req_status, u32 rsp_len, u32 resid_len, struct fchs_s *rsp_fchs); static void bfa_fcs_rport_send_nsdisc(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced); static void bfa_fcs_rport_gidpn_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, bfa_status_t req_status, u32 rsp_len, u32 resid_len, struct fchs_s *rsp_fchs); static void bfa_fcs_rport_gpnid_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, bfa_status_t req_status, u32 rsp_len, u32 resid_len, struct fchs_s *rsp_fchs); static void bfa_fcs_rport_send_logo(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced); static void bfa_fcs_rport_send_logo_acc(void *rport_cbarg); static void bfa_fcs_rport_process_prli(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs, u16 len); static void bfa_fcs_rport_send_ls_rjt(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs, u8 reason_code, u8 reason_code_expl); static void bfa_fcs_rport_process_adisc(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs, u16 len); static void bfa_fcs_rport_send_prlo_acc(struct bfa_fcs_rport_s *rport); static void bfa_fcs_rport_hal_offline(struct bfa_fcs_rport_s *rport); static void bfa_fcs_rport_sm_uninit(struct bfa_fcs_rport_s *rport, enum rport_event event); static void bfa_fcs_rport_sm_plogi_sending(struct bfa_fcs_rport_s *rport, enum rport_event event); static void bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport, enum rport_event event); static void bfa_fcs_rport_sm_plogi_retry(struct bfa_fcs_rport_s *rport, enum rport_event event); static void bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum 
rport_event event);
static void	bfa_fcs_rport_sm_fc4_fcs_online(struct bfa_fcs_rport_s *rport, enum rport_event event);
static void	bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport, enum rport_event event);
static void	bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport, enum rport_event event);
static void	bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport, enum rport_event event);
static void	bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, enum rport_event event);
static void	bfa_fcs_rport_sm_adisc_online_sending(struct bfa_fcs_rport_s *rport, enum rport_event event);
static void	bfa_fcs_rport_sm_adisc_online(struct bfa_fcs_rport_s *rport, enum rport_event event);
static void	bfa_fcs_rport_sm_adisc_offline_sending(struct bfa_fcs_rport_s *rport, enum rport_event event);
static void	bfa_fcs_rport_sm_adisc_offline(struct bfa_fcs_rport_s *rport, enum rport_event event);
static void	bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport, enum rport_event event);
static void	bfa_fcs_rport_sm_fc4_logosend(struct bfa_fcs_rport_s *rport, enum rport_event event);
static void	bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport, enum rport_event event);
static void	bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport, enum rport_event event);
static void	bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport, enum rport_event event);
static void	bfa_fcs_rport_sm_hcb_logosend(struct bfa_fcs_rport_s *rport, enum rport_event event);
static void	bfa_fcs_rport_sm_logo_sending(struct bfa_fcs_rport_s *rport, enum rport_event event);
static void	bfa_fcs_rport_sm_offline(struct bfa_fcs_rport_s *rport, enum rport_event event);
static void	bfa_fcs_rport_sm_nsdisc_sending(struct bfa_fcs_rport_s *rport, enum rport_event event);
static void	bfa_fcs_rport_sm_nsdisc_retry(struct bfa_fcs_rport_s *rport, enum rport_event event);
static void	bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport, enum rport_event event);
static void	bfa_fcs_rport_sm_fc4_off_delete(struct bfa_fcs_rport_s *rport, enum rport_event event);
static void	bfa_fcs_rport_sm_delete_pending(struct bfa_fcs_rport_s *rport, enum rport_event event);

static struct bfa_sm_table_s rport_sm_table[] = {
	{BFA_SM(bfa_fcs_rport_sm_uninit), BFA_RPORT_UNINIT},
	{BFA_SM(bfa_fcs_rport_sm_plogi_sending), BFA_RPORT_PLOGI},
	{BFA_SM(bfa_fcs_rport_sm_plogiacc_sending), BFA_RPORT_ONLINE},
	{BFA_SM(bfa_fcs_rport_sm_plogi_retry), BFA_RPORT_PLOGI_RETRY},
	{BFA_SM(bfa_fcs_rport_sm_plogi), BFA_RPORT_PLOGI},
	{BFA_SM(bfa_fcs_rport_sm_fc4_fcs_online), BFA_RPORT_ONLINE},
	{BFA_SM(bfa_fcs_rport_sm_hal_online), BFA_RPORT_ONLINE},
	{BFA_SM(bfa_fcs_rport_sm_online), BFA_RPORT_ONLINE},
	{BFA_SM(bfa_fcs_rport_sm_nsquery_sending), BFA_RPORT_NSQUERY},
	{BFA_SM(bfa_fcs_rport_sm_nsquery), BFA_RPORT_NSQUERY},
	{BFA_SM(bfa_fcs_rport_sm_adisc_online_sending), BFA_RPORT_ADISC},
	{BFA_SM(bfa_fcs_rport_sm_adisc_online), BFA_RPORT_ADISC},
	{BFA_SM(bfa_fcs_rport_sm_adisc_offline_sending), BFA_RPORT_ADISC},
	{BFA_SM(bfa_fcs_rport_sm_adisc_offline), BFA_RPORT_ADISC},
	{BFA_SM(bfa_fcs_rport_sm_fc4_logorcv), BFA_RPORT_LOGORCV},
	{BFA_SM(bfa_fcs_rport_sm_fc4_logosend), BFA_RPORT_LOGO},
	{BFA_SM(bfa_fcs_rport_sm_fc4_offline), BFA_RPORT_OFFLINE},
	{BFA_SM(bfa_fcs_rport_sm_hcb_offline), BFA_RPORT_OFFLINE},
	{BFA_SM(bfa_fcs_rport_sm_hcb_logorcv), BFA_RPORT_LOGORCV},
	{BFA_SM(bfa_fcs_rport_sm_hcb_logosend), BFA_RPORT_LOGO},
	{BFA_SM(bfa_fcs_rport_sm_logo_sending),
BFA_RPORT_LOGO}, {BFA_SM(bfa_fcs_rport_sm_offline), BFA_RPORT_OFFLINE}, {BFA_SM(bfa_fcs_rport_sm_nsdisc_sending), BFA_RPORT_NSDISC}, {BFA_SM(bfa_fcs_rport_sm_nsdisc_retry), BFA_RPORT_NSDISC}, {BFA_SM(bfa_fcs_rport_sm_nsdisc_sent), BFA_RPORT_NSDISC}, }; /* * Beginning state. */ static void bfa_fcs_rport_sm_uninit(struct bfa_fcs_rport_s *rport, enum rport_event event) { bfa_trc(rport->fcs, rport->pwwn); bfa_trc(rport->fcs, rport->pid); bfa_trc(rport->fcs, event); switch (event) { case RPSM_EVENT_PLOGI_SEND: bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending); rport->plogi_retries = 0; bfa_fcs_rport_send_plogi(rport, NULL); break; case RPSM_EVENT_PLOGI_RCVD: bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending); bfa_fcs_rport_send_plogiacc(rport, NULL); break; case RPSM_EVENT_PLOGI_COMP: bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online); bfa_fcs_rport_hal_online(rport); break; case RPSM_EVENT_ADDRESS_CHANGE: case RPSM_EVENT_ADDRESS_DISC: bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); rport->ns_retries = 0; bfa_fcs_rport_send_nsdisc(rport, NULL); break; default: bfa_sm_fault(rport->fcs, event); } } /* * PLOGI is being sent. */ static void bfa_fcs_rport_sm_plogi_sending(struct bfa_fcs_rport_s *rport, enum rport_event event) { bfa_trc(rport->fcs, rport->pwwn); bfa_trc(rport->fcs, rport->pid); bfa_trc(rport->fcs, event); switch (event) { case RPSM_EVENT_FCXP_SENT: bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi); break; case RPSM_EVENT_DELETE: bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); bfa_fcs_rport_free(rport); break; case RPSM_EVENT_PLOGI_RCVD: bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending); bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); bfa_fcs_rport_send_plogiacc(rport, NULL); break; case RPSM_EVENT_SCN_OFFLINE: bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); bfa_timer_start(rport->fcs->bfa, &rport->timer, bfa_fcs_rport_timeout, rport, bfa_fcs_rport_del_timeout); break; case RPSM_EVENT_ADDRESS_CHANGE: case RPSM_EVENT_FAB_SCN: /* query the NS */ bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); WARN_ON(!(bfa_fcport_get_topology(rport->port->fcs->bfa) != BFA_PORT_TOPOLOGY_LOOP)); bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); rport->ns_retries = 0; bfa_fcs_rport_send_nsdisc(rport, NULL); break; case RPSM_EVENT_LOGO_IMP: rport->pid = 0; bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); bfa_timer_start(rport->fcs->bfa, &rport->timer, bfa_fcs_rport_timeout, rport, bfa_fcs_rport_del_timeout); break; default: bfa_sm_fault(rport->fcs, event); } } /* * PLOGI is being sent. */ static void bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport, enum rport_event event) { bfa_trc(rport->fcs, rport->pwwn); bfa_trc(rport->fcs, rport->pid); bfa_trc(rport->fcs, event); switch (event) { case RPSM_EVENT_FCXP_SENT: bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online); bfa_fcs_rport_fcs_online_action(rport); break; case RPSM_EVENT_DELETE: bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); bfa_fcs_rport_free(rport); break; case RPSM_EVENT_PLOGI_RCVD: case RPSM_EVENT_PLOGI_COMP: case RPSM_EVENT_FAB_SCN: /* * Ignore, SCN is possibly online notification. 
*/ break; case RPSM_EVENT_SCN_OFFLINE: bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); bfa_timer_start(rport->fcs->bfa, &rport->timer, bfa_fcs_rport_timeout, rport, bfa_fcs_rport_del_timeout); break; case RPSM_EVENT_ADDRESS_CHANGE: bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); rport->ns_retries = 0; bfa_fcs_rport_send_nsdisc(rport, NULL); break; case RPSM_EVENT_LOGO_IMP: rport->pid = 0; bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); bfa_timer_start(rport->fcs->bfa, &rport->timer, bfa_fcs_rport_timeout, rport, bfa_fcs_rport_del_timeout); break; case RPSM_EVENT_HCB_OFFLINE: /* * Ignore BFA callback, on a PLOGI receive we call bfa offline. */ break; default: bfa_sm_fault(rport->fcs, event); } } /* * PLOGI is sent. */ static void bfa_fcs_rport_sm_plogi_retry(struct bfa_fcs_rport_s *rport, enum rport_event event) { bfa_trc(rport->fcs, rport->pwwn); bfa_trc(rport->fcs, rport->pid); bfa_trc(rport->fcs, event); switch (event) { case RPSM_EVENT_TIMEOUT: bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending); bfa_fcs_rport_send_plogi(rport, NULL); break; case RPSM_EVENT_DELETE: bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); bfa_timer_stop(&rport->timer); bfa_fcs_rport_free(rport); break; case RPSM_EVENT_PRLO_RCVD: case RPSM_EVENT_LOGO_RCVD: break; case RPSM_EVENT_PLOGI_RCVD: bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending); bfa_timer_stop(&rport->timer); bfa_fcs_rport_send_plogiacc(rport, NULL); break; case RPSM_EVENT_SCN_OFFLINE: bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); bfa_timer_stop(&rport->timer); bfa_timer_start(rport->fcs->bfa, &rport->timer, bfa_fcs_rport_timeout, rport, bfa_fcs_rport_del_timeout); break; case RPSM_EVENT_ADDRESS_CHANGE: case RPSM_EVENT_FAB_SCN: bfa_timer_stop(&rport->timer); WARN_ON(!(bfa_fcport_get_topology(rport->port->fcs->bfa) != BFA_PORT_TOPOLOGY_LOOP)); bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); rport->ns_retries = 0; bfa_fcs_rport_send_nsdisc(rport, NULL); break; case RPSM_EVENT_LOGO_IMP: rport->pid = 0; bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); bfa_timer_stop(&rport->timer); bfa_timer_start(rport->fcs->bfa, &rport->timer, bfa_fcs_rport_timeout, rport, bfa_fcs_rport_del_timeout); break; case RPSM_EVENT_PLOGI_COMP: bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online); bfa_timer_stop(&rport->timer); bfa_fcs_rport_fcs_online_action(rport); break; default: bfa_sm_fault(rport->fcs, event); } } /* * PLOGI is sent. 
*/ static void bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum rport_event event) { bfa_trc(rport->fcs, rport->pwwn); bfa_trc(rport->fcs, rport->pid); bfa_trc(rport->fcs, event); switch (event) { case RPSM_EVENT_ACCEPTED: bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online); rport->plogi_retries = 0; bfa_fcs_rport_fcs_online_action(rport); break; case RPSM_EVENT_LOGO_RCVD: bfa_fcs_rport_send_logo_acc(rport); fallthrough; case RPSM_EVENT_PRLO_RCVD: if (rport->prlo == BFA_TRUE) bfa_fcs_rport_send_prlo_acc(rport); bfa_fcxp_discard(rport->fcxp); fallthrough; case RPSM_EVENT_FAILED: if (rport->plogi_retries < BFA_FCS_RPORT_MAX_RETRIES) { rport->plogi_retries++; bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_retry); bfa_timer_start(rport->fcs->bfa, &rport->timer, bfa_fcs_rport_timeout, rport, BFA_FCS_RETRY_TIMEOUT); } else { bfa_stats(rport->port, rport_del_max_plogi_retry); rport->old_pid = rport->pid; rport->pid = 0; bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); bfa_timer_start(rport->fcs->bfa, &rport->timer, bfa_fcs_rport_timeout, rport, bfa_fcs_rport_del_timeout); } break; case RPSM_EVENT_SCN_ONLINE: break; case RPSM_EVENT_SCN_OFFLINE: bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); bfa_fcxp_discard(rport->fcxp); bfa_timer_start(rport->fcs->bfa, &rport->timer, bfa_fcs_rport_timeout, rport, bfa_fcs_rport_del_timeout); break; case RPSM_EVENT_PLOGI_RETRY: rport->plogi_retries = 0; bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_retry); bfa_timer_start(rport->fcs->bfa, &rport->timer, bfa_fcs_rport_timeout, rport, (FC_RA_TOV * 1000)); break; case RPSM_EVENT_LOGO_IMP: rport->pid = 0; bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); bfa_fcxp_discard(rport->fcxp); bfa_timer_start(rport->fcs->bfa, &rport->timer, bfa_fcs_rport_timeout, rport, bfa_fcs_rport_del_timeout); break; case RPSM_EVENT_ADDRESS_CHANGE: case RPSM_EVENT_FAB_SCN: bfa_fcxp_discard(rport->fcxp); WARN_ON(!(bfa_fcport_get_topology(rport->port->fcs->bfa) != BFA_PORT_TOPOLOGY_LOOP)); bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); rport->ns_retries = 0; bfa_fcs_rport_send_nsdisc(rport, NULL); break; case RPSM_EVENT_PLOGI_RCVD: bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending); bfa_fcxp_discard(rport->fcxp); bfa_fcs_rport_send_plogiacc(rport, NULL); break; case RPSM_EVENT_DELETE: bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); bfa_fcxp_discard(rport->fcxp); bfa_fcs_rport_free(rport); break; case RPSM_EVENT_PLOGI_COMP: bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online); bfa_fcxp_discard(rport->fcxp); bfa_fcs_rport_fcs_online_action(rport); break; default: bfa_sm_fault(rport->fcs, event); } } /* * PLOGI is done. 
Await bfa_fcs_itnim to ascertain the scsi function */ static void bfa_fcs_rport_sm_fc4_fcs_online(struct bfa_fcs_rport_s *rport, enum rport_event event) { bfa_trc(rport->fcs, rport->pwwn); bfa_trc(rport->fcs, rport->pid); bfa_trc(rport->fcs, event); switch (event) { case RPSM_EVENT_FC4_FCS_ONLINE: if (rport->scsi_function == BFA_RPORT_INITIATOR) { if (!BFA_FCS_PID_IS_WKA(rport->pid)) bfa_fcs_rpf_rport_online(rport); bfa_sm_set_state(rport, bfa_fcs_rport_sm_online); break; } if (!rport->bfa_rport) rport->bfa_rport = bfa_rport_create(rport->fcs->bfa, rport); if (rport->bfa_rport) { bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online); bfa_fcs_rport_hal_online(rport); } else { bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend); bfa_fcs_rport_fcs_offline_action(rport); } break; case RPSM_EVENT_PLOGI_RCVD: bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); rport->plogi_pending = BFA_TRUE; bfa_fcs_rport_fcs_offline_action(rport); break; case RPSM_EVENT_PLOGI_COMP: case RPSM_EVENT_LOGO_IMP: case RPSM_EVENT_ADDRESS_CHANGE: case RPSM_EVENT_FAB_SCN: case RPSM_EVENT_SCN_OFFLINE: bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); bfa_fcs_rport_fcs_offline_action(rport); break; case RPSM_EVENT_LOGO_RCVD: case RPSM_EVENT_PRLO_RCVD: bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv); bfa_fcs_rport_fcs_offline_action(rport); break; case RPSM_EVENT_DELETE: bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend); bfa_fcs_rport_fcs_offline_action(rport); break; default: bfa_sm_fault(rport->fcs, event); break; } } /* * PLOGI is complete. Awaiting BFA rport online callback. FC-4s * are offline. */ static void bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport, enum rport_event event) { bfa_trc(rport->fcs, rport->pwwn); bfa_trc(rport->fcs, rport->pid); bfa_trc(rport->fcs, event); switch (event) { case RPSM_EVENT_HCB_ONLINE: bfa_sm_set_state(rport, bfa_fcs_rport_sm_online); bfa_fcs_rport_hal_online_action(rport); break; case RPSM_EVENT_PLOGI_COMP: break; case RPSM_EVENT_PRLO_RCVD: case RPSM_EVENT_LOGO_RCVD: bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv); bfa_fcs_rport_fcs_offline_action(rport); break; case RPSM_EVENT_FAB_SCN: case RPSM_EVENT_LOGO_IMP: case RPSM_EVENT_ADDRESS_CHANGE: case RPSM_EVENT_SCN_OFFLINE: bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); bfa_fcs_rport_fcs_offline_action(rport); break; case RPSM_EVENT_PLOGI_RCVD: rport->plogi_pending = BFA_TRUE; bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); bfa_fcs_rport_fcs_offline_action(rport); break; case RPSM_EVENT_DELETE: bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend); bfa_fcs_rport_fcs_offline_action(rport); break; default: bfa_sm_fault(rport->fcs, event); } } /* * Rport is ONLINE. FC-4s active. 
*/ static void bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport, enum rport_event event) { bfa_trc(rport->fcs, rport->pwwn); bfa_trc(rport->fcs, rport->pid); bfa_trc(rport->fcs, event); switch (event) { case RPSM_EVENT_FAB_SCN: if (bfa_fcs_fabric_is_switched(rport->port->fabric)) { bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsquery_sending); rport->ns_retries = 0; bfa_fcs_rport_send_nsdisc(rport, NULL); } else { bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc_online_sending); bfa_fcs_rport_send_adisc(rport, NULL); } break; case RPSM_EVENT_PLOGI_RCVD: case RPSM_EVENT_LOGO_IMP: case RPSM_EVENT_ADDRESS_CHANGE: case RPSM_EVENT_SCN_OFFLINE: bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); bfa_fcs_rport_hal_offline_action(rport); break; case RPSM_EVENT_DELETE: bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend); bfa_fcs_rport_hal_offline_action(rport); break; case RPSM_EVENT_LOGO_RCVD: case RPSM_EVENT_PRLO_RCVD: bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv); bfa_fcs_rport_hal_offline_action(rport); break; case RPSM_EVENT_SCN_ONLINE: case RPSM_EVENT_PLOGI_COMP: break; default: bfa_sm_fault(rport->fcs, event); } } /* * An SCN event is received in ONLINE state. NS query is being sent * prior to ADISC authentication with rport. FC-4s are paused. */ static void bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport, enum rport_event event) { bfa_trc(rport->fcs, rport->pwwn); bfa_trc(rport->fcs, rport->pid); bfa_trc(rport->fcs, event); switch (event) { case RPSM_EVENT_FCXP_SENT: bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsquery); break; case RPSM_EVENT_DELETE: bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend); bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); bfa_fcs_rport_hal_offline_action(rport); break; case RPSM_EVENT_FAB_SCN: /* * ignore SCN, wait for response to query itself */ break; case RPSM_EVENT_LOGO_RCVD: case RPSM_EVENT_PRLO_RCVD: bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv); bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); bfa_fcs_rport_hal_offline_action(rport); break; case RPSM_EVENT_LOGO_IMP: case RPSM_EVENT_PLOGI_RCVD: case RPSM_EVENT_ADDRESS_CHANGE: case RPSM_EVENT_PLOGI_COMP: bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); bfa_fcs_rport_hal_offline_action(rport); break; default: bfa_sm_fault(rport->fcs, event); } } /* * An SCN event is received in ONLINE state. NS query is sent to rport. * FC-4s are paused. 
*/ static void bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, enum rport_event event) { bfa_trc(rport->fcs, rport->pwwn); bfa_trc(rport->fcs, rport->pid); bfa_trc(rport->fcs, event); switch (event) { case RPSM_EVENT_ACCEPTED: bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc_online_sending); bfa_fcs_rport_send_adisc(rport, NULL); break; case RPSM_EVENT_FAILED: rport->ns_retries++; if (rport->ns_retries < BFA_FCS_RPORT_MAX_RETRIES) { bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsquery_sending); bfa_fcs_rport_send_nsdisc(rport, NULL); } else { bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); bfa_fcs_rport_hal_offline_action(rport); } break; case RPSM_EVENT_DELETE: bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend); bfa_fcxp_discard(rport->fcxp); bfa_fcs_rport_hal_offline_action(rport); break; case RPSM_EVENT_FAB_SCN: break; case RPSM_EVENT_LOGO_RCVD: case RPSM_EVENT_PRLO_RCVD: bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv); bfa_fcxp_discard(rport->fcxp); bfa_fcs_rport_hal_offline_action(rport); break; case RPSM_EVENT_PLOGI_COMP: case RPSM_EVENT_ADDRESS_CHANGE: case RPSM_EVENT_PLOGI_RCVD: case RPSM_EVENT_LOGO_IMP: bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); bfa_fcxp_discard(rport->fcxp); bfa_fcs_rport_hal_offline_action(rport); break; default: bfa_sm_fault(rport->fcs, event); } } /* * An SCN event is received in ONLINE state. ADISC is being sent for * authenticating with rport. FC-4s are paused. */ static void bfa_fcs_rport_sm_adisc_online_sending(struct bfa_fcs_rport_s *rport, enum rport_event event) { bfa_trc(rport->fcs, rport->pwwn); bfa_trc(rport->fcs, rport->pid); bfa_trc(rport->fcs, event); switch (event) { case RPSM_EVENT_FCXP_SENT: bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc_online); break; case RPSM_EVENT_DELETE: bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend); bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); bfa_fcs_rport_hal_offline_action(rport); break; case RPSM_EVENT_LOGO_IMP: case RPSM_EVENT_ADDRESS_CHANGE: bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); bfa_fcs_rport_hal_offline_action(rport); break; case RPSM_EVENT_LOGO_RCVD: case RPSM_EVENT_PRLO_RCVD: bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv); bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); bfa_fcs_rport_hal_offline_action(rport); break; case RPSM_EVENT_FAB_SCN: break; case RPSM_EVENT_PLOGI_RCVD: bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); bfa_fcs_rport_hal_offline_action(rport); break; default: bfa_sm_fault(rport->fcs, event); } } /* * An SCN event is received in ONLINE state. ADISC is to rport. * FC-4s are paused. */ static void bfa_fcs_rport_sm_adisc_online(struct bfa_fcs_rport_s *rport, enum rport_event event) { bfa_trc(rport->fcs, rport->pwwn); bfa_trc(rport->fcs, rport->pid); bfa_trc(rport->fcs, event); switch (event) { case RPSM_EVENT_ACCEPTED: bfa_sm_set_state(rport, bfa_fcs_rport_sm_online); break; case RPSM_EVENT_PLOGI_RCVD: /* * Too complex to cleanup FC-4 & rport and then acc to PLOGI. * At least go offline when a PLOGI is received. 
*/ bfa_fcxp_discard(rport->fcxp); fallthrough; case RPSM_EVENT_FAILED: case RPSM_EVENT_ADDRESS_CHANGE: bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); bfa_fcs_rport_hal_offline_action(rport); break; case RPSM_EVENT_DELETE: bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend); bfa_fcxp_discard(rport->fcxp); bfa_fcs_rport_hal_offline_action(rport); break; case RPSM_EVENT_FAB_SCN: /* * already processing RSCN */ break; case RPSM_EVENT_LOGO_IMP: bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); bfa_fcxp_discard(rport->fcxp); bfa_fcs_rport_hal_offline_action(rport); break; case RPSM_EVENT_LOGO_RCVD: case RPSM_EVENT_PRLO_RCVD: bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv); bfa_fcxp_discard(rport->fcxp); bfa_fcs_rport_hal_offline_action(rport); break; default: bfa_sm_fault(rport->fcs, event); } } /* * ADISC is being sent for authenticating with rport * Already did offline actions. */ static void bfa_fcs_rport_sm_adisc_offline_sending(struct bfa_fcs_rport_s *rport, enum rport_event event) { bfa_trc(rport->fcs, rport->pwwn); bfa_trc(rport->fcs, rport->pid); bfa_trc(rport->fcs, event); switch (event) { case RPSM_EVENT_FCXP_SENT: bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc_offline); break; case RPSM_EVENT_DELETE: case RPSM_EVENT_SCN_OFFLINE: case RPSM_EVENT_LOGO_IMP: case RPSM_EVENT_LOGO_RCVD: case RPSM_EVENT_PRLO_RCVD: bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); bfa_timer_start(rport->fcs->bfa, &rport->timer, bfa_fcs_rport_timeout, rport, bfa_fcs_rport_del_timeout); break; case RPSM_EVENT_PLOGI_RCVD: bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending); bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); bfa_fcs_rport_send_plogiacc(rport, NULL); break; default: bfa_sm_fault(rport->fcs, event); } } /* * ADISC to rport * Already did offline actions */ static void bfa_fcs_rport_sm_adisc_offline(struct bfa_fcs_rport_s *rport, enum rport_event event) { bfa_trc(rport->fcs, rport->pwwn); bfa_trc(rport->fcs, rport->pid); bfa_trc(rport->fcs, event); switch (event) { case RPSM_EVENT_ACCEPTED: bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online); bfa_fcs_rport_hal_online(rport); break; case RPSM_EVENT_PLOGI_RCVD: bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending); bfa_fcxp_discard(rport->fcxp); bfa_fcs_rport_send_plogiacc(rport, NULL); break; case RPSM_EVENT_FAILED: bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); bfa_timer_start(rport->fcs->bfa, &rport->timer, bfa_fcs_rport_timeout, rport, bfa_fcs_rport_del_timeout); break; case RPSM_EVENT_DELETE: case RPSM_EVENT_SCN_OFFLINE: case RPSM_EVENT_LOGO_IMP: case RPSM_EVENT_LOGO_RCVD: case RPSM_EVENT_PRLO_RCVD: bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); bfa_fcxp_discard(rport->fcxp); bfa_timer_start(rport->fcs->bfa, &rport->timer, bfa_fcs_rport_timeout, rport, bfa_fcs_rport_del_timeout); break; default: bfa_sm_fault(rport->fcs, event); } } /* * Rport has sent LOGO. Awaiting FC-4 offline completion callback. 
*/ static void bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport, enum rport_event event) { bfa_trc(rport->fcs, rport->pwwn); bfa_trc(rport->fcs, rport->pid); bfa_trc(rport->fcs, event); switch (event) { case RPSM_EVENT_FC4_OFFLINE: bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logorcv); bfa_fcs_rport_hal_offline(rport); break; case RPSM_EVENT_DELETE: if (rport->pid && (rport->prlo == BFA_TRUE)) bfa_fcs_rport_send_prlo_acc(rport); if (rport->pid && (rport->prlo == BFA_FALSE)) bfa_fcs_rport_send_logo_acc(rport); bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_off_delete); break; case RPSM_EVENT_SCN_ONLINE: case RPSM_EVENT_SCN_OFFLINE: case RPSM_EVENT_HCB_ONLINE: case RPSM_EVENT_LOGO_RCVD: case RPSM_EVENT_PRLO_RCVD: case RPSM_EVENT_ADDRESS_CHANGE: break; default: bfa_sm_fault(rport->fcs, event); } } /* * LOGO needs to be sent to rport. Awaiting FC-4 offline completion * callback. */ static void bfa_fcs_rport_sm_fc4_logosend(struct bfa_fcs_rport_s *rport, enum rport_event event) { bfa_trc(rport->fcs, rport->pwwn); bfa_trc(rport->fcs, rport->pid); bfa_trc(rport->fcs, event); switch (event) { case RPSM_EVENT_FC4_OFFLINE: bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logosend); bfa_fcs_rport_hal_offline(rport); break; case RPSM_EVENT_LOGO_RCVD: bfa_fcs_rport_send_logo_acc(rport); fallthrough; case RPSM_EVENT_PRLO_RCVD: if (rport->prlo == BFA_TRUE) bfa_fcs_rport_send_prlo_acc(rport); bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_off_delete); break; case RPSM_EVENT_HCB_ONLINE: case RPSM_EVENT_DELETE: /* Rport is being deleted */ break; default: bfa_sm_fault(rport->fcs, event); } } /* * Rport is going offline. Awaiting FC-4 offline completion callback. */ static void bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport, enum rport_event event) { bfa_trc(rport->fcs, rport->pwwn); bfa_trc(rport->fcs, rport->pid); bfa_trc(rport->fcs, event); switch (event) { case RPSM_EVENT_FC4_OFFLINE: bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_offline); bfa_fcs_rport_hal_offline(rport); break; case RPSM_EVENT_SCN_ONLINE: break; case RPSM_EVENT_LOGO_RCVD: /* * Rport is going offline. Just ack the logo */ bfa_fcs_rport_send_logo_acc(rport); break; case RPSM_EVENT_PRLO_RCVD: bfa_fcs_rport_send_prlo_acc(rport); break; case RPSM_EVENT_SCN_OFFLINE: case RPSM_EVENT_HCB_ONLINE: case RPSM_EVENT_FAB_SCN: case RPSM_EVENT_LOGO_IMP: case RPSM_EVENT_ADDRESS_CHANGE: /* * rport is already going offline. * SCN - ignore and wait till transitioning to offline state */ break; case RPSM_EVENT_DELETE: bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend); break; default: bfa_sm_fault(rport->fcs, event); } } /* * Rport is offline. FC-4s are offline. Awaiting BFA rport offline * callback. 
*/ static void bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport, enum rport_event event) { bfa_trc(rport->fcs, rport->pwwn); bfa_trc(rport->fcs, rport->pid); bfa_trc(rport->fcs, event); switch (event) { case RPSM_EVENT_HCB_OFFLINE: if (bfa_fcs_lport_is_online(rport->port) && (rport->plogi_pending)) { rport->plogi_pending = BFA_FALSE; bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending); bfa_fcs_rport_send_plogiacc(rport, NULL); break; } fallthrough; case RPSM_EVENT_ADDRESS_CHANGE: if (!bfa_fcs_lport_is_online(rport->port)) { rport->pid = 0; bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); bfa_timer_start(rport->fcs->bfa, &rport->timer, bfa_fcs_rport_timeout, rport, bfa_fcs_rport_del_timeout); break; } if (bfa_fcs_fabric_is_switched(rport->port->fabric)) { bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); rport->ns_retries = 0; bfa_fcs_rport_send_nsdisc(rport, NULL); } else if (bfa_fcport_get_topology(rport->port->fcs->bfa) == BFA_PORT_TOPOLOGY_LOOP) { if (rport->scn_online) { bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc_offline_sending); bfa_fcs_rport_send_adisc(rport, NULL); } else { bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); bfa_timer_start(rport->fcs->bfa, &rport->timer, bfa_fcs_rport_timeout, rport, bfa_fcs_rport_del_timeout); } } else { bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending); rport->plogi_retries = 0; bfa_fcs_rport_send_plogi(rport, NULL); } break; case RPSM_EVENT_DELETE: bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); bfa_fcs_rport_free(rport); break; case RPSM_EVENT_SCN_ONLINE: case RPSM_EVENT_SCN_OFFLINE: case RPSM_EVENT_FAB_SCN: case RPSM_EVENT_LOGO_RCVD: case RPSM_EVENT_PRLO_RCVD: case RPSM_EVENT_PLOGI_RCVD: case RPSM_EVENT_LOGO_IMP: /* * Ignore, already offline. */ break; default: bfa_sm_fault(rport->fcs, event); } } /* * Rport is offline. FC-4s are offline. Awaiting BFA rport offline * callback to send LOGO accept. */ static void bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport, enum rport_event event) { bfa_trc(rport->fcs, rport->pwwn); bfa_trc(rport->fcs, rport->pid); bfa_trc(rport->fcs, event); switch (event) { case RPSM_EVENT_HCB_OFFLINE: case RPSM_EVENT_ADDRESS_CHANGE: if (rport->pid && (rport->prlo == BFA_TRUE)) bfa_fcs_rport_send_prlo_acc(rport); if (rport->pid && (rport->prlo == BFA_FALSE)) bfa_fcs_rport_send_logo_acc(rport); /* * If the lport is online and if the rport is not a well * known address port, * we try to re-discover the r-port. */ if (bfa_fcs_lport_is_online(rport->port) && (!BFA_FCS_PID_IS_WKA(rport->pid))) { if (bfa_fcs_fabric_is_switched(rport->port->fabric)) { bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); rport->ns_retries = 0; bfa_fcs_rport_send_nsdisc(rport, NULL); } else { /* For N2N Direct Attach, try to re-login */ bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending); rport->plogi_retries = 0; bfa_fcs_rport_send_plogi(rport, NULL); } } else { /* * if it is not a well known address, reset the * pid to 0. 
*/ if (!BFA_FCS_PID_IS_WKA(rport->pid)) rport->pid = 0; bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); bfa_timer_start(rport->fcs->bfa, &rport->timer, bfa_fcs_rport_timeout, rport, bfa_fcs_rport_del_timeout); } break; case RPSM_EVENT_DELETE: bfa_sm_set_state(rport, bfa_fcs_rport_sm_delete_pending); if (rport->pid && (rport->prlo == BFA_TRUE)) bfa_fcs_rport_send_prlo_acc(rport); if (rport->pid && (rport->prlo == BFA_FALSE)) bfa_fcs_rport_send_logo_acc(rport); break; case RPSM_EVENT_LOGO_IMP: bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_offline); break; case RPSM_EVENT_SCN_ONLINE: case RPSM_EVENT_SCN_OFFLINE: case RPSM_EVENT_LOGO_RCVD: case RPSM_EVENT_PRLO_RCVD: /* * Ignore - already processing a LOGO. */ break; default: bfa_sm_fault(rport->fcs, event); } } /* * Rport is being deleted. FC-4s are offline. * Awaiting BFA rport offline * callback to send LOGO. */ static void bfa_fcs_rport_sm_hcb_logosend(struct bfa_fcs_rport_s *rport, enum rport_event event) { bfa_trc(rport->fcs, rport->pwwn); bfa_trc(rport->fcs, rport->pid); bfa_trc(rport->fcs, event); switch (event) { case RPSM_EVENT_HCB_OFFLINE: bfa_sm_set_state(rport, bfa_fcs_rport_sm_logo_sending); bfa_fcs_rport_send_logo(rport, NULL); break; case RPSM_EVENT_LOGO_RCVD: bfa_fcs_rport_send_logo_acc(rport); fallthrough; case RPSM_EVENT_PRLO_RCVD: if (rport->prlo == BFA_TRUE) bfa_fcs_rport_send_prlo_acc(rport); bfa_sm_set_state(rport, bfa_fcs_rport_sm_delete_pending); break; case RPSM_EVENT_SCN_ONLINE: case RPSM_EVENT_SCN_OFFLINE: case RPSM_EVENT_ADDRESS_CHANGE: break; default: bfa_sm_fault(rport->fcs, event); } } /* * Rport is being deleted. FC-4s are offline. LOGO is being sent. */ static void bfa_fcs_rport_sm_logo_sending(struct bfa_fcs_rport_s *rport, enum rport_event event) { bfa_trc(rport->fcs, rport->pwwn); bfa_trc(rport->fcs, rport->pid); bfa_trc(rport->fcs, event); switch (event) { case RPSM_EVENT_FCXP_SENT: /* Once LOGO is sent, we donot wait for the response */ bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); bfa_fcs_rport_free(rport); break; case RPSM_EVENT_SCN_ONLINE: case RPSM_EVENT_SCN_OFFLINE: case RPSM_EVENT_FAB_SCN: case RPSM_EVENT_ADDRESS_CHANGE: break; case RPSM_EVENT_LOGO_RCVD: bfa_fcs_rport_send_logo_acc(rport); fallthrough; case RPSM_EVENT_PRLO_RCVD: if (rport->prlo == BFA_TRUE) bfa_fcs_rport_send_prlo_acc(rport); bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); bfa_fcs_rport_free(rport); break; default: bfa_sm_fault(rport->fcs, event); } } /* * Rport is offline. FC-4s are offline. BFA rport is offline. * Timer active to delete stale rport. 
*/ static void bfa_fcs_rport_sm_offline(struct bfa_fcs_rport_s *rport, enum rport_event event) { bfa_trc(rport->fcs, rport->pwwn); bfa_trc(rport->fcs, rport->pid); bfa_trc(rport->fcs, event); switch (event) { case RPSM_EVENT_TIMEOUT: bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); bfa_fcs_rport_free(rport); break; case RPSM_EVENT_FAB_SCN: case RPSM_EVENT_ADDRESS_CHANGE: bfa_timer_stop(&rport->timer); WARN_ON(!(bfa_fcport_get_topology(rport->port->fcs->bfa) != BFA_PORT_TOPOLOGY_LOOP)); bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); rport->ns_retries = 0; bfa_fcs_rport_send_nsdisc(rport, NULL); break; case RPSM_EVENT_DELETE: bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); bfa_timer_stop(&rport->timer); bfa_fcs_rport_free(rport); break; case RPSM_EVENT_PLOGI_RCVD: bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending); bfa_timer_stop(&rport->timer); bfa_fcs_rport_send_plogiacc(rport, NULL); break; case RPSM_EVENT_LOGO_RCVD: case RPSM_EVENT_PRLO_RCVD: case RPSM_EVENT_LOGO_IMP: case RPSM_EVENT_SCN_OFFLINE: break; case RPSM_EVENT_PLOGI_COMP: bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online); bfa_timer_stop(&rport->timer); bfa_fcs_rport_fcs_online_action(rport); break; case RPSM_EVENT_SCN_ONLINE: bfa_timer_stop(&rport->timer); bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending); bfa_fcs_rport_send_plogi(rport, NULL); break; case RPSM_EVENT_PLOGI_SEND: bfa_timer_stop(&rport->timer); bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending); rport->plogi_retries = 0; bfa_fcs_rport_send_plogi(rport, NULL); break; default: bfa_sm_fault(rport->fcs, event); } } /* * Rport address has changed. Nameserver discovery request is being sent. */ static void bfa_fcs_rport_sm_nsdisc_sending(struct bfa_fcs_rport_s *rport, enum rport_event event) { bfa_trc(rport->fcs, rport->pwwn); bfa_trc(rport->fcs, rport->pid); bfa_trc(rport->fcs, event); switch (event) { case RPSM_EVENT_FCXP_SENT: bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sent); break; case RPSM_EVENT_DELETE: bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); bfa_fcs_rport_free(rport); break; case RPSM_EVENT_PLOGI_RCVD: bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending); bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); bfa_fcs_rport_send_plogiacc(rport, NULL); break; case RPSM_EVENT_FAB_SCN: case RPSM_EVENT_LOGO_RCVD: case RPSM_EVENT_PRLO_RCVD: case RPSM_EVENT_PLOGI_SEND: break; case RPSM_EVENT_ADDRESS_CHANGE: rport->ns_retries = 0; /* reset the retry count */ break; case RPSM_EVENT_LOGO_IMP: bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); bfa_timer_start(rport->fcs->bfa, &rport->timer, bfa_fcs_rport_timeout, rport, bfa_fcs_rport_del_timeout); break; case RPSM_EVENT_PLOGI_COMP: bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online); bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); bfa_fcs_rport_fcs_online_action(rport); break; default: bfa_sm_fault(rport->fcs, event); } } /* * Nameserver discovery failed. Waiting for timeout to retry. 
*/ static void bfa_fcs_rport_sm_nsdisc_retry(struct bfa_fcs_rport_s *rport, enum rport_event event) { bfa_trc(rport->fcs, rport->pwwn); bfa_trc(rport->fcs, rport->pid); bfa_trc(rport->fcs, event); switch (event) { case RPSM_EVENT_TIMEOUT: bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); bfa_fcs_rport_send_nsdisc(rport, NULL); break; case RPSM_EVENT_FAB_SCN: case RPSM_EVENT_ADDRESS_CHANGE: bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); bfa_timer_stop(&rport->timer); rport->ns_retries = 0; bfa_fcs_rport_send_nsdisc(rport, NULL); break; case RPSM_EVENT_DELETE: bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); bfa_timer_stop(&rport->timer); bfa_fcs_rport_free(rport); break; case RPSM_EVENT_PLOGI_RCVD: bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending); bfa_timer_stop(&rport->timer); bfa_fcs_rport_send_plogiacc(rport, NULL); break; case RPSM_EVENT_LOGO_IMP: rport->pid = 0; bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); bfa_timer_stop(&rport->timer); bfa_timer_start(rport->fcs->bfa, &rport->timer, bfa_fcs_rport_timeout, rport, bfa_fcs_rport_del_timeout); break; case RPSM_EVENT_LOGO_RCVD: bfa_fcs_rport_send_logo_acc(rport); break; case RPSM_EVENT_PRLO_RCVD: bfa_fcs_rport_send_prlo_acc(rport); break; case RPSM_EVENT_PLOGI_COMP: bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online); bfa_timer_stop(&rport->timer); bfa_fcs_rport_fcs_online_action(rport); break; default: bfa_sm_fault(rport->fcs, event); } } /* * Rport address has changed. Nameserver discovery request is sent. */ static void bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport, enum rport_event event) { bfa_trc(rport->fcs, rport->pwwn); bfa_trc(rport->fcs, rport->pid); bfa_trc(rport->fcs, event); switch (event) { case RPSM_EVENT_ACCEPTED: case RPSM_EVENT_ADDRESS_CHANGE: if (rport->pid) { bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending); bfa_fcs_rport_send_plogi(rport, NULL); } else { bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); rport->ns_retries = 0; bfa_fcs_rport_send_nsdisc(rport, NULL); } break; case RPSM_EVENT_FAILED: rport->ns_retries++; if (rport->ns_retries < BFA_FCS_RPORT_MAX_RETRIES) { bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); bfa_fcs_rport_send_nsdisc(rport, NULL); } else { rport->old_pid = rport->pid; rport->pid = 0; bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); bfa_timer_start(rport->fcs->bfa, &rport->timer, bfa_fcs_rport_timeout, rport, bfa_fcs_rport_del_timeout); } break; case RPSM_EVENT_DELETE: bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); bfa_fcxp_discard(rport->fcxp); bfa_fcs_rport_free(rport); break; case RPSM_EVENT_PLOGI_RCVD: bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending); bfa_fcxp_discard(rport->fcxp); bfa_fcs_rport_send_plogiacc(rport, NULL); break; case RPSM_EVENT_LOGO_IMP: rport->pid = 0; bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); bfa_fcxp_discard(rport->fcxp); bfa_timer_start(rport->fcs->bfa, &rport->timer, bfa_fcs_rport_timeout, rport, bfa_fcs_rport_del_timeout); break; case RPSM_EVENT_PRLO_RCVD: bfa_fcs_rport_send_prlo_acc(rport); break; case RPSM_EVENT_FAB_SCN: /* * ignore, wait for NS query response */ break; case RPSM_EVENT_LOGO_RCVD: /* * Not logged-in yet. Accept LOGO. 
*/ bfa_fcs_rport_send_logo_acc(rport); break; case RPSM_EVENT_PLOGI_COMP: bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online); bfa_fcxp_discard(rport->fcxp); bfa_fcs_rport_fcs_online_action(rport); break; default: bfa_sm_fault(rport->fcs, event); } } /* * Rport needs to be deleted * waiting for ITNIM clean up to finish */ static void bfa_fcs_rport_sm_fc4_off_delete(struct bfa_fcs_rport_s *rport, enum rport_event event) { bfa_trc(rport->fcs, rport->pwwn); bfa_trc(rport->fcs, rport->pid); bfa_trc(rport->fcs, event); switch (event) { case RPSM_EVENT_FC4_OFFLINE: bfa_sm_set_state(rport, bfa_fcs_rport_sm_delete_pending); bfa_fcs_rport_hal_offline(rport); break; case RPSM_EVENT_DELETE: case RPSM_EVENT_PLOGI_RCVD: /* Ignore these events */ break; default: bfa_sm_fault(rport->fcs, event); break; } } /* * RPort needs to be deleted * waiting for BFA/FW to finish current processing */ static void bfa_fcs_rport_sm_delete_pending(struct bfa_fcs_rport_s *rport, enum rport_event event) { bfa_trc(rport->fcs, rport->pwwn); bfa_trc(rport->fcs, rport->pid); bfa_trc(rport->fcs, event); switch (event) { case RPSM_EVENT_HCB_OFFLINE: bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); bfa_fcs_rport_free(rport); break; case RPSM_EVENT_DELETE: case RPSM_EVENT_LOGO_IMP: case RPSM_EVENT_PLOGI_RCVD: /* Ignore these events */ break; default: bfa_sm_fault(rport->fcs, event); } } /* * fcs_rport_private FCS RPORT private functions */ static void bfa_fcs_rport_send_plogi(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced) { struct bfa_fcs_rport_s *rport = rport_cbarg; struct bfa_fcs_lport_s *port = rport->port; struct fchs_s fchs; int len; struct bfa_fcxp_s *fcxp; bfa_trc(rport->fcs, rport->pwwn); fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE); if (!fcxp) { bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe, bfa_fcs_rport_send_plogi, rport, BFA_TRUE); return; } rport->fcxp = fcxp; len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid, bfa_fcs_lport_get_fcid(port), 0, port->port_cfg.pwwn, port->port_cfg.nwwn, bfa_fcport_get_maxfrsize(port->fcs->bfa), bfa_fcport_get_rx_bbcredit(port->fcs->bfa)); bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, FC_CLASS_3, len, &fchs, bfa_fcs_rport_plogi_response, (void *)rport, FC_MAX_PDUSZ, FC_ELS_TOV); rport->stats.plogis++; bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT); } static void bfa_fcs_rport_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, bfa_status_t req_status, u32 rsp_len, u32 resid_len, struct fchs_s *rsp_fchs) { struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg; struct fc_logi_s *plogi_rsp; struct fc_ls_rjt_s *ls_rjt; struct bfa_fcs_rport_s *twin; struct list_head *qe; bfa_trc(rport->fcs, rport->pwwn); /* * Sanity Checks */ if (req_status != BFA_STATUS_OK) { bfa_trc(rport->fcs, req_status); rport->stats.plogi_failed++; bfa_sm_send_event(rport, RPSM_EVENT_FAILED); return; } plogi_rsp = (struct fc_logi_s *) BFA_FCXP_RSP_PLD(fcxp); /* * Check for failure first. */ if (plogi_rsp->els_cmd.els_code != FC_ELS_ACC) { ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp); bfa_trc(rport->fcs, ls_rjt->reason_code); bfa_trc(rport->fcs, ls_rjt->reason_code_expl); if ((ls_rjt->reason_code == FC_LS_RJT_RSN_UNABLE_TO_PERF_CMD) && (ls_rjt->reason_code_expl == FC_LS_RJT_EXP_INSUFF_RES)) { rport->stats.rjt_insuff_res++; bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_RETRY); return; } rport->stats.plogi_rejects++; bfa_sm_send_event(rport, RPSM_EVENT_FAILED); return; } /* * PLOGI is complete. 
Make sure this device is not one of the known * device with a new FC port address. */ list_for_each(qe, &rport->port->rport_q) { twin = (struct bfa_fcs_rport_s *) qe; if (twin == rport) continue; if (!rport->pwwn && (plogi_rsp->port_name == twin->pwwn)) { bfa_trc(rport->fcs, twin->pid); bfa_trc(rport->fcs, rport->pid); /* Update plogi stats in twin */ twin->stats.plogis += rport->stats.plogis; twin->stats.plogi_rejects += rport->stats.plogi_rejects; twin->stats.plogi_timeouts += rport->stats.plogi_timeouts; twin->stats.plogi_failed += rport->stats.plogi_failed; twin->stats.plogi_rcvd += rport->stats.plogi_rcvd; twin->stats.plogi_accs++; bfa_sm_send_event(rport, RPSM_EVENT_DELETE); bfa_fcs_rport_update(twin, plogi_rsp); twin->pid = rsp_fchs->s_id; bfa_sm_send_event(twin, RPSM_EVENT_PLOGI_COMP); return; } } /* * Normal login path -- no evil twins. */ rport->stats.plogi_accs++; bfa_fcs_rport_update(rport, plogi_rsp); bfa_sm_send_event(rport, RPSM_EVENT_ACCEPTED); } static void bfa_fcs_rport_send_plogiacc(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced) { struct bfa_fcs_rport_s *rport = rport_cbarg; struct bfa_fcs_lport_s *port = rport->port; struct fchs_s fchs; int len; struct bfa_fcxp_s *fcxp; bfa_trc(rport->fcs, rport->pwwn); bfa_trc(rport->fcs, rport->reply_oxid); fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE); if (!fcxp) { bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe, bfa_fcs_rport_send_plogiacc, rport, BFA_FALSE); return; } rport->fcxp = fcxp; len = fc_plogi_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid, bfa_fcs_lport_get_fcid(port), rport->reply_oxid, port->port_cfg.pwwn, port->port_cfg.nwwn, bfa_fcport_get_maxfrsize(port->fcs->bfa), bfa_fcport_get_rx_bbcredit(port->fcs->bfa)); bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0); bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT); } static void bfa_fcs_rport_send_adisc(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced) { struct bfa_fcs_rport_s *rport = rport_cbarg; struct bfa_fcs_lport_s *port = rport->port; struct fchs_s fchs; int len; struct bfa_fcxp_s *fcxp; bfa_trc(rport->fcs, rport->pwwn); fcxp = fcxp_alloced ? 
fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE); if (!fcxp) { bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe, bfa_fcs_rport_send_adisc, rport, BFA_TRUE); return; } rport->fcxp = fcxp; len = fc_adisc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid, bfa_fcs_lport_get_fcid(port), 0, port->port_cfg.pwwn, port->port_cfg.nwwn); bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, FC_CLASS_3, len, &fchs, bfa_fcs_rport_adisc_response, rport, FC_MAX_PDUSZ, FC_ELS_TOV); rport->stats.adisc_sent++; bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT); } static void bfa_fcs_rport_adisc_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, bfa_status_t req_status, u32 rsp_len, u32 resid_len, struct fchs_s *rsp_fchs) { struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg; void *pld = bfa_fcxp_get_rspbuf(fcxp); struct fc_ls_rjt_s *ls_rjt; if (req_status != BFA_STATUS_OK) { bfa_trc(rport->fcs, req_status); rport->stats.adisc_failed++; bfa_sm_send_event(rport, RPSM_EVENT_FAILED); return; } if (fc_adisc_rsp_parse((struct fc_adisc_s *)pld, rsp_len, rport->pwwn, rport->nwwn) == FC_PARSE_OK) { rport->stats.adisc_accs++; bfa_sm_send_event(rport, RPSM_EVENT_ACCEPTED); return; } rport->stats.adisc_rejects++; ls_rjt = pld; bfa_trc(rport->fcs, ls_rjt->els_cmd.els_code); bfa_trc(rport->fcs, ls_rjt->reason_code); bfa_trc(rport->fcs, ls_rjt->reason_code_expl); bfa_sm_send_event(rport, RPSM_EVENT_FAILED); } static void bfa_fcs_rport_send_nsdisc(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced) { struct bfa_fcs_rport_s *rport = rport_cbarg; struct bfa_fcs_lport_s *port = rport->port; struct fchs_s fchs; struct bfa_fcxp_s *fcxp; int len; bfa_cb_fcxp_send_t cbfn; bfa_trc(rport->fcs, rport->pid); fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE); if (!fcxp) { bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe, bfa_fcs_rport_send_nsdisc, rport, BFA_TRUE); return; } rport->fcxp = fcxp; if (rport->pwwn) { len = fc_gidpn_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), bfa_fcs_lport_get_fcid(port), 0, rport->pwwn); cbfn = bfa_fcs_rport_gidpn_response; } else { len = fc_gpnid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), bfa_fcs_lport_get_fcid(port), 0, rport->pid); cbfn = bfa_fcs_rport_gpnid_response; } bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, FC_CLASS_3, len, &fchs, cbfn, (void *)rport, FC_MAX_PDUSZ, FC_FCCT_TOV); bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT); } static void bfa_fcs_rport_gidpn_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, bfa_status_t req_status, u32 rsp_len, u32 resid_len, struct fchs_s *rsp_fchs) { struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg; struct ct_hdr_s *cthdr; struct fcgs_gidpn_resp_s *gidpn_rsp; struct bfa_fcs_rport_s *twin; struct list_head *qe; bfa_trc(rport->fcs, rport->pwwn); cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code); if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { /* Check if the pid is the same as before. */ gidpn_rsp = (struct fcgs_gidpn_resp_s *) (cthdr + 1); if (gidpn_rsp->dap == rport->pid) { /* Device is online */ bfa_sm_send_event(rport, RPSM_EVENT_ACCEPTED); } else { /* * Device's PID has changed. We need to clean up * and re-login. If there is another device with * the newly discovered pid, send an scn notice * so that its new pid can be discovered. 
*/ list_for_each(qe, &rport->port->rport_q) { twin = (struct bfa_fcs_rport_s *) qe; if (twin == rport) continue; if (gidpn_rsp->dap == twin->pid) { bfa_trc(rport->fcs, twin->pid); bfa_trc(rport->fcs, rport->pid); twin->pid = 0; bfa_sm_send_event(twin, RPSM_EVENT_ADDRESS_CHANGE); } } rport->pid = gidpn_rsp->dap; bfa_sm_send_event(rport, RPSM_EVENT_ADDRESS_CHANGE); } return; } /* * Reject Response */ switch (cthdr->reason_code) { case CT_RSN_LOGICAL_BUSY: /* * Need to retry */ bfa_sm_send_event(rport, RPSM_EVENT_TIMEOUT); break; case CT_RSN_UNABLE_TO_PERF: /* * device doesn't exist : Start timer to cleanup this later. */ bfa_sm_send_event(rport, RPSM_EVENT_FAILED); break; default: bfa_sm_send_event(rport, RPSM_EVENT_FAILED); break; } } static void bfa_fcs_rport_gpnid_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, bfa_status_t req_status, u32 rsp_len, u32 resid_len, struct fchs_s *rsp_fchs) { struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg; struct ct_hdr_s *cthdr; bfa_trc(rport->fcs, rport->pwwn); cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code); if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { bfa_sm_send_event(rport, RPSM_EVENT_ACCEPTED); return; } /* * Reject Response */ switch (cthdr->reason_code) { case CT_RSN_LOGICAL_BUSY: /* * Need to retry */ bfa_sm_send_event(rport, RPSM_EVENT_TIMEOUT); break; case CT_RSN_UNABLE_TO_PERF: /* * device doesn't exist : Start timer to cleanup this later. */ bfa_sm_send_event(rport, RPSM_EVENT_FAILED); break; default: bfa_sm_send_event(rport, RPSM_EVENT_FAILED); break; } } /* * Called to send a logout to the rport. */ static void bfa_fcs_rport_send_logo(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced) { struct bfa_fcs_rport_s *rport = rport_cbarg; struct bfa_fcs_lport_s *port; struct fchs_s fchs; struct bfa_fcxp_s *fcxp; u16 len; bfa_trc(rport->fcs, rport->pid); port = rport->port; fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE); if (!fcxp) { bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe, bfa_fcs_rport_send_logo, rport, BFA_FALSE); return; } rport->fcxp = fcxp; len = fc_logo_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid, bfa_fcs_lport_get_fcid(port), 0, bfa_fcs_lport_get_pwwn(port)); bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, rport, FC_MAX_PDUSZ, FC_ELS_TOV); rport->stats.logos++; bfa_fcxp_discard(rport->fcxp); bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT); } /* * Send ACC for a LOGO received. */ static void bfa_fcs_rport_send_logo_acc(void *rport_cbarg) { struct bfa_fcs_rport_s *rport = rport_cbarg; struct bfa_fcs_lport_s *port; struct fchs_s fchs; struct bfa_fcxp_s *fcxp; u16 len; bfa_trc(rport->fcs, rport->pid); port = rport->port; fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE); if (!fcxp) return; rport->stats.logo_rcvd++; len = fc_logo_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid, bfa_fcs_lport_get_fcid(port), rport->reply_oxid); bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0); } /* * brief * This routine will be called by bfa_timer on timer timeouts. * * param[in] rport - pointer to bfa_fcs_lport_ns_t. 
* param[out] rport_status - pointer to return vport status in * * return * void * * Special Considerations: * * note */ static void bfa_fcs_rport_timeout(void *arg) { struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) arg; rport->stats.plogi_timeouts++; bfa_stats(rport->port, rport_plogi_timeouts); bfa_sm_send_event(rport, RPSM_EVENT_TIMEOUT); } static void bfa_fcs_rport_process_prli(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs, u16 len) { struct bfa_fcxp_s *fcxp; struct fchs_s fchs; struct bfa_fcs_lport_s *port = rport->port; struct fc_prli_s *prli; bfa_trc(port->fcs, rx_fchs->s_id); bfa_trc(port->fcs, rx_fchs->d_id); rport->stats.prli_rcvd++; /* * We are in Initiator Mode */ prli = (struct fc_prli_s *) (rx_fchs + 1); if (prli->parampage.servparams.target) { /* * PRLI from a target ? * Send the Acc. * PRLI sent by us will be used to transition the IT nexus, * once the response is received from the target. */ bfa_trc(port->fcs, rx_fchs->s_id); rport->scsi_function = BFA_RPORT_TARGET; } else { bfa_trc(rport->fcs, prli->parampage.type); rport->scsi_function = BFA_RPORT_INITIATOR; bfa_fcs_itnim_is_initiator(rport->itnim); } fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE); if (!fcxp) return; len = fc_prli_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rx_fchs->s_id, bfa_fcs_lport_get_fcid(port), rx_fchs->ox_id, port->port_cfg.roles); bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0); } static void bfa_fcs_rport_process_rpsc(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs, u16 len) { struct bfa_fcxp_s *fcxp; struct fchs_s fchs; struct bfa_fcs_lport_s *port = rport->port; struct fc_rpsc_speed_info_s speeds; struct bfa_port_attr_s pport_attr; bfa_trc(port->fcs, rx_fchs->s_id); bfa_trc(port->fcs, rx_fchs->d_id); rport->stats.rpsc_rcvd++; speeds.port_speed_cap = RPSC_SPEED_CAP_1G | RPSC_SPEED_CAP_2G | RPSC_SPEED_CAP_4G | RPSC_SPEED_CAP_8G; /* * get curent speed from pport attributes from BFA */ bfa_fcport_get_attr(port->fcs->bfa, &pport_attr); speeds.port_op_speed = fc_bfa_speed_to_rpsc_operspeed(pport_attr.speed); fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE); if (!fcxp) return; len = fc_rpsc_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rx_fchs->s_id, bfa_fcs_lport_get_fcid(port), rx_fchs->ox_id, &speeds); bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0); } static void bfa_fcs_rport_process_adisc(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs, u16 len) { struct bfa_fcxp_s *fcxp; struct fchs_s fchs; struct bfa_fcs_lport_s *port = rport->port; bfa_trc(port->fcs, rx_fchs->s_id); bfa_trc(port->fcs, rx_fchs->d_id); rport->stats.adisc_rcvd++; /* * Accept if the itnim for this rport is online. * Else reject the ADISC. 
*/ if (bfa_fcs_itnim_get_online_state(rport->itnim) == BFA_STATUS_OK) { fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE); if (!fcxp) return; len = fc_adisc_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rx_fchs->s_id, bfa_fcs_lport_get_fcid(port), rx_fchs->ox_id, port->port_cfg.pwwn, port->port_cfg.nwwn); bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0); } else { rport->stats.adisc_rejected++; bfa_fcs_rport_send_ls_rjt(rport, rx_fchs, FC_LS_RJT_RSN_UNABLE_TO_PERF_CMD, FC_LS_RJT_EXP_LOGIN_REQUIRED); } } static void bfa_fcs_rport_hal_online(struct bfa_fcs_rport_s *rport) { struct bfa_fcs_lport_s *port = rport->port; struct bfa_rport_info_s rport_info; rport_info.pid = rport->pid; rport_info.local_pid = port->pid; rport_info.lp_tag = port->lp_tag; rport_info.vf_id = port->fabric->vf_id; rport_info.vf_en = port->fabric->is_vf; rport_info.fc_class = rport->fc_cos; rport_info.cisc = rport->cisc; rport_info.max_frmsz = rport->maxfrsize; bfa_rport_online(rport->bfa_rport, &rport_info); } static void bfa_fcs_rport_hal_offline(struct bfa_fcs_rport_s *rport) { if (rport->bfa_rport) bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE); else bfa_cb_rport_offline(rport); } static struct bfa_fcs_rport_s * bfa_fcs_rport_alloc(struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid) { struct bfa_fcs_s *fcs = port->fcs; struct bfa_fcs_rport_s *rport; struct bfad_rport_s *rport_drv; /* * allocate rport */ if (fcs->num_rport_logins >= bfa_fcs_rport_max_logins) { bfa_trc(fcs, rpid); return NULL; } if (bfa_fcb_rport_alloc(fcs->bfad, &rport, &rport_drv) != BFA_STATUS_OK) { bfa_trc(fcs, rpid); return NULL; } /* * Initialize r-port */ rport->port = port; rport->fcs = fcs; rport->rp_drv = rport_drv; rport->pid = rpid; rport->pwwn = pwwn; rport->old_pid = 0; rport->bfa_rport = NULL; /* * allocate FC-4s */ WARN_ON(!bfa_fcs_lport_is_initiator(port)); if (bfa_fcs_lport_is_initiator(port)) { rport->itnim = bfa_fcs_itnim_create(rport); if (!rport->itnim) { bfa_trc(fcs, rpid); kfree(rport_drv); return NULL; } } bfa_fcs_lport_add_rport(port, rport); fcs->num_rport_logins++; bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); /* Initialize the Rport Features(RPF) Sub Module */ if (!BFA_FCS_PID_IS_WKA(rport->pid)) bfa_fcs_rpf_init(rport); return rport; } static void bfa_fcs_rport_free(struct bfa_fcs_rport_s *rport) { struct bfa_fcs_lport_s *port = rport->port; struct bfa_fcs_s *fcs = port->fcs; /* * - delete FC-4s * - delete BFA rport * - remove from queue of rports */ rport->plogi_pending = BFA_FALSE; if (bfa_fcs_lport_is_initiator(port)) { bfa_fcs_itnim_delete(rport->itnim); if (rport->pid != 0 && !BFA_FCS_PID_IS_WKA(rport->pid)) bfa_fcs_rpf_rport_offline(rport); } if (rport->bfa_rport) { bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_DELETE); rport->bfa_rport = NULL; } bfa_fcs_lport_del_rport(port, rport); fcs->num_rport_logins--; kfree(rport->rp_drv); } static void bfa_fcs_rport_aen_post(struct bfa_fcs_rport_s *rport, enum bfa_rport_aen_event event, struct bfa_rport_aen_data_s *data) { struct bfa_fcs_lport_s *port = rport->port; struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad; struct bfa_aen_entry_s *aen_entry; bfad_get_aen_entry(bfad, aen_entry); if (!aen_entry) return; if (event == BFA_RPORT_AEN_QOS_PRIO) aen_entry->aen_data.rport.priv.qos = data->priv.qos; else if (event == BFA_RPORT_AEN_QOS_FLOWID) aen_entry->aen_data.rport.priv.qos = data->priv.qos; aen_entry->aen_data.rport.vf_id = rport->port->fabric->vf_id; 
aen_entry->aen_data.rport.ppwwn = bfa_fcs_lport_get_pwwn( bfa_fcs_get_base_port(rport->fcs)); aen_entry->aen_data.rport.lpwwn = bfa_fcs_lport_get_pwwn(rport->port); aen_entry->aen_data.rport.rpwwn = rport->pwwn; /* Send the AEN notification */ bfad_im_post_vendor_event(aen_entry, bfad, ++rport->fcs->fcs_aen_seq, BFA_AEN_CAT_RPORT, event); } static void bfa_fcs_rport_fcs_online_action(struct bfa_fcs_rport_s *rport) { if ((!rport->pid) || (!rport->pwwn)) { bfa_trc(rport->fcs, rport->pid); bfa_sm_fault(rport->fcs, rport->pid); } bfa_sm_send_event(rport->itnim, BFA_FCS_ITNIM_SM_FCS_ONLINE); } static void bfa_fcs_rport_hal_online_action(struct bfa_fcs_rport_s *rport) { struct bfa_fcs_lport_s *port = rport->port; struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad; char lpwwn_buf[BFA_STRING_32]; char rpwwn_buf[BFA_STRING_32]; rport->stats.onlines++; if ((!rport->pid) || (!rport->pwwn)) { bfa_trc(rport->fcs, rport->pid); bfa_sm_fault(rport->fcs, rport->pid); } if (bfa_fcs_lport_is_initiator(port)) { bfa_fcs_itnim_brp_online(rport->itnim); if (!BFA_FCS_PID_IS_WKA(rport->pid)) bfa_fcs_rpf_rport_online(rport); } wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port)); wwn2str(rpwwn_buf, rport->pwwn); if (!BFA_FCS_PID_IS_WKA(rport->pid)) { BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Remote port (WWN = %s) online for logical port (WWN = %s)\n", rpwwn_buf, lpwwn_buf); bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_ONLINE, NULL); } } static void bfa_fcs_rport_fcs_offline_action(struct bfa_fcs_rport_s *rport) { if (!BFA_FCS_PID_IS_WKA(rport->pid)) bfa_fcs_rpf_rport_offline(rport); bfa_fcs_itnim_rport_offline(rport->itnim); } static void bfa_fcs_rport_hal_offline_action(struct bfa_fcs_rport_s *rport) { struct bfa_fcs_lport_s *port = rport->port; struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad; char lpwwn_buf[BFA_STRING_32]; char rpwwn_buf[BFA_STRING_32]; if (!rport->bfa_rport) { bfa_fcs_rport_fcs_offline_action(rport); return; } rport->stats.offlines++; wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port)); wwn2str(rpwwn_buf, rport->pwwn); if (!BFA_FCS_PID_IS_WKA(rport->pid)) { if (bfa_fcs_lport_is_online(rport->port) == BFA_TRUE) { BFA_LOG(KERN_ERR, bfad, bfa_log_level, "Remote port (WWN = %s) connectivity lost for " "logical port (WWN = %s)\n", rpwwn_buf, lpwwn_buf); bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_DISCONNECT, NULL); } else { BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Remote port (WWN = %s) offlined by " "logical port (WWN = %s)\n", rpwwn_buf, lpwwn_buf); bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_OFFLINE, NULL); } } if (bfa_fcs_lport_is_initiator(port)) { bfa_fcs_itnim_rport_offline(rport->itnim); if (!BFA_FCS_PID_IS_WKA(rport->pid)) bfa_fcs_rpf_rport_offline(rport); } } /* * Update rport parameters from PLOGI or PLOGI accept. 
*/ static void bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, struct fc_logi_s *plogi) { bfa_fcs_lport_t *port = rport->port; /* * - port name * - node name */ rport->pwwn = plogi->port_name; rport->nwwn = plogi->node_name; /* * - class of service */ rport->fc_cos = 0; if (plogi->class3.class_valid) rport->fc_cos = FC_CLASS_3; if (plogi->class2.class_valid) rport->fc_cos |= FC_CLASS_2; /* * - CISC * - MAX receive frame size */ rport->cisc = plogi->csp.cisc; if (be16_to_cpu(plogi->class3.rxsz) < be16_to_cpu(plogi->csp.rxsz)) rport->maxfrsize = be16_to_cpu(plogi->class3.rxsz); else rport->maxfrsize = be16_to_cpu(plogi->csp.rxsz); bfa_trc(port->fcs, be16_to_cpu(plogi->csp.bbcred)); bfa_trc(port->fcs, port->fabric->bb_credit); /* * Direct Attach P2P mode : * This is to handle a bug (233476) in IBM targets in Direct Attach * Mode. Basically, in FLOGI Accept the target would have * erroneously set the BB Credit to the value used in the FLOGI * sent by the HBA. It uses the correct value (its own BB credit) * in PLOGI. */ if ((!bfa_fcs_fabric_is_switched(port->fabric)) && (be16_to_cpu(plogi->csp.bbcred) < port->fabric->bb_credit)) { bfa_trc(port->fcs, be16_to_cpu(plogi->csp.bbcred)); bfa_trc(port->fcs, port->fabric->bb_credit); port->fabric->bb_credit = be16_to_cpu(plogi->csp.bbcred); bfa_fcport_set_tx_bbcredit(port->fcs->bfa, port->fabric->bb_credit); } } /* * Called to handle LOGO received from an existing remote port. */ static void bfa_fcs_rport_process_logo(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs) { rport->reply_oxid = fchs->ox_id; bfa_trc(rport->fcs, rport->reply_oxid); rport->prlo = BFA_FALSE; rport->stats.logo_rcvd++; bfa_sm_send_event(rport, RPSM_EVENT_LOGO_RCVD); } /* * fcs_rport_public FCS rport public interfaces */ /* * Called by bport/vport to create a remote port instance for a discovered * remote device. * * @param[in] port - base port or vport * @param[in] rpid - remote port ID * * @return None */ struct bfa_fcs_rport_s * bfa_fcs_rport_create(struct bfa_fcs_lport_s *port, u32 rpid) { struct bfa_fcs_rport_s *rport; bfa_trc(port->fcs, rpid); rport = bfa_fcs_rport_alloc(port, WWN_NULL, rpid); if (!rport) return NULL; bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_SEND); return rport; } /* * Called to create a rport for which only the wwn is known. * * @param[in] port - base port * @param[in] rpwwn - remote port wwn * * @return None */ struct bfa_fcs_rport_s * bfa_fcs_rport_create_by_wwn(struct bfa_fcs_lport_s *port, wwn_t rpwwn) { struct bfa_fcs_rport_s *rport; bfa_trc(port->fcs, rpwwn); rport = bfa_fcs_rport_alloc(port, rpwwn, 0); if (!rport) return NULL; bfa_sm_send_event(rport, RPSM_EVENT_ADDRESS_DISC); return rport; } /* * Called by bport in private loop topology to indicate that a * rport has been discovered and plogi has been completed. * * @param[in] port - base port or vport * @param[in] rpid - remote port ID */ void bfa_fcs_rport_start(struct bfa_fcs_lport_s *port, struct fchs_s *fchs, struct fc_logi_s *plogi) { struct bfa_fcs_rport_s *rport; rport = bfa_fcs_rport_alloc(port, WWN_NULL, fchs->s_id); if (!rport) return; bfa_fcs_rport_update(rport, plogi); bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_COMP); } /* * Called by bport/vport to handle PLOGI received from a new remote port. * If an existing rport does a plogi, it will be handled separately. 
*/ void bfa_fcs_rport_plogi_create(struct bfa_fcs_lport_s *port, struct fchs_s *fchs, struct fc_logi_s *plogi) { struct bfa_fcs_rport_s *rport; rport = bfa_fcs_rport_alloc(port, plogi->port_name, fchs->s_id); if (!rport) return; bfa_fcs_rport_update(rport, plogi); rport->reply_oxid = fchs->ox_id; bfa_trc(rport->fcs, rport->reply_oxid); rport->stats.plogi_rcvd++; bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_RCVD); } /* * Called by bport/vport to handle PLOGI received from an existing * remote port. */ void bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs, struct fc_logi_s *plogi) { /* * @todo Handle P2P and initiator-initiator. */ bfa_fcs_rport_update(rport, plogi); rport->reply_oxid = rx_fchs->ox_id; bfa_trc(rport->fcs, rport->reply_oxid); rport->pid = rx_fchs->s_id; bfa_trc(rport->fcs, rport->pid); rport->stats.plogi_rcvd++; bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_RCVD); } /* * Called by bport/vport to notify SCN for the remote port */ void bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport) { rport->stats.rscns++; bfa_sm_send_event(rport, RPSM_EVENT_FAB_SCN); } /* * brief * This routine BFA callback for bfa_rport_online() call. * * param[in] cb_arg - rport struct. * * return * void * * Special Considerations: * * note */ void bfa_cb_rport_online(void *cbarg) { struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg; bfa_trc(rport->fcs, rport->pwwn); bfa_sm_send_event(rport, RPSM_EVENT_HCB_ONLINE); } /* * brief * This routine BFA callback for bfa_rport_offline() call. * * param[in] rport - * * return * void * * Special Considerations: * * note */ void bfa_cb_rport_offline(void *cbarg) { struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg; bfa_trc(rport->fcs, rport->pwwn); bfa_sm_send_event(rport, RPSM_EVENT_HCB_OFFLINE); } /* * brief * This routine is a static BFA callback when there is a QoS flow_id * change notification * * param[in] rport - * * return * void * * Special Considerations: * * note */ void bfa_cb_rport_qos_scn_flowid(void *cbarg, struct bfa_rport_qos_attr_s old_qos_attr, struct bfa_rport_qos_attr_s new_qos_attr) { struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg; struct bfa_rport_aen_data_s aen_data; bfa_trc(rport->fcs, rport->pwwn); aen_data.priv.qos = new_qos_attr; bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_QOS_FLOWID, &aen_data); } void bfa_cb_rport_scn_online(struct bfa_s *bfa) { struct bfa_fcs_s *fcs = &((struct bfad_s *)bfa->bfad)->bfa_fcs; struct bfa_fcs_lport_s *port = bfa_fcs_get_base_port(fcs); struct bfa_fcs_rport_s *rp; struct list_head *qe; list_for_each(qe, &port->rport_q) { rp = (struct bfa_fcs_rport_s *) qe; bfa_sm_send_event(rp, RPSM_EVENT_SCN_ONLINE); rp->scn_online = BFA_TRUE; } if (bfa_fcs_lport_is_online(port)) bfa_fcs_lport_lip_scn_online(port); } void bfa_cb_rport_scn_no_dev(void *rport) { struct bfa_fcs_rport_s *rp = rport; bfa_sm_send_event(rp, RPSM_EVENT_SCN_OFFLINE); rp->scn_online = BFA_FALSE; } void bfa_cb_rport_scn_offline(struct bfa_s *bfa) { struct bfa_fcs_s *fcs = &((struct bfad_s *)bfa->bfad)->bfa_fcs; struct bfa_fcs_lport_s *port = bfa_fcs_get_base_port(fcs); struct bfa_fcs_rport_s *rp; struct list_head *qe; list_for_each(qe, &port->rport_q) { rp = (struct bfa_fcs_rport_s *) qe; bfa_sm_send_event(rp, RPSM_EVENT_SCN_OFFLINE); rp->scn_online = BFA_FALSE; } } /* * brief * This routine is a static BFA callback when there is a QoS priority * change notification * * param[in] rport - * * return * void * * Special Considerations: * * note */ void bfa_cb_rport_qos_scn_prio(void *cbarg, 
struct bfa_rport_qos_attr_s old_qos_attr, struct bfa_rport_qos_attr_s new_qos_attr) { struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg; struct bfa_rport_aen_data_s aen_data; bfa_trc(rport->fcs, rport->pwwn); aen_data.priv.qos = new_qos_attr; bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_QOS_PRIO, &aen_data); } /* * Called to process any unsolicited frames from this remote port */ void bfa_fcs_rport_uf_recv(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs, u16 len) { struct bfa_fcs_lport_s *port = rport->port; struct fc_els_cmd_s *els_cmd; bfa_trc(rport->fcs, fchs->s_id); bfa_trc(rport->fcs, fchs->d_id); bfa_trc(rport->fcs, fchs->type); if (fchs->type != FC_TYPE_ELS) return; els_cmd = (struct fc_els_cmd_s *) (fchs + 1); bfa_trc(rport->fcs, els_cmd->els_code); switch (els_cmd->els_code) { case FC_ELS_LOGO: bfa_stats(port, plogi_rcvd); bfa_fcs_rport_process_logo(rport, fchs); break; case FC_ELS_ADISC: bfa_stats(port, adisc_rcvd); bfa_fcs_rport_process_adisc(rport, fchs, len); break; case FC_ELS_PRLO: bfa_stats(port, prlo_rcvd); if (bfa_fcs_lport_is_initiator(port)) bfa_fcs_fcpim_uf_recv(rport->itnim, fchs, len); break; case FC_ELS_PRLI: bfa_stats(port, prli_rcvd); bfa_fcs_rport_process_prli(rport, fchs, len); break; case FC_ELS_RPSC: bfa_stats(port, rpsc_rcvd); bfa_fcs_rport_process_rpsc(rport, fchs, len); break; default: bfa_stats(port, un_handled_els_rcvd); bfa_fcs_rport_send_ls_rjt(rport, fchs, FC_LS_RJT_RSN_CMD_NOT_SUPP, FC_LS_RJT_EXP_NO_ADDL_INFO); break; } } /* send best case acc to prlo */ static void bfa_fcs_rport_send_prlo_acc(struct bfa_fcs_rport_s *rport) { struct bfa_fcs_lport_s *port = rport->port; struct fchs_s fchs; struct bfa_fcxp_s *fcxp; int len; bfa_trc(rport->fcs, rport->pid); fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE); if (!fcxp) return; len = fc_prlo_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid, bfa_fcs_lport_get_fcid(port), rport->reply_oxid, 0); bfa_fcxp_send(fcxp, rport->bfa_rport, port->fabric->vf_id, port->lp_tag, BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0); } /* * Send an LS reject */ static void bfa_fcs_rport_send_ls_rjt(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs, u8 reason_code, u8 reason_code_expl) { struct bfa_fcs_lport_s *port = rport->port; struct fchs_s fchs; struct bfa_fcxp_s *fcxp; int len; bfa_trc(rport->fcs, rx_fchs->s_id); fcxp = bfa_fcs_fcxp_alloc(rport->fcs, BFA_FALSE); if (!fcxp) return; len = fc_ls_rjt_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rx_fchs->s_id, bfa_fcs_lport_get_fcid(port), rx_fchs->ox_id, reason_code, reason_code_expl); bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0); } /* * Return state of rport. */ int bfa_fcs_rport_get_state(struct bfa_fcs_rport_s *rport) { return bfa_sm_to_state(rport_sm_table, rport->sm); } /* * brief * Called by the Driver to set rport delete/ageout timeout * * param[in] rport timeout value in seconds. 
* * return None */ void bfa_fcs_rport_set_del_timeout(u8 rport_tmo) { /* convert to Millisecs */ if (rport_tmo > 0) bfa_fcs_rport_del_timeout = rport_tmo * 1000; } void bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, __be16 ox_id) { bfa_trc(rport->fcs, rport->pid); rport->prlo = BFA_TRUE; rport->reply_oxid = ox_id; bfa_sm_send_event(rport, RPSM_EVENT_PRLO_RCVD); } /* * Called by BFAD to set the max limit on number of bfa_fcs_rport allocation * which limits number of concurrent logins to remote ports */ void bfa_fcs_rport_set_max_logins(u32 max_logins) { if (max_logins > 0) bfa_fcs_rport_max_logins = max_logins; } void bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport, struct bfa_rport_attr_s *rport_attr) { struct bfa_rport_qos_attr_s qos_attr; struct bfa_fcs_lport_s *port = rport->port; bfa_port_speed_t rport_speed = rport->rpf.rpsc_speed; struct bfa_port_attr_s port_attr; bfa_fcport_get_attr(rport->fcs->bfa, &port_attr); memset(rport_attr, 0, sizeof(struct bfa_rport_attr_s)); memset(&qos_attr, 0, sizeof(struct bfa_rport_qos_attr_s)); rport_attr->pid = rport->pid; rport_attr->pwwn = rport->pwwn; rport_attr->nwwn = rport->nwwn; rport_attr->cos_supported = rport->fc_cos; rport_attr->df_sz = rport->maxfrsize; rport_attr->state = bfa_fcs_rport_get_state(rport); rport_attr->fc_cos = rport->fc_cos; rport_attr->cisc = rport->cisc; rport_attr->scsi_function = rport->scsi_function; rport_attr->curr_speed = rport->rpf.rpsc_speed; rport_attr->assigned_speed = rport->rpf.assigned_speed; if (rport->bfa_rport) { qos_attr.qos_priority = rport->bfa_rport->qos_attr.qos_priority; qos_attr.qos_flow_id = cpu_to_be32(rport->bfa_rport->qos_attr.qos_flow_id); } rport_attr->qos_attr = qos_attr; rport_attr->trl_enforced = BFA_FALSE; if (bfa_fcport_is_ratelim(port->fcs->bfa) && (rport->scsi_function == BFA_RPORT_TARGET)) { if (rport_speed == BFA_PORT_SPEED_UNKNOWN) rport_speed = bfa_fcport_get_ratelim_speed(rport->fcs->bfa); if ((bfa_fcs_lport_get_rport_max_speed(port) != BFA_PORT_SPEED_UNKNOWN) && (rport_speed < port_attr.speed)) rport_attr->trl_enforced = BFA_TRUE; } } /* * Remote port implementation. */ /* * fcs_rport_api FCS rport API. */ struct bfa_fcs_rport_s * bfa_fcs_rport_lookup(struct bfa_fcs_lport_s *port, wwn_t rpwwn) { struct bfa_fcs_rport_s *rport; rport = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn); if (rport == NULL) { /* * TBD Error handling */ } return rport; } struct bfa_fcs_rport_s * bfa_fcs_rport_lookup_by_nwwn(struct bfa_fcs_lport_s *port, wwn_t rnwwn) { struct bfa_fcs_rport_s *rport; rport = bfa_fcs_lport_get_rport_by_nwwn(port, rnwwn); if (rport == NULL) { /* * TBD Error handling */ } return rport; } /* * Remote port features (RPF) implementation. 
*/ #define BFA_FCS_RPF_RETRIES (3) #define BFA_FCS_RPF_RETRY_TIMEOUT (1000) /* 1 sec (In millisecs) */ static void bfa_fcs_rpf_send_rpsc2(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced); static void bfa_fcs_rpf_rpsc2_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, bfa_status_t req_status, u32 rsp_len, u32 resid_len, struct fchs_s *rsp_fchs); static void bfa_fcs_rpf_timeout(void *arg); /* * fcs_rport_ftrs_sm FCS rport state machine events */ enum rpf_event { RPFSM_EVENT_RPORT_OFFLINE = 1, /* Rport offline */ RPFSM_EVENT_RPORT_ONLINE = 2, /* Rport online */ RPFSM_EVENT_FCXP_SENT = 3, /* Frame from has been sent */ RPFSM_EVENT_TIMEOUT = 4, /* Rport SM timeout event */ RPFSM_EVENT_RPSC_COMP = 5, RPFSM_EVENT_RPSC_FAIL = 6, RPFSM_EVENT_RPSC_ERROR = 7, }; static void bfa_fcs_rpf_sm_uninit(struct bfa_fcs_rpf_s *rpf, enum rpf_event event); static void bfa_fcs_rpf_sm_rpsc_sending(struct bfa_fcs_rpf_s *rpf, enum rpf_event event); static void bfa_fcs_rpf_sm_rpsc(struct bfa_fcs_rpf_s *rpf, enum rpf_event event); static void bfa_fcs_rpf_sm_rpsc_retry(struct bfa_fcs_rpf_s *rpf, enum rpf_event event); static void bfa_fcs_rpf_sm_offline(struct bfa_fcs_rpf_s *rpf, enum rpf_event event); static void bfa_fcs_rpf_sm_online(struct bfa_fcs_rpf_s *rpf, enum rpf_event event); static void bfa_fcs_rpf_sm_uninit(struct bfa_fcs_rpf_s *rpf, enum rpf_event event) { struct bfa_fcs_rport_s *rport = rpf->rport; struct bfa_fcs_fabric_s *fabric = &rport->fcs->fabric; bfa_trc(rport->fcs, rport->pwwn); bfa_trc(rport->fcs, rport->pid); bfa_trc(rport->fcs, event); switch (event) { case RPFSM_EVENT_RPORT_ONLINE: /* Send RPSC2 to a Brocade fabric only. */ if ((!BFA_FCS_PID_IS_WKA(rport->pid)) && ((rport->port->fabric->lps->brcd_switch) || (bfa_fcs_fabric_get_switch_oui(fabric) == BFA_FCS_BRCD_SWITCH_OUI))) { bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending); rpf->rpsc_retries = 0; bfa_fcs_rpf_send_rpsc2(rpf, NULL); } break; case RPFSM_EVENT_RPORT_OFFLINE: break; default: bfa_sm_fault(rport->fcs, event); } } static void bfa_fcs_rpf_sm_rpsc_sending(struct bfa_fcs_rpf_s *rpf, enum rpf_event event) { struct bfa_fcs_rport_s *rport = rpf->rport; bfa_trc(rport->fcs, event); switch (event) { case RPFSM_EVENT_FCXP_SENT: bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc); break; case RPFSM_EVENT_RPORT_OFFLINE: bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline); bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rpf->fcxp_wqe); rpf->rpsc_retries = 0; break; default: bfa_sm_fault(rport->fcs, event); } } static void bfa_fcs_rpf_sm_rpsc(struct bfa_fcs_rpf_s *rpf, enum rpf_event event) { struct bfa_fcs_rport_s *rport = rpf->rport; bfa_trc(rport->fcs, rport->pid); bfa_trc(rport->fcs, event); switch (event) { case RPFSM_EVENT_RPSC_COMP: bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_online); /* Update speed info in f/w via BFA */ if (rpf->rpsc_speed != BFA_PORT_SPEED_UNKNOWN) bfa_rport_speed(rport->bfa_rport, rpf->rpsc_speed); else if (rpf->assigned_speed != BFA_PORT_SPEED_UNKNOWN) bfa_rport_speed(rport->bfa_rport, rpf->assigned_speed); break; case RPFSM_EVENT_RPSC_FAIL: /* RPSC not supported by rport */ bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_online); break; case RPFSM_EVENT_RPSC_ERROR: /* need to retry...delayed a bit. 
*/ if (rpf->rpsc_retries++ < BFA_FCS_RPF_RETRIES) { bfa_timer_start(rport->fcs->bfa, &rpf->timer, bfa_fcs_rpf_timeout, rpf, BFA_FCS_RPF_RETRY_TIMEOUT); bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_retry); } else { bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_online); } break; case RPFSM_EVENT_RPORT_OFFLINE: bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline); bfa_fcxp_discard(rpf->fcxp); rpf->rpsc_retries = 0; break; default: bfa_sm_fault(rport->fcs, event); } } static void bfa_fcs_rpf_sm_rpsc_retry(struct bfa_fcs_rpf_s *rpf, enum rpf_event event) { struct bfa_fcs_rport_s *rport = rpf->rport; bfa_trc(rport->fcs, rport->pid); bfa_trc(rport->fcs, event); switch (event) { case RPFSM_EVENT_TIMEOUT: /* re-send the RPSC */ bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending); bfa_fcs_rpf_send_rpsc2(rpf, NULL); break; case RPFSM_EVENT_RPORT_OFFLINE: bfa_timer_stop(&rpf->timer); bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline); rpf->rpsc_retries = 0; break; default: bfa_sm_fault(rport->fcs, event); } } static void bfa_fcs_rpf_sm_online(struct bfa_fcs_rpf_s *rpf, enum rpf_event event) { struct bfa_fcs_rport_s *rport = rpf->rport; bfa_trc(rport->fcs, rport->pwwn); bfa_trc(rport->fcs, rport->pid); bfa_trc(rport->fcs, event); switch (event) { case RPFSM_EVENT_RPORT_OFFLINE: bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline); rpf->rpsc_retries = 0; break; default: bfa_sm_fault(rport->fcs, event); } } static void bfa_fcs_rpf_sm_offline(struct bfa_fcs_rpf_s *rpf, enum rpf_event event) { struct bfa_fcs_rport_s *rport = rpf->rport; bfa_trc(rport->fcs, rport->pwwn); bfa_trc(rport->fcs, rport->pid); bfa_trc(rport->fcs, event); switch (event) { case RPFSM_EVENT_RPORT_ONLINE: bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending); bfa_fcs_rpf_send_rpsc2(rpf, NULL); break; case RPFSM_EVENT_RPORT_OFFLINE: break; default: bfa_sm_fault(rport->fcs, event); } } /* * Called when Rport is created. */ void bfa_fcs_rpf_init(struct bfa_fcs_rport_s *rport) { struct bfa_fcs_rpf_s *rpf = &rport->rpf; bfa_trc(rport->fcs, rport->pid); rpf->rport = rport; bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_uninit); } /* * Called when Rport becomes online */ void bfa_fcs_rpf_rport_online(struct bfa_fcs_rport_s *rport) { bfa_trc(rport->fcs, rport->pid); if (__fcs_min_cfg(rport->port->fcs)) return; if (bfa_fcs_fabric_is_switched(rport->port->fabric)) bfa_sm_send_event(&rport->rpf, RPFSM_EVENT_RPORT_ONLINE); } /* * Called when Rport becomes offline */ void bfa_fcs_rpf_rport_offline(struct bfa_fcs_rport_s *rport) { bfa_trc(rport->fcs, rport->pid); if (__fcs_min_cfg(rport->port->fcs)) return; rport->rpf.rpsc_speed = 0; bfa_sm_send_event(&rport->rpf, RPFSM_EVENT_RPORT_OFFLINE); } static void bfa_fcs_rpf_timeout(void *arg) { struct bfa_fcs_rpf_s *rpf = (struct bfa_fcs_rpf_s *) arg; struct bfa_fcs_rport_s *rport = rpf->rport; bfa_trc(rport->fcs, rport->pid); bfa_sm_send_event(rpf, RPFSM_EVENT_TIMEOUT); } static void bfa_fcs_rpf_send_rpsc2(void *rpf_cbarg, struct bfa_fcxp_s *fcxp_alloced) { struct bfa_fcs_rpf_s *rpf = (struct bfa_fcs_rpf_s *)rpf_cbarg; struct bfa_fcs_rport_s *rport = rpf->rport; struct bfa_fcs_lport_s *port = rport->port; struct fchs_s fchs; int len; struct bfa_fcxp_s *fcxp; bfa_trc(rport->fcs, rport->pwwn); fcxp = fcxp_alloced ? 
fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE); if (!fcxp) { bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rpf->fcxp_wqe, bfa_fcs_rpf_send_rpsc2, rpf, BFA_TRUE); return; } rpf->fcxp = fcxp; len = fc_rpsc2_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid, bfa_fcs_lport_get_fcid(port), &rport->pid, 1); bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, FC_CLASS_3, len, &fchs, bfa_fcs_rpf_rpsc2_response, rpf, FC_MAX_PDUSZ, FC_ELS_TOV); rport->stats.rpsc_sent++; bfa_sm_send_event(rpf, RPFSM_EVENT_FCXP_SENT); } static void bfa_fcs_rpf_rpsc2_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, bfa_status_t req_status, u32 rsp_len, u32 resid_len, struct fchs_s *rsp_fchs) { struct bfa_fcs_rpf_s *rpf = (struct bfa_fcs_rpf_s *) cbarg; struct bfa_fcs_rport_s *rport = rpf->rport; struct fc_ls_rjt_s *ls_rjt; struct fc_rpsc2_acc_s *rpsc2_acc; u16 num_ents; bfa_trc(rport->fcs, req_status); if (req_status != BFA_STATUS_OK) { bfa_trc(rport->fcs, req_status); if (req_status == BFA_STATUS_ETIMER) rport->stats.rpsc_failed++; bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_ERROR); return; } rpsc2_acc = (struct fc_rpsc2_acc_s *) BFA_FCXP_RSP_PLD(fcxp); if (rpsc2_acc->els_cmd == FC_ELS_ACC) { rport->stats.rpsc_accs++; num_ents = be16_to_cpu(rpsc2_acc->num_pids); bfa_trc(rport->fcs, num_ents); if (num_ents > 0) { WARN_ON(be32_to_cpu(rpsc2_acc->port_info[0].pid) != bfa_ntoh3b(rport->pid)); bfa_trc(rport->fcs, be32_to_cpu(rpsc2_acc->port_info[0].pid)); bfa_trc(rport->fcs, be16_to_cpu(rpsc2_acc->port_info[0].speed)); bfa_trc(rport->fcs, be16_to_cpu(rpsc2_acc->port_info[0].index)); bfa_trc(rport->fcs, rpsc2_acc->port_info[0].type); if (rpsc2_acc->port_info[0].speed == 0) { bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_ERROR); return; } rpf->rpsc_speed = fc_rpsc_operspeed_to_bfa_speed( be16_to_cpu(rpsc2_acc->port_info[0].speed)); bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_COMP); } } else { ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp); bfa_trc(rport->fcs, ls_rjt->reason_code); bfa_trc(rport->fcs, ls_rjt->reason_code_expl); rport->stats.rpsc_rejects++; if (ls_rjt->reason_code == FC_LS_RJT_RSN_CMD_NOT_SUPP) bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_FAIL); else bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_ERROR); } }
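The send routines in this file (bfa_fcs_rport_send_plogi(), _send_adisc(), _send_nsdisc(), _send_logo(), bfa_fcs_rpf_send_rpsc2()) all follow one idiom: use the pre-allocated fcxp if the wait queue handed one in, otherwise try to allocate, and if the pool is exhausted park a callback on a wait-queue element so the same sender is re-invoked once a frame frees up. The following is a minimal, standalone sketch of that alloc-or-wait idiom only; every toy_* name is invented for illustration, and the real pool and wait queue live behind bfa_fcs_fcxp_alloc()/bfa_fcs_fcxp_alloc_wait() in the driver.

#include <stdio.h>
#include <stddef.h>

/* Toy "fcxp" pool: one reusable frame plus a single-entry wait queue. */
struct toy_fcxp { int in_use; };

typedef void (*toy_send_fn)(void *cbarg, struct toy_fcxp *fcxp_alloced);

struct toy_wqe {                        /* analogous to the driver's fcxp_wqe */
	toy_send_fn cbfn;
	void *cbarg;
};

static struct toy_fcxp pool_frame;
static struct toy_wqe *pending;

static struct toy_fcxp *toy_fcxp_alloc(void)
{
	if (pool_frame.in_use)
		return NULL;            /* pool exhausted */
	pool_frame.in_use = 1;
	return &pool_frame;
}

static void toy_fcxp_alloc_wait(struct toy_wqe *wqe, toy_send_fn cbfn, void *cbarg)
{
	wqe->cbfn = cbfn;               /* remember who to call back */
	wqe->cbarg = cbarg;
	pending = wqe;
}

static void toy_fcxp_free(struct toy_fcxp *fcxp)
{
	fcxp->in_use = 0;
	if (pending) {                  /* hand the freed frame to the waiter */
		struct toy_wqe *wqe = pending;

		pending = NULL;
		fcxp->in_use = 1;
		wqe->cbfn(wqe->cbarg, fcxp);
	}
}

/*
 * Sender written in the same shape as the rport send routines above: use the
 * pre-allocated frame if one was handed in, otherwise try the pool, and if
 * that fails park on the wait queue and return.
 */
static struct toy_wqe send_wqe;

static void toy_send_plogi(void *cbarg, struct toy_fcxp *fcxp_alloced)
{
	const char *who = cbarg;
	struct toy_fcxp *fcxp = fcxp_alloced ? fcxp_alloced : toy_fcxp_alloc();

	if (!fcxp) {
		toy_fcxp_alloc_wait(&send_wqe, toy_send_plogi, cbarg);
		printf("%s: no frame, waiting\n", who);
		return;
	}
	printf("%s: PLOGI sent\n", who);
}

int main(void)
{
	struct toy_fcxp *busy = toy_fcxp_alloc();   /* exhaust the pool */

	toy_send_plogi((void *)"rport1", NULL);     /* parks on the wait queue */
	toy_fcxp_free(busy);                        /* resumes the parked send */
	return 0;
}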
linux-master
drivers/scsi/bfa/bfa_fcs_rport.c
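Throughout bfa_fcs_rport.c the per-rport (and per-rpf) state machine stores the current state as a function pointer and delivers events by calling it: bfa_sm_set_state() records the handler and bfa_sm_send_event() invokes it with the event code, with unexpected events trapped by bfa_sm_fault(). Below is a minimal, self-contained sketch of that dispatch pattern, assuming nothing beyond what those calls show; all demo_* names are invented for illustration and the real bfa_sm_* helpers are defined elsewhere in the driver.

#include <stdio.h>

/* Illustrative event set; the driver's real enum (rport_event) is larger. */
enum demo_event { DEMO_EVENT_PLOGI_SEND = 1, DEMO_EVENT_ACCEPTED, DEMO_EVENT_DELETE };

struct demo_rport;
typedef void (*demo_sm_t)(struct demo_rport *rport, enum demo_event event);

struct demo_rport {
	demo_sm_t sm;           /* current state == current handler function */
};

/* Mirrors the shape of the bfa_sm_set_state()/bfa_sm_send_event() usage above. */
#define demo_sm_set_state(_rp, _state)  ((_rp)->sm = (_state))
#define demo_sm_send_event(_rp, _ev)    ((_rp)->sm((_rp), (_ev)))

static void demo_sm_uninit(struct demo_rport *rport, enum demo_event event);
static void demo_sm_plogi(struct demo_rport *rport, enum demo_event event);
static void demo_sm_online(struct demo_rport *rport, enum demo_event event);

static void demo_sm_uninit(struct demo_rport *rport, enum demo_event event)
{
	switch (event) {
	case DEMO_EVENT_PLOGI_SEND:
		demo_sm_set_state(rport, demo_sm_plogi);
		break;
	default:
		printf("uninit: unhandled event %d\n", event);
	}
}

static void demo_sm_plogi(struct demo_rport *rport, enum demo_event event)
{
	switch (event) {
	case DEMO_EVENT_ACCEPTED:
		demo_sm_set_state(rport, demo_sm_online);
		break;
	case DEMO_EVENT_DELETE:
		demo_sm_set_state(rport, demo_sm_uninit);
		break;
	default:
		printf("plogi: unhandled event %d\n", event);
	}
}

static void demo_sm_online(struct demo_rport *rport, enum demo_event event)
{
	switch (event) {
	case DEMO_EVENT_DELETE:
		demo_sm_set_state(rport, demo_sm_uninit);
		break;
	default:
		printf("online: unhandled event %d\n", event);
	}
}

int main(void)
{
	struct demo_rport rp;

	demo_sm_set_state(&rp, demo_sm_uninit);
	demo_sm_send_event(&rp, DEMO_EVENT_PLOGI_SEND);  /* uninit -> plogi  */
	demo_sm_send_event(&rp, DEMO_EVENT_ACCEPTED);    /* plogi  -> online */
	demo_sm_send_event(&rp, DEMO_EVENT_DELETE);      /* online -> uninit */
	printf("back in uninit: %d\n", rp.sm == demo_sm_uninit);
	return 0;
}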
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. * Copyright (c) 2014- QLogic Corporation. * All rights reserved * www.qlogic.com * * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. */ #include "bfad_drv.h" #include "bfa_ioc.h" #include "bfi_reg.h" #include "bfa_defs.h" BFA_TRC_FILE(CNA, IOC_CT); #define bfa_ioc_ct_sync_pos(__ioc) \ ((uint32_t) (1 << bfa_ioc_pcifn(__ioc))) #define BFA_IOC_SYNC_REQD_SH 16 #define bfa_ioc_ct_get_sync_ackd(__val) (__val & 0x0000ffff) #define bfa_ioc_ct_clear_sync_ackd(__val) (__val & 0xffff0000) #define bfa_ioc_ct_get_sync_reqd(__val) (__val >> BFA_IOC_SYNC_REQD_SH) #define bfa_ioc_ct_sync_reqd_pos(__ioc) \ (bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH) /* * forward declarations */ static bfa_boolean_t bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc); static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc); static void bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc); static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc); static bfa_boolean_t bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc); static void bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc); static void bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc); static void bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc); static bfa_boolean_t bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc); static void bfa_ioc_ct_set_cur_ioc_fwstate( struct bfa_ioc_s *ioc, enum bfi_ioc_state fwstate); static enum bfi_ioc_state bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc_s *ioc); static void bfa_ioc_ct_set_alt_ioc_fwstate( struct bfa_ioc_s *ioc, enum bfi_ioc_state fwstate); static enum bfi_ioc_state bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc_s *ioc); static struct bfa_ioc_hwif_s hwif_ct; static struct bfa_ioc_hwif_s hwif_ct2; /* * Return true if firmware of current driver matches the running firmware. */ static bfa_boolean_t bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc) { enum bfi_ioc_state ioc_fwstate; u32 usecnt; struct bfi_ioc_image_hdr_s fwhdr; bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); usecnt = readl(ioc->ioc_regs.ioc_usage_reg); /* * If usage count is 0, always return TRUE. */ if (usecnt == 0) { writel(1, ioc->ioc_regs.ioc_usage_reg); readl(ioc->ioc_regs.ioc_usage_sem_reg); writel(1, ioc->ioc_regs.ioc_usage_sem_reg); writel(0, ioc->ioc_regs.ioc_fail_sync); bfa_trc(ioc, usecnt); return BFA_TRUE; } ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate); bfa_trc(ioc, ioc_fwstate); /* * Use count cannot be non-zero and chip in uninitialized state. */ WARN_ON(ioc_fwstate == BFI_IOC_UNINIT); /* * Check if another driver with a different firmware is active */ bfa_ioc_fwver_get(ioc, &fwhdr); if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) { readl(ioc->ioc_regs.ioc_usage_sem_reg); writel(1, ioc->ioc_regs.ioc_usage_sem_reg); bfa_trc(ioc, usecnt); return BFA_FALSE; } /* * Same firmware version. Increment the reference count. */ usecnt++; writel(usecnt, ioc->ioc_regs.ioc_usage_reg); readl(ioc->ioc_regs.ioc_usage_sem_reg); writel(1, ioc->ioc_regs.ioc_usage_sem_reg); bfa_trc(ioc, usecnt); return BFA_TRUE; } static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc) { u32 usecnt; /* * decrement usage count */ bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); usecnt = readl(ioc->ioc_regs.ioc_usage_reg); WARN_ON(usecnt <= 0); usecnt--; writel(usecnt, ioc->ioc_regs.ioc_usage_reg); bfa_trc(ioc, usecnt); readl(ioc->ioc_regs.ioc_usage_sem_reg); writel(1, ioc->ioc_regs.ioc_usage_sem_reg); } /* * Notify other functions on HB failure. 
*/ static void bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc) { if (bfa_ioc_is_cna(ioc)) { writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt); writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt); /* Wait for halt to take effect */ readl(ioc->ioc_regs.ll_halt); readl(ioc->ioc_regs.alt_ll_halt); } else { writel(~0U, ioc->ioc_regs.err_set); readl(ioc->ioc_regs.err_set); } } /* * Host to LPU mailbox message addresses */ static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } ct_fnreg[] = { { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 }, { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 }, { HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 }, { HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 } }; /* * Host <-> LPU mailbox command/status registers - port 0 */ static struct { u32 hfn, lpu; } ct_p0reg[] = { { HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT }, { HOSTFN1_LPU0_CMD_STAT, LPU0_HOSTFN1_CMD_STAT }, { HOSTFN2_LPU0_CMD_STAT, LPU0_HOSTFN2_CMD_STAT }, { HOSTFN3_LPU0_CMD_STAT, LPU0_HOSTFN3_CMD_STAT } }; /* * Host <-> LPU mailbox command/status registers - port 1 */ static struct { u32 hfn, lpu; } ct_p1reg[] = { { HOSTFN0_LPU1_CMD_STAT, LPU1_HOSTFN0_CMD_STAT }, { HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT }, { HOSTFN2_LPU1_CMD_STAT, LPU1_HOSTFN2_CMD_STAT }, { HOSTFN3_LPU1_CMD_STAT, LPU1_HOSTFN3_CMD_STAT } }; static struct { uint32_t hfn_mbox, lpu_mbox, hfn_pgn, hfn, lpu, lpu_read; } ct2_reg[] = { { CT2_HOSTFN_LPU0_MBOX0, CT2_LPU0_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM, CT2_HOSTFN_LPU0_CMD_STAT, CT2_LPU0_HOSTFN_CMD_STAT, CT2_HOSTFN_LPU0_READ_STAT}, { CT2_HOSTFN_LPU1_MBOX0, CT2_LPU1_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM, CT2_HOSTFN_LPU1_CMD_STAT, CT2_LPU1_HOSTFN_CMD_STAT, CT2_HOSTFN_LPU1_READ_STAT}, }; static void bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc) { void __iomem *rb; int pcifn = bfa_ioc_pcifn(ioc); rb = bfa_ioc_bar0(ioc); ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox; ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox; ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn; if (ioc->port_id == 0) { ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG; ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG; ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG; ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn; ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu; ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0; ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1; } else { ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG); ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG); ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG; ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn; ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu; ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1; ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0; } /* * PSS control registers */ ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG); ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG); ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_LCLK_CTL_REG); ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_SCLK_CTL_REG); /* * IOC semaphore registers and serialization */ ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG); ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG); ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG); ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT); ioc->ioc_regs.ioc_fail_sync = (rb + BFA_IOC_FAIL_SYNC); /* * sram memory access */ ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START); ioc->ioc_regs.smem_pg0 = 
BFI_IOC_SMEM_PG0_CT; /* * err set reg : for notification of hb failure in fcmode */ ioc->ioc_regs.err_set = (rb + ERR_SET_REG); } static void bfa_ioc_ct2_reg_init(struct bfa_ioc_s *ioc) { void __iomem *rb; int port = bfa_ioc_portid(ioc); rb = bfa_ioc_bar0(ioc); ioc->ioc_regs.hfn_mbox = rb + ct2_reg[port].hfn_mbox; ioc->ioc_regs.lpu_mbox = rb + ct2_reg[port].lpu_mbox; ioc->ioc_regs.host_page_num_fn = rb + ct2_reg[port].hfn_pgn; ioc->ioc_regs.hfn_mbox_cmd = rb + ct2_reg[port].hfn; ioc->ioc_regs.lpu_mbox_cmd = rb + ct2_reg[port].lpu; ioc->ioc_regs.lpu_read_stat = rb + ct2_reg[port].lpu_read; if (port == 0) { ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC0_HBEAT_REG; ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG; ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG; ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0; ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1; } else { ioc->ioc_regs.heartbeat = (rb + CT2_BFA_IOC1_HBEAT_REG); ioc->ioc_regs.ioc_fwstate = (rb + CT2_BFA_IOC1_STATE_REG); ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG; ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1; ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0; } /* * PSS control registers */ ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG); ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG); ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + CT2_APP_PLL_LCLK_CTL_REG); ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + CT2_APP_PLL_SCLK_CTL_REG); /* * IOC semaphore registers and serialization */ ioc->ioc_regs.ioc_sem_reg = (rb + CT2_HOST_SEM0_REG); ioc->ioc_regs.ioc_usage_sem_reg = (rb + CT2_HOST_SEM1_REG); ioc->ioc_regs.ioc_init_sem_reg = (rb + CT2_HOST_SEM2_REG); ioc->ioc_regs.ioc_usage_reg = (rb + CT2_BFA_FW_USE_COUNT); ioc->ioc_regs.ioc_fail_sync = (rb + CT2_BFA_IOC_FAIL_SYNC); /* * sram memory access */ ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START); ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT; /* * err set reg : for notification of hb failure in fcmode */ ioc->ioc_regs.err_set = (rb + ERR_SET_REG); } /* * Initialize IOC to port mapping. 
*/ #define FNC_PERS_FN_SHIFT(__fn) ((__fn) * 8) static void bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc) { void __iomem *rb = ioc->pcidev.pci_bar_kva; u32 r32; /* * For catapult, base port id on personality register and IOC type */ r32 = readl(rb + FNC_PERS_REG); r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)); ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH; bfa_trc(ioc, bfa_ioc_pcifn(ioc)); bfa_trc(ioc, ioc->port_id); } static void bfa_ioc_ct2_map_port(struct bfa_ioc_s *ioc) { void __iomem *rb = ioc->pcidev.pci_bar_kva; u32 r32; r32 = readl(rb + CT2_HOSTFN_PERSONALITY0); ioc->port_id = ((r32 & __FC_LL_PORT_MAP__MK) >> __FC_LL_PORT_MAP__SH); bfa_trc(ioc, bfa_ioc_pcifn(ioc)); bfa_trc(ioc, ioc->port_id); } /* * Set interrupt mode for a function: INTX or MSIX */ static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix) { void __iomem *rb = ioc->pcidev.pci_bar_kva; u32 r32, mode; r32 = readl(rb + FNC_PERS_REG); bfa_trc(ioc, r32); mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) & __F0_INTX_STATUS; /* * If already in desired mode, do not change anything */ if ((!msix && mode) || (msix && !mode)) return; if (msix) mode = __F0_INTX_STATUS_MSIX; else mode = __F0_INTX_STATUS_INTA; r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))); r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))); bfa_trc(ioc, r32); writel(r32, rb + FNC_PERS_REG); } static bfa_boolean_t bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc_s *ioc) { u32 r32; r32 = readl(ioc->ioc_regs.lpu_read_stat); if (r32) { writel(1, ioc->ioc_regs.lpu_read_stat); return BFA_TRUE; } return BFA_FALSE; } /* * Cleanup hw semaphore and usecnt registers */ static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc) { bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); writel(0, ioc->ioc_regs.ioc_usage_reg); readl(ioc->ioc_regs.ioc_usage_sem_reg); writel(1, ioc->ioc_regs.ioc_usage_sem_reg); writel(0, ioc->ioc_regs.ioc_fail_sync); /* * Read the hw sem reg to make sure that it is locked * before we clear it. If it is not locked, writing 1 * will lock it instead of clearing it. */ readl(ioc->ioc_regs.ioc_sem_reg); writel(1, ioc->ioc_regs.ioc_sem_reg); } static bfa_boolean_t bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc) { uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync); uint32_t sync_reqd = bfa_ioc_ct_get_sync_reqd(r32); /* * Driver load time. If the sync required bit for this PCI fn * is set, it is due to an unclean exit by the driver for this * PCI fn in the previous incarnation. Whoever comes here first * should clean it up, no matter which PCI fn. 
*/ if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) { writel(0, ioc->ioc_regs.ioc_fail_sync); writel(1, ioc->ioc_regs.ioc_usage_reg); writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate); writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate); return BFA_TRUE; } return bfa_ioc_ct_sync_complete(ioc); } /* * Synchronized IOC failure processing routines */ static void bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc) { uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync); uint32_t sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc); writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync); } static void bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc) { uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync); uint32_t sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) | bfa_ioc_ct_sync_pos(ioc); writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync); } static void bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc) { uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync); writel((r32 | bfa_ioc_ct_sync_pos(ioc)), ioc->ioc_regs.ioc_fail_sync); } static bfa_boolean_t bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc) { uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync); uint32_t sync_reqd = bfa_ioc_ct_get_sync_reqd(r32); uint32_t sync_ackd = bfa_ioc_ct_get_sync_ackd(r32); uint32_t tmp_ackd; if (sync_ackd == 0) return BFA_TRUE; /* * The check below is to see whether any other PCI fn * has reinitialized the ASIC (reset sync_ackd bits) * and failed again while this IOC was waiting for hw * semaphore (in bfa_iocpf_sm_semwait()). */ tmp_ackd = sync_ackd; if ((sync_reqd & bfa_ioc_ct_sync_pos(ioc)) && !(sync_ackd & bfa_ioc_ct_sync_pos(ioc))) sync_ackd |= bfa_ioc_ct_sync_pos(ioc); if (sync_reqd == sync_ackd) { writel(bfa_ioc_ct_clear_sync_ackd(r32), ioc->ioc_regs.ioc_fail_sync); writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate); writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate); return BFA_TRUE; } /* * If another PCI fn reinitialized and failed again while * this IOC was waiting for hw sem, the sync_ackd bit for * this IOC need to be set again to allow reinitialization. */ if (tmp_ackd != sync_ackd) writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync); return BFA_FALSE; } /* * Called from bfa_ioc_attach() to map asic specific calls. */ static void bfa_ioc_set_ctx_hwif(struct bfa_ioc_s *ioc, struct bfa_ioc_hwif_s *hwif) { hwif->ioc_firmware_lock = bfa_ioc_ct_firmware_lock; hwif->ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock; hwif->ioc_notify_fail = bfa_ioc_ct_notify_fail; hwif->ioc_ownership_reset = bfa_ioc_ct_ownership_reset; hwif->ioc_sync_start = bfa_ioc_ct_sync_start; hwif->ioc_sync_join = bfa_ioc_ct_sync_join; hwif->ioc_sync_leave = bfa_ioc_ct_sync_leave; hwif->ioc_sync_ack = bfa_ioc_ct_sync_ack; hwif->ioc_sync_complete = bfa_ioc_ct_sync_complete; hwif->ioc_set_fwstate = bfa_ioc_ct_set_cur_ioc_fwstate; hwif->ioc_get_fwstate = bfa_ioc_ct_get_cur_ioc_fwstate; hwif->ioc_set_alt_fwstate = bfa_ioc_ct_set_alt_ioc_fwstate; hwif->ioc_get_alt_fwstate = bfa_ioc_ct_get_alt_ioc_fwstate; } /* * Called from bfa_ioc_attach() to map asic specific calls. */ void bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc) { bfa_ioc_set_ctx_hwif(ioc, &hwif_ct); hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init; hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init; hwif_ct.ioc_map_port = bfa_ioc_ct_map_port; hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set; ioc->ioc_hwif = &hwif_ct; } /* * Called from bfa_ioc_attach() to map asic specific calls. 
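 * The CT2 variant below overrides the PLL init, register init, port
 * mapping and LPU read-status hooks on top of the common handlers set
 * by bfa_ioc_set_ctx_hwif(), and leaves ioc_isr_mode_set NULL.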
*/ void bfa_ioc_set_ct2_hwif(struct bfa_ioc_s *ioc) { bfa_ioc_set_ctx_hwif(ioc, &hwif_ct2); hwif_ct2.ioc_pll_init = bfa_ioc_ct2_pll_init; hwif_ct2.ioc_reg_init = bfa_ioc_ct2_reg_init; hwif_ct2.ioc_map_port = bfa_ioc_ct2_map_port; hwif_ct2.ioc_lpu_read_stat = bfa_ioc_ct2_lpu_read_stat; hwif_ct2.ioc_isr_mode_set = NULL; ioc->ioc_hwif = &hwif_ct2; } /* * Workaround for MSI-X resource allocation for catapult-2 with no asic block */ #define HOSTFN_MSIX_DEFAULT 64 #define HOSTFN_MSIX_VT_INDEX_MBOX_ERR 0x30138 #define HOSTFN_MSIX_VT_OFST_NUMVT 0x3013c #define __MSIX_VT_NUMVT__MK 0x003ff800 #define __MSIX_VT_NUMVT__SH 11 #define __MSIX_VT_NUMVT_(_v) ((_v) << __MSIX_VT_NUMVT__SH) #define __MSIX_VT_OFST_ 0x000007ff void bfa_ioc_ct2_poweron(struct bfa_ioc_s *ioc) { void __iomem *rb = ioc->pcidev.pci_bar_kva; u32 r32; r32 = readl(rb + HOSTFN_MSIX_VT_OFST_NUMVT); if (r32 & __MSIX_VT_NUMVT__MK) { writel(r32 & __MSIX_VT_OFST_, rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR); return; } writel(__MSIX_VT_NUMVT_(HOSTFN_MSIX_DEFAULT - 1) | HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc), rb + HOSTFN_MSIX_VT_OFST_NUMVT); writel(HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc), rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR); } bfa_status_t bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode mode) { u32 pll_sclk, pll_fclk, r32; bfa_boolean_t fcmode = (mode == BFI_ASIC_MODE_FC); pll_sclk = __APP_PLL_SCLK_LRESETN | __APP_PLL_SCLK_ENARST | __APP_PLL_SCLK_RSEL200500 | __APP_PLL_SCLK_P0_1(3U) | __APP_PLL_SCLK_JITLMT0_1(3U) | __APP_PLL_SCLK_CNTLMT0_1(1U); pll_fclk = __APP_PLL_LCLK_LRESETN | __APP_PLL_LCLK_ENARST | __APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) | __APP_PLL_LCLK_JITLMT0_1(3U) | __APP_PLL_LCLK_CNTLMT0_1(1U); if (fcmode) { writel(0, (rb + OP_MODE)); writel(__APP_EMS_CMLCKSEL | __APP_EMS_REFCKBUFEN2 | __APP_EMS_CHANNEL_SEL, (rb + ETH_MAC_SER_REG)); } else { writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE)); writel(__APP_EMS_REFCKBUFEN1, (rb + ETH_MAC_SER_REG)); } writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG)); writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG)); writel(0xffffffffU, (rb + HOSTFN0_INT_MSK)); writel(0xffffffffU, (rb + HOSTFN1_INT_MSK)); writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS)); writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS)); writel(0xffffffffU, (rb + HOSTFN0_INT_MSK)); writel(0xffffffffU, (rb + HOSTFN1_INT_MSK)); writel(pll_sclk | __APP_PLL_SCLK_LOGIC_SOFT_RESET, rb + APP_PLL_SCLK_CTL_REG); writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET, rb + APP_PLL_LCLK_CTL_REG); writel(pll_sclk | __APP_PLL_SCLK_LOGIC_SOFT_RESET | __APP_PLL_SCLK_ENABLE, rb + APP_PLL_SCLK_CTL_REG); writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET | __APP_PLL_LCLK_ENABLE, rb + APP_PLL_LCLK_CTL_REG); readl(rb + HOSTFN0_INT_MSK); udelay(2000); writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS)); writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS)); writel(pll_sclk | __APP_PLL_SCLK_ENABLE, rb + APP_PLL_SCLK_CTL_REG); writel(pll_fclk | __APP_PLL_LCLK_ENABLE, rb + APP_PLL_LCLK_CTL_REG); if (!fcmode) { writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0)); writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1)); } r32 = readl((rb + PSS_CTL_REG)); r32 &= ~__PSS_LMEM_RESET; writel(r32, (rb + PSS_CTL_REG)); udelay(1000); if (!fcmode) { writel(0, (rb + PMM_1T_RESET_REG_P0)); writel(0, (rb + PMM_1T_RESET_REG_P1)); } writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG)); udelay(1000); r32 = readl((rb + MBIST_STAT_REG)); writel(0, (rb + MBIST_CTL_REG)); return BFA_STATUS_OK; } static void bfa_ioc_ct2_sclk_init(void __iomem *rb) { u32 r32; /* * put s_clk PLL and PLL FSM in reset */ 
r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG)); r32 &= ~(__APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN); r32 |= (__APP_PLL_SCLK_ENARST | __APP_PLL_SCLK_BYPASS | __APP_PLL_SCLK_LOGIC_SOFT_RESET); writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG)); /* * Ignore mode and program for the max clock (which is FC16) * Firmware/NFC will do the PLL init appropriately */ r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG)); r32 &= ~(__APP_PLL_SCLK_REFCLK_SEL | __APP_PLL_SCLK_CLK_DIV2); writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG)); /* * while doing PLL init don't clock gate ethernet subsystem */ r32 = readl((rb + CT2_CHIP_MISC_PRG)); writel(r32 | __ETH_CLK_ENABLE_PORT0, (rb + CT2_CHIP_MISC_PRG)); r32 = readl((rb + CT2_PCIE_MISC_REG)); writel(r32 | __ETH_CLK_ENABLE_PORT1, (rb + CT2_PCIE_MISC_REG)); /* * set sclk value */ r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG)); r32 &= (__P_SCLK_PLL_LOCK | __APP_PLL_SCLK_REFCLK_SEL | __APP_PLL_SCLK_CLK_DIV2); writel(r32 | 0x1061731b, (rb + CT2_APP_PLL_SCLK_CTL_REG)); /* * poll for s_clk lock or delay 1ms */ udelay(1000); } static void bfa_ioc_ct2_lclk_init(void __iomem *rb) { u32 r32; /* * put l_clk PLL and PLL FSM in reset */ r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG)); r32 &= ~(__APP_PLL_LCLK_ENABLE | __APP_PLL_LCLK_LRESETN); r32 |= (__APP_PLL_LCLK_ENARST | __APP_PLL_LCLK_BYPASS | __APP_PLL_LCLK_LOGIC_SOFT_RESET); writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG)); /* * set LPU speed (set for FC16 which will work for other modes) */ r32 = readl((rb + CT2_CHIP_MISC_PRG)); writel(r32, (rb + CT2_CHIP_MISC_PRG)); /* * set LPU half speed (set for FC16 which will work for other modes) */ r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG)); writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG)); /* * set lclk for mode (set for FC16) */ r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG)); r32 &= (__P_LCLK_PLL_LOCK | __APP_LPUCLK_HALFSPEED); r32 |= 0x20c1731b; writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG)); /* * poll for l_clk lock or delay 1ms */ udelay(1000); } static void bfa_ioc_ct2_mem_init(void __iomem *rb) { u32 r32; r32 = readl((rb + PSS_CTL_REG)); r32 &= ~__PSS_LMEM_RESET; writel(r32, (rb + PSS_CTL_REG)); udelay(1000); writel(__EDRAM_BISTR_START, (rb + CT2_MBIST_CTL_REG)); udelay(1000); writel(0, (rb + CT2_MBIST_CTL_REG)); } static void bfa_ioc_ct2_mac_reset(void __iomem *rb) { /* put port0, port1 MAC & AHB in reset */ writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET), rb + CT2_CSI_MAC_CONTROL_REG(0)); writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET), rb + CT2_CSI_MAC_CONTROL_REG(1)); } static void bfa_ioc_ct2_enable_flash(void __iomem *rb) { u32 r32; r32 = readl((rb + PSS_GPIO_OUT_REG)); writel(r32 & ~1, (rb + PSS_GPIO_OUT_REG)); r32 = readl((rb + PSS_GPIO_OE_REG)); writel(r32 | 1, (rb + PSS_GPIO_OE_REG)); } #define CT2_NFC_MAX_DELAY 1000 #define CT2_NFC_PAUSE_MAX_DELAY 4000 #define CT2_NFC_VER_VALID 0x147 #define CT2_NFC_STATE_RUNNING 0x20000001 #define BFA_IOC_PLL_POLL 1000000 static bfa_boolean_t bfa_ioc_ct2_nfc_halted(void __iomem *rb) { u32 r32; r32 = readl(rb + CT2_NFC_CSR_SET_REG); if (r32 & __NFC_CONTROLLER_HALTED) return BFA_TRUE; return BFA_FALSE; } static void bfa_ioc_ct2_nfc_halt(void __iomem *rb) { int i; writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_SET_REG); for (i = 0; i < CT2_NFC_MAX_DELAY; i++) { if (bfa_ioc_ct2_nfc_halted(rb)) break; udelay(1000); } WARN_ON(!bfa_ioc_ct2_nfc_halted(rb)); } static void bfa_ioc_ct2_nfc_resume(void __iomem *rb) { u32 r32; int i; writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_CLR_REG); for (i = 0; i < CT2_NFC_MAX_DELAY; i++) { r32 = readl(rb + CT2_NFC_CSR_SET_REG); 
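		/* poll until __NFC_CONTROLLER_HALTED drops; give up after CT2_NFC_MAX_DELAY ms */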
if (!(r32 & __NFC_CONTROLLER_HALTED)) return; udelay(1000); } WARN_ON(1); } static void bfa_ioc_ct2_clk_reset(void __iomem *rb) { u32 r32; bfa_ioc_ct2_sclk_init(rb); bfa_ioc_ct2_lclk_init(rb); /* * release soft reset on s_clk & l_clk */ r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG)); writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET, (rb + CT2_APP_PLL_SCLK_CTL_REG)); r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG)); writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET, (rb + CT2_APP_PLL_LCLK_CTL_REG)); } static void bfa_ioc_ct2_nfc_clk_reset(void __iomem *rb) { u32 r32, i; r32 = readl((rb + PSS_CTL_REG)); r32 |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET); writel(r32, (rb + PSS_CTL_REG)); writel(__RESET_AND_START_SCLK_LCLK_PLLS, rb + CT2_CSI_FW_CTL_SET_REG); for (i = 0; i < BFA_IOC_PLL_POLL; i++) { r32 = readl(rb + CT2_NFC_FLASH_STS_REG); if ((r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS)) break; } WARN_ON(!(r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS)); for (i = 0; i < BFA_IOC_PLL_POLL; i++) { r32 = readl(rb + CT2_NFC_FLASH_STS_REG); if (!(r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS)) break; } WARN_ON((r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS)); r32 = readl(rb + CT2_CSI_FW_CTL_REG); WARN_ON((r32 & __RESET_AND_START_SCLK_LCLK_PLLS)); } static void bfa_ioc_ct2_wait_till_nfc_running(void __iomem *rb) { u32 r32; int i; if (bfa_ioc_ct2_nfc_halted(rb)) bfa_ioc_ct2_nfc_resume(rb); for (i = 0; i < CT2_NFC_PAUSE_MAX_DELAY; i++) { r32 = readl(rb + CT2_NFC_STS_REG); if (r32 == CT2_NFC_STATE_RUNNING) return; udelay(1000); } r32 = readl(rb + CT2_NFC_STS_REG); WARN_ON(!(r32 == CT2_NFC_STATE_RUNNING)); } bfa_status_t bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode) { u32 wgn, r32, nfc_ver; wgn = readl(rb + CT2_WGN_STATUS); if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) { /* * If flash is corrupted, enable flash explicitly */ bfa_ioc_ct2_clk_reset(rb); bfa_ioc_ct2_enable_flash(rb); bfa_ioc_ct2_mac_reset(rb); bfa_ioc_ct2_clk_reset(rb); bfa_ioc_ct2_enable_flash(rb); } else { nfc_ver = readl(rb + CT2_RSC_GPR15_REG); if ((nfc_ver >= CT2_NFC_VER_VALID) && (wgn == (__A2T_AHB_LOAD | __WGN_READY))) { bfa_ioc_ct2_wait_till_nfc_running(rb); bfa_ioc_ct2_nfc_clk_reset(rb); } else { bfa_ioc_ct2_nfc_halt(rb); bfa_ioc_ct2_clk_reset(rb); bfa_ioc_ct2_mac_reset(rb); bfa_ioc_ct2_clk_reset(rb); } } /* * The very first PCIe DMA Read done by LPU fails with a fatal error, * when Address Translation Cache (ATC) has been enabled by system BIOS. * * Workaround: * Disable Invalidated Tag Match Enable capability by setting the bit 26 * of CHIP_MISC_PRG to 0, by default it is set to 1. 
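 * (Note: the 0xfbffffff mask used below is ~(1 << 26), so only bit 26 of
 * CHIP_MISC_PRG is cleared and every other bit is preserved by the
 * read-modify-write.)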
*/ r32 = readl(rb + CT2_CHIP_MISC_PRG); writel((r32 & 0xfbffffff), (rb + CT2_CHIP_MISC_PRG)); /* * Mask the interrupts and clear any * pending interrupts left by BIOS/EFI */ writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK)); writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK)); /* For first time initialization, no need to clear interrupts */ r32 = readl(rb + HOST_SEM5_REG); if (r32 & 0x1) { r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT)); if (r32 == 1) { writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT)); readl((rb + CT2_LPU0_HOSTFN_CMD_STAT)); } r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT)); if (r32 == 1) { writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT)); readl((rb + CT2_LPU1_HOSTFN_CMD_STAT)); } } bfa_ioc_ct2_mem_init(rb); writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC0_STATE_REG)); writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC1_STATE_REG)); return BFA_STATUS_OK; } static void bfa_ioc_ct_set_cur_ioc_fwstate(struct bfa_ioc_s *ioc, enum bfi_ioc_state fwstate) { writel(fwstate, ioc->ioc_regs.ioc_fwstate); } static enum bfi_ioc_state bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc_s *ioc) { return (enum bfi_ioc_state)readl(ioc->ioc_regs.ioc_fwstate); } static void bfa_ioc_ct_set_alt_ioc_fwstate(struct bfa_ioc_s *ioc, enum bfi_ioc_state fwstate) { writel(fwstate, ioc->ioc_regs.alt_ioc_fwstate); } static enum bfi_ioc_state bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc_s *ioc) { return (enum bfi_ioc_state) readl(ioc->ioc_regs.alt_ioc_fwstate); }
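/*
 * Standalone sketch (not part of the driver above): a small user-space
 * program that reproduces two pieces of register arithmetic from this file
 * with the same constant values -- the MSI-X NUMVT packing done in
 * bfa_ioc_ct2_poweron() and the bit-26 clear applied to CT2_CHIP_MISC_PRG
 * in bfa_ioc_ct2_pll_init().  Hardware registers are modeled as plain
 * integers here so the results can be checked by eye.
 */
#include <stdio.h>

#define HOSTFN_MSIX_DEFAULT	64
#define __MSIX_VT_NUMVT__MK	0x003ff800
#define __MSIX_VT_NUMVT__SH	11
#define __MSIX_VT_NUMVT_(_v)	((_v) << __MSIX_VT_NUMVT__SH)
#define __MSIX_VT_OFST_		0x000007ff

int main(void)
{
	unsigned int pcifn, numvt_reg;
	unsigned int misc_prg = 0xffffffffu;	/* pretend every bit is set */

	/*
	 * NUMVT packing: (vector count - 1) lands in bits 21..11 and the
	 * per-function vector base offset in bits 10..0.
	 */
	for (pcifn = 0; pcifn < 2; pcifn++) {
		numvt_reg = __MSIX_VT_NUMVT_(HOSTFN_MSIX_DEFAULT - 1) |
			    HOSTFN_MSIX_DEFAULT * pcifn;
		printf("fn%u: numvt reg 0x%08x (count-1=%u, ofst=%u)\n",
		       pcifn, numvt_reg,
		       (numvt_reg & __MSIX_VT_NUMVT__MK) >> __MSIX_VT_NUMVT__SH,
		       numvt_reg & __MSIX_VT_OFST_);
	}

	/* 0xfbffffff is ~(1 << 26): only bit 26 of CHIP_MISC_PRG gets cleared */
	printf("misc_prg & 0xfbffffff = 0x%08x\n", misc_prg & 0xfbffffff);
	return 0;
}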
linux-master
drivers/scsi/bfa/bfa_ioc_ct.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. * Copyright (c) 2014- QLogic Corporation. * All rights reserved * www.qlogic.com * * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. */ #include "bfad_drv.h" #include "bfa_modules.h" BFA_TRC_FILE(HAL, FCPIM); /* * BFA ITNIM Related definitions */ static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim); #define BFA_ITNIM_FROM_TAG(_fcpim, _tag) \ (((_fcpim)->itnim_arr + ((_tag) & ((_fcpim)->num_itnims - 1)))) #define bfa_fcpim_additn(__itnim) \ list_add_tail(&(__itnim)->qe, &(__itnim)->fcpim->itnim_q) #define bfa_fcpim_delitn(__itnim) do { \ WARN_ON(!bfa_q_is_on_q(&(__itnim)->fcpim->itnim_q, __itnim)); \ bfa_itnim_update_del_itn_stats(__itnim); \ list_del(&(__itnim)->qe); \ WARN_ON(!list_empty(&(__itnim)->io_q)); \ WARN_ON(!list_empty(&(__itnim)->io_cleanup_q)); \ WARN_ON(!list_empty(&(__itnim)->pending_q)); \ } while (0) #define bfa_itnim_online_cb(__itnim) do { \ if ((__itnim)->bfa->fcs) \ bfa_cb_itnim_online((__itnim)->ditn); \ else { \ bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \ __bfa_cb_itnim_online, (__itnim)); \ } \ } while (0) #define bfa_itnim_offline_cb(__itnim) do { \ if ((__itnim)->bfa->fcs) \ bfa_cb_itnim_offline((__itnim)->ditn); \ else { \ bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \ __bfa_cb_itnim_offline, (__itnim)); \ } \ } while (0) #define bfa_itnim_sler_cb(__itnim) do { \ if ((__itnim)->bfa->fcs) \ bfa_cb_itnim_sler((__itnim)->ditn); \ else { \ bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \ __bfa_cb_itnim_sler, (__itnim)); \ } \ } while (0) enum bfa_ioim_lm_ua_status { BFA_IOIM_LM_UA_RESET = 0, BFA_IOIM_LM_UA_SET = 1, }; /* * itnim state machine event */ enum bfa_itnim_event { BFA_ITNIM_SM_CREATE = 1, /* itnim is created */ BFA_ITNIM_SM_ONLINE = 2, /* itnim is online */ BFA_ITNIM_SM_OFFLINE = 3, /* itnim is offline */ BFA_ITNIM_SM_FWRSP = 4, /* firmware response */ BFA_ITNIM_SM_DELETE = 5, /* deleting an existing itnim */ BFA_ITNIM_SM_CLEANUP = 6, /* IO cleanup completion */ BFA_ITNIM_SM_SLER = 7, /* second level error recovery */ BFA_ITNIM_SM_HWFAIL = 8, /* IOC h/w failure event */ BFA_ITNIM_SM_QRESUME = 9, /* queue space available */ }; /* * BFA IOIM related definitions */ #define bfa_ioim_move_to_comp_q(__ioim) do { \ list_del(&(__ioim)->qe); \ list_add_tail(&(__ioim)->qe, &(__ioim)->fcpim->ioim_comp_q); \ } while (0) #define bfa_ioim_cb_profile_comp(__fcpim, __ioim) do { \ if ((__fcpim)->profile_comp) \ (__fcpim)->profile_comp(__ioim); \ } while (0) #define bfa_ioim_cb_profile_start(__fcpim, __ioim) do { \ if ((__fcpim)->profile_start) \ (__fcpim)->profile_start(__ioim); \ } while (0) /* * IO state machine events */ enum bfa_ioim_event { BFA_IOIM_SM_START = 1, /* io start request from host */ BFA_IOIM_SM_COMP_GOOD = 2, /* io good comp, resource free */ BFA_IOIM_SM_COMP = 3, /* io comp, resource is free */ BFA_IOIM_SM_COMP_UTAG = 4, /* io comp, resource is free */ BFA_IOIM_SM_DONE = 5, /* io comp, resource not free */ BFA_IOIM_SM_FREE = 6, /* io resource is freed */ BFA_IOIM_SM_ABORT = 7, /* abort request from scsi stack */ BFA_IOIM_SM_ABORT_COMP = 8, /* abort from f/w */ BFA_IOIM_SM_ABORT_DONE = 9, /* abort completion from f/w */ BFA_IOIM_SM_QRESUME = 10, /* CQ space available to queue IO */ BFA_IOIM_SM_SGALLOCED = 11, /* SG page allocation successful */ BFA_IOIM_SM_SQRETRY = 12, /* sequence recovery retry */ BFA_IOIM_SM_HCB = 13, /* bfa callback complete */ BFA_IOIM_SM_CLEANUP = 14, /* IO cleanup from itnim */ 
BFA_IOIM_SM_TMSTART = 15, /* IO cleanup from tskim */ BFA_IOIM_SM_TMDONE = 16, /* IO cleanup from tskim */ BFA_IOIM_SM_HWFAIL = 17, /* IOC h/w failure event */ BFA_IOIM_SM_IOTOV = 18, /* ITN offline TOV */ }; /* * BFA TSKIM related definitions */ /* * task management completion handling */ #define bfa_tskim_qcomp(__tskim, __cbfn) do { \ bfa_cb_queue((__tskim)->bfa, &(__tskim)->hcb_qe, __cbfn, (__tskim));\ bfa_tskim_notify_comp(__tskim); \ } while (0) #define bfa_tskim_notify_comp(__tskim) do { \ if ((__tskim)->notify) \ bfa_itnim_tskdone((__tskim)->itnim); \ } while (0) enum bfa_tskim_event { BFA_TSKIM_SM_START = 1, /* TM command start */ BFA_TSKIM_SM_DONE = 2, /* TM completion */ BFA_TSKIM_SM_QRESUME = 3, /* resume after qfull */ BFA_TSKIM_SM_HWFAIL = 5, /* IOC h/w failure event */ BFA_TSKIM_SM_HCB = 6, /* BFA callback completion */ BFA_TSKIM_SM_IOS_DONE = 7, /* IO and sub TM completions */ BFA_TSKIM_SM_CLEANUP = 8, /* TM cleanup on ITN offline */ BFA_TSKIM_SM_CLEANUP_DONE = 9, /* TM abort completion */ BFA_TSKIM_SM_UTAG = 10, /* TM completion unknown tag */ }; /* * forward declaration for BFA ITNIM functions */ static void bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim); static bfa_boolean_t bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim); static bfa_boolean_t bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim); static void bfa_itnim_cleanp_comp(void *itnim_cbarg); static void bfa_itnim_cleanup(struct bfa_itnim_s *itnim); static void __bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete); static void __bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete); static void __bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete); static void bfa_itnim_iotov_online(struct bfa_itnim_s *itnim); static void bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim); static void bfa_itnim_iotov(void *itnim_arg); static void bfa_itnim_iotov_start(struct bfa_itnim_s *itnim); static void bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim); static void bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim); /* * forward declaration of ITNIM state machine */ static void bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event); static void bfa_itnim_sm_created(struct bfa_itnim_s *itnim, enum bfa_itnim_event event); static void bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim, enum bfa_itnim_event event); static void bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim, enum bfa_itnim_event event); static void bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event); static void bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event); static void bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event); static void bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim, enum bfa_itnim_event event); static void bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim, enum bfa_itnim_event event); static void bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event); static void bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim, enum bfa_itnim_event event); static void bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim, enum bfa_itnim_event event); static void bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim, enum bfa_itnim_event event); static void bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim, enum bfa_itnim_event event); static void bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim, enum bfa_itnim_event event); /* * forward declaration for BFA IOIM functions */ static bfa_boolean_t 
bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim); static bfa_boolean_t bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim); static bfa_boolean_t bfa_ioim_send_abort(struct bfa_ioim_s *ioim); static void bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim); static void __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete); static void __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete); static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete); static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete); static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete); static bfa_boolean_t bfa_ioim_is_abortable(struct bfa_ioim_s *ioim); /* * forward declaration of BFA IO state machine */ static void bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event); static void bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event); static void bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event); static void bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event); static void bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event); static void bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event); static void bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event); static void bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event); static void bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event); static void bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, enum bfa_ioim_event event); static void bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event); static void bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, enum bfa_ioim_event event); /* * forward declaration for BFA TSKIM functions */ static void __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete); static void __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete); static bfa_boolean_t bfa_tskim_match_scope(struct bfa_tskim_s *tskim, struct scsi_lun lun); static void bfa_tskim_gather_ios(struct bfa_tskim_s *tskim); static void bfa_tskim_cleanp_comp(void *tskim_cbarg); static void bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim); static bfa_boolean_t bfa_tskim_send(struct bfa_tskim_s *tskim); static bfa_boolean_t bfa_tskim_send_abort(struct bfa_tskim_s *tskim); static void bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim); /* * forward declaration of BFA TSKIM state machine */ static void bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event); static void bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event); static void bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event); static void bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event); static void bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event); static void bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event); static void bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event); /* * BFA FCP Initiator Mode module */ /* * Compute and return memory needed by FCP(im) module. 
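 * (km_len grows by the ITN requirement from bfa_itnim_meminfo(), plus
 * num_ioim_reqs * (sizeof(struct bfa_ioim_s) + sizeof(struct bfa_ioim_sp_s)),
 * plus at least BFA_TSKIM_MIN task management requests worth of
 * struct bfa_tskim_s.)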
*/ static void bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len) { bfa_itnim_meminfo(cfg, km_len); /* * IO memory */ *km_len += cfg->fwcfg.num_ioim_reqs * (sizeof(struct bfa_ioim_s) + sizeof(struct bfa_ioim_sp_s)); /* * task management command memory */ if (cfg->fwcfg.num_tskim_reqs < BFA_TSKIM_MIN) cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN; *km_len += cfg->fwcfg.num_tskim_reqs * sizeof(struct bfa_tskim_s); } static void bfa_fcpim_attach(struct bfa_fcp_mod_s *fcp, void *bfad, struct bfa_iocfc_cfg_s *cfg, struct bfa_pcidev_s *pcidev) { struct bfa_fcpim_s *fcpim = &fcp->fcpim; struct bfa_s *bfa = fcp->bfa; bfa_trc(bfa, cfg->drvcfg.path_tov); bfa_trc(bfa, cfg->fwcfg.num_rports); bfa_trc(bfa, cfg->fwcfg.num_ioim_reqs); bfa_trc(bfa, cfg->fwcfg.num_tskim_reqs); fcpim->fcp = fcp; fcpim->bfa = bfa; fcpim->num_itnims = cfg->fwcfg.num_rports; fcpim->num_tskim_reqs = cfg->fwcfg.num_tskim_reqs; fcpim->path_tov = cfg->drvcfg.path_tov; fcpim->delay_comp = cfg->drvcfg.delay_comp; fcpim->profile_comp = NULL; fcpim->profile_start = NULL; bfa_itnim_attach(fcpim); bfa_tskim_attach(fcpim); bfa_ioim_attach(fcpim); } void bfa_fcpim_iocdisable(struct bfa_fcp_mod_s *fcp) { struct bfa_fcpim_s *fcpim = &fcp->fcpim; struct bfa_itnim_s *itnim; struct list_head *qe, *qen; /* Enqueue unused ioim resources to free_q */ list_splice_tail_init(&fcpim->tskim_unused_q, &fcpim->tskim_free_q); list_for_each_safe(qe, qen, &fcpim->itnim_q) { itnim = (struct bfa_itnim_s *) qe; bfa_itnim_iocdisable(itnim); } } void bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov) { struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa); fcpim->path_tov = path_tov * 1000; if (fcpim->path_tov > BFA_FCPIM_PATHTOV_MAX) fcpim->path_tov = BFA_FCPIM_PATHTOV_MAX; } u16 bfa_fcpim_path_tov_get(struct bfa_s *bfa) { struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa); return fcpim->path_tov / 1000; } #define bfa_fcpim_add_iostats(__l, __r, __stats) \ (__l->__stats += __r->__stats) void bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *lstats, struct bfa_itnim_iostats_s *rstats) { bfa_fcpim_add_iostats(lstats, rstats, total_ios); bfa_fcpim_add_iostats(lstats, rstats, qresumes); bfa_fcpim_add_iostats(lstats, rstats, no_iotags); bfa_fcpim_add_iostats(lstats, rstats, io_aborts); bfa_fcpim_add_iostats(lstats, rstats, no_tskims); bfa_fcpim_add_iostats(lstats, rstats, iocomp_ok); bfa_fcpim_add_iostats(lstats, rstats, iocomp_underrun); bfa_fcpim_add_iostats(lstats, rstats, iocomp_overrun); bfa_fcpim_add_iostats(lstats, rstats, iocomp_aborted); bfa_fcpim_add_iostats(lstats, rstats, iocomp_timedout); bfa_fcpim_add_iostats(lstats, rstats, iocom_nexus_abort); bfa_fcpim_add_iostats(lstats, rstats, iocom_proto_err); bfa_fcpim_add_iostats(lstats, rstats, iocom_dif_err); bfa_fcpim_add_iostats(lstats, rstats, iocom_sqer_needed); bfa_fcpim_add_iostats(lstats, rstats, iocom_res_free); bfa_fcpim_add_iostats(lstats, rstats, iocom_hostabrts); bfa_fcpim_add_iostats(lstats, rstats, iocom_utags); bfa_fcpim_add_iostats(lstats, rstats, io_cleanups); bfa_fcpim_add_iostats(lstats, rstats, io_tmaborts); bfa_fcpim_add_iostats(lstats, rstats, onlines); bfa_fcpim_add_iostats(lstats, rstats, offlines); bfa_fcpim_add_iostats(lstats, rstats, creates); bfa_fcpim_add_iostats(lstats, rstats, deletes); bfa_fcpim_add_iostats(lstats, rstats, create_comps); bfa_fcpim_add_iostats(lstats, rstats, delete_comps); bfa_fcpim_add_iostats(lstats, rstats, sler_events); bfa_fcpim_add_iostats(lstats, rstats, fw_create); bfa_fcpim_add_iostats(lstats, rstats, fw_delete); bfa_fcpim_add_iostats(lstats, rstats, 
ioc_disabled); bfa_fcpim_add_iostats(lstats, rstats, cleanup_comps); bfa_fcpim_add_iostats(lstats, rstats, tm_cmnds); bfa_fcpim_add_iostats(lstats, rstats, tm_fw_rsps); bfa_fcpim_add_iostats(lstats, rstats, tm_success); bfa_fcpim_add_iostats(lstats, rstats, tm_failures); bfa_fcpim_add_iostats(lstats, rstats, tm_io_comps); bfa_fcpim_add_iostats(lstats, rstats, tm_qresumes); bfa_fcpim_add_iostats(lstats, rstats, tm_iocdowns); bfa_fcpim_add_iostats(lstats, rstats, tm_cleanups); bfa_fcpim_add_iostats(lstats, rstats, tm_cleanup_comps); bfa_fcpim_add_iostats(lstats, rstats, io_comps); bfa_fcpim_add_iostats(lstats, rstats, input_reqs); bfa_fcpim_add_iostats(lstats, rstats, output_reqs); bfa_fcpim_add_iostats(lstats, rstats, rd_throughput); bfa_fcpim_add_iostats(lstats, rstats, wr_throughput); } bfa_status_t bfa_fcpim_port_iostats(struct bfa_s *bfa, struct bfa_itnim_iostats_s *stats, u8 lp_tag) { struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa); struct list_head *qe, *qen; struct bfa_itnim_s *itnim; /* accumulate IO stats from itnim */ memset(stats, 0, sizeof(struct bfa_itnim_iostats_s)); list_for_each_safe(qe, qen, &fcpim->itnim_q) { itnim = (struct bfa_itnim_s *) qe; if (itnim->rport->rport_info.lp_tag != lp_tag) continue; bfa_fcpim_add_stats(stats, &(itnim->stats)); } return BFA_STATUS_OK; } static void bfa_ioim_profile_comp(struct bfa_ioim_s *ioim) { struct bfa_itnim_latency_s *io_lat = &(ioim->itnim->ioprofile.io_latency); u32 val, idx; val = (u32)(jiffies - ioim->start_time); idx = bfa_ioim_get_index(scsi_bufflen((struct scsi_cmnd *)ioim->dio)); bfa_itnim_ioprofile_update(ioim->itnim, idx); io_lat->count[idx]++; io_lat->min[idx] = (io_lat->min[idx] < val) ? io_lat->min[idx] : val; io_lat->max[idx] = (io_lat->max[idx] > val) ? io_lat->max[idx] : val; io_lat->avg[idx] += val; } static void bfa_ioim_profile_start(struct bfa_ioim_s *ioim) { ioim->start_time = jiffies; } bfa_status_t bfa_fcpim_profile_on(struct bfa_s *bfa, time64_t time) { struct bfa_itnim_s *itnim; struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa); struct list_head *qe, *qen; /* accumulate IO stats from itnim */ list_for_each_safe(qe, qen, &fcpim->itnim_q) { itnim = (struct bfa_itnim_s *) qe; bfa_itnim_clear_stats(itnim); } fcpim->io_profile = BFA_TRUE; fcpim->io_profile_start_time = time; fcpim->profile_comp = bfa_ioim_profile_comp; fcpim->profile_start = bfa_ioim_profile_start; return BFA_STATUS_OK; } bfa_status_t bfa_fcpim_profile_off(struct bfa_s *bfa) { struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa); fcpim->io_profile = BFA_FALSE; fcpim->io_profile_start_time = 0; fcpim->profile_comp = NULL; fcpim->profile_start = NULL; return BFA_STATUS_OK; } u16 bfa_fcpim_qdepth_get(struct bfa_s *bfa) { struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa); return fcpim->q_depth; } /* * BFA ITNIM module state machine functions */ /* * Beginning/unallocated state - no events expected. */ static void bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) { bfa_trc(itnim->bfa, itnim->rport->rport_tag); bfa_trc(itnim->bfa, event); switch (event) { case BFA_ITNIM_SM_CREATE: bfa_sm_set_state(itnim, bfa_itnim_sm_created); itnim->is_online = BFA_FALSE; bfa_fcpim_additn(itnim); break; default: bfa_sm_fault(itnim->bfa, event); } } /* * Beginning state, only online event expected. 
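 * (DELETE and HWFAIL are also handled in this state; any other event is
 * treated as a state machine fault.)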
*/ static void bfa_itnim_sm_created(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) { bfa_trc(itnim->bfa, itnim->rport->rport_tag); bfa_trc(itnim->bfa, event); switch (event) { case BFA_ITNIM_SM_ONLINE: if (bfa_itnim_send_fwcreate(itnim)) bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate); else bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull); break; case BFA_ITNIM_SM_DELETE: bfa_sm_set_state(itnim, bfa_itnim_sm_uninit); bfa_fcpim_delitn(itnim); break; case BFA_ITNIM_SM_HWFAIL: bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); break; default: bfa_sm_fault(itnim->bfa, event); } } /* * Waiting for itnim create response from firmware. */ static void bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) { bfa_trc(itnim->bfa, itnim->rport->rport_tag); bfa_trc(itnim->bfa, event); switch (event) { case BFA_ITNIM_SM_FWRSP: bfa_sm_set_state(itnim, bfa_itnim_sm_online); itnim->is_online = BFA_TRUE; bfa_itnim_iotov_online(itnim); bfa_itnim_online_cb(itnim); break; case BFA_ITNIM_SM_DELETE: bfa_sm_set_state(itnim, bfa_itnim_sm_delete_pending); break; case BFA_ITNIM_SM_OFFLINE: if (bfa_itnim_send_fwdelete(itnim)) bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete); else bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull); break; case BFA_ITNIM_SM_HWFAIL: bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); break; default: bfa_sm_fault(itnim->bfa, event); } } static void bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) { bfa_trc(itnim->bfa, itnim->rport->rport_tag); bfa_trc(itnim->bfa, event); switch (event) { case BFA_ITNIM_SM_QRESUME: bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate); bfa_itnim_send_fwcreate(itnim); break; case BFA_ITNIM_SM_DELETE: bfa_sm_set_state(itnim, bfa_itnim_sm_uninit); bfa_reqq_wcancel(&itnim->reqq_wait); bfa_fcpim_delitn(itnim); break; case BFA_ITNIM_SM_OFFLINE: bfa_sm_set_state(itnim, bfa_itnim_sm_offline); bfa_reqq_wcancel(&itnim->reqq_wait); bfa_itnim_offline_cb(itnim); break; case BFA_ITNIM_SM_HWFAIL: bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); bfa_reqq_wcancel(&itnim->reqq_wait); break; default: bfa_sm_fault(itnim->bfa, event); } } /* * Waiting for itnim create response from firmware, a delete is pending. */ static void bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) { bfa_trc(itnim->bfa, itnim->rport->rport_tag); bfa_trc(itnim->bfa, event); switch (event) { case BFA_ITNIM_SM_FWRSP: if (bfa_itnim_send_fwdelete(itnim)) bfa_sm_set_state(itnim, bfa_itnim_sm_deleting); else bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull); break; case BFA_ITNIM_SM_HWFAIL: bfa_sm_set_state(itnim, bfa_itnim_sm_uninit); bfa_fcpim_delitn(itnim); break; default: bfa_sm_fault(itnim->bfa, event); } } /* * Online state - normal parking state. 
*/ static void bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) { bfa_trc(itnim->bfa, itnim->rport->rport_tag); bfa_trc(itnim->bfa, event); switch (event) { case BFA_ITNIM_SM_OFFLINE: bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline); itnim->is_online = BFA_FALSE; bfa_itnim_iotov_start(itnim); bfa_itnim_cleanup(itnim); break; case BFA_ITNIM_SM_DELETE: bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete); itnim->is_online = BFA_FALSE; bfa_itnim_cleanup(itnim); break; case BFA_ITNIM_SM_SLER: bfa_sm_set_state(itnim, bfa_itnim_sm_sler); itnim->is_online = BFA_FALSE; bfa_itnim_iotov_start(itnim); bfa_itnim_sler_cb(itnim); break; case BFA_ITNIM_SM_HWFAIL: bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); itnim->is_online = BFA_FALSE; bfa_itnim_iotov_start(itnim); bfa_itnim_iocdisable_cleanup(itnim); break; default: bfa_sm_fault(itnim->bfa, event); } } /* * Second level error recovery needed. */ static void bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) { bfa_trc(itnim->bfa, itnim->rport->rport_tag); bfa_trc(itnim->bfa, event); switch (event) { case BFA_ITNIM_SM_OFFLINE: bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline); bfa_itnim_cleanup(itnim); break; case BFA_ITNIM_SM_DELETE: bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete); bfa_itnim_cleanup(itnim); bfa_itnim_iotov_delete(itnim); break; case BFA_ITNIM_SM_HWFAIL: bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); bfa_itnim_iocdisable_cleanup(itnim); break; default: bfa_sm_fault(itnim->bfa, event); } } /* * Going offline. Waiting for active IO cleanup. */ static void bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) { bfa_trc(itnim->bfa, itnim->rport->rport_tag); bfa_trc(itnim->bfa, event); switch (event) { case BFA_ITNIM_SM_CLEANUP: if (bfa_itnim_send_fwdelete(itnim)) bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete); else bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull); break; case BFA_ITNIM_SM_DELETE: bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete); bfa_itnim_iotov_delete(itnim); break; case BFA_ITNIM_SM_HWFAIL: bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); bfa_itnim_iocdisable_cleanup(itnim); bfa_itnim_offline_cb(itnim); break; case BFA_ITNIM_SM_SLER: break; default: bfa_sm_fault(itnim->bfa, event); } } /* * Deleting itnim. Waiting for active IO cleanup. */ static void bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) { bfa_trc(itnim->bfa, itnim->rport->rport_tag); bfa_trc(itnim->bfa, event); switch (event) { case BFA_ITNIM_SM_CLEANUP: if (bfa_itnim_send_fwdelete(itnim)) bfa_sm_set_state(itnim, bfa_itnim_sm_deleting); else bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull); break; case BFA_ITNIM_SM_HWFAIL: bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); bfa_itnim_iocdisable_cleanup(itnim); break; default: bfa_sm_fault(itnim->bfa, event); } } /* * Rport offline. Firmware itnim is being deleted - awaiting f/w response. 
*/ static void bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) { bfa_trc(itnim->bfa, itnim->rport->rport_tag); bfa_trc(itnim->bfa, event); switch (event) { case BFA_ITNIM_SM_FWRSP: bfa_sm_set_state(itnim, bfa_itnim_sm_offline); bfa_itnim_offline_cb(itnim); break; case BFA_ITNIM_SM_DELETE: bfa_sm_set_state(itnim, bfa_itnim_sm_deleting); break; case BFA_ITNIM_SM_HWFAIL: bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); bfa_itnim_offline_cb(itnim); break; default: bfa_sm_fault(itnim->bfa, event); } } static void bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) { bfa_trc(itnim->bfa, itnim->rport->rport_tag); bfa_trc(itnim->bfa, event); switch (event) { case BFA_ITNIM_SM_QRESUME: bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete); bfa_itnim_send_fwdelete(itnim); break; case BFA_ITNIM_SM_DELETE: bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull); break; case BFA_ITNIM_SM_HWFAIL: bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); bfa_reqq_wcancel(&itnim->reqq_wait); bfa_itnim_offline_cb(itnim); break; default: bfa_sm_fault(itnim->bfa, event); } } /* * Offline state. */ static void bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) { bfa_trc(itnim->bfa, itnim->rport->rport_tag); bfa_trc(itnim->bfa, event); switch (event) { case BFA_ITNIM_SM_DELETE: bfa_sm_set_state(itnim, bfa_itnim_sm_uninit); bfa_itnim_iotov_delete(itnim); bfa_fcpim_delitn(itnim); break; case BFA_ITNIM_SM_ONLINE: if (bfa_itnim_send_fwcreate(itnim)) bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate); else bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull); break; case BFA_ITNIM_SM_HWFAIL: bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); break; default: bfa_sm_fault(itnim->bfa, event); } } static void bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) { bfa_trc(itnim->bfa, itnim->rport->rport_tag); bfa_trc(itnim->bfa, event); switch (event) { case BFA_ITNIM_SM_DELETE: bfa_sm_set_state(itnim, bfa_itnim_sm_uninit); bfa_itnim_iotov_delete(itnim); bfa_fcpim_delitn(itnim); break; case BFA_ITNIM_SM_OFFLINE: bfa_itnim_offline_cb(itnim); break; case BFA_ITNIM_SM_ONLINE: if (bfa_itnim_send_fwcreate(itnim)) bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate); else bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull); break; case BFA_ITNIM_SM_HWFAIL: break; default: bfa_sm_fault(itnim->bfa, event); } } /* * Itnim is deleted, waiting for firmware response to delete. */ static void bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) { bfa_trc(itnim->bfa, itnim->rport->rport_tag); bfa_trc(itnim->bfa, event); switch (event) { case BFA_ITNIM_SM_FWRSP: case BFA_ITNIM_SM_HWFAIL: bfa_sm_set_state(itnim, bfa_itnim_sm_uninit); bfa_fcpim_delitn(itnim); break; default: bfa_sm_fault(itnim->bfa, event); } } static void bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) { bfa_trc(itnim->bfa, itnim->rport->rport_tag); bfa_trc(itnim->bfa, event); switch (event) { case BFA_ITNIM_SM_QRESUME: bfa_sm_set_state(itnim, bfa_itnim_sm_deleting); bfa_itnim_send_fwdelete(itnim); break; case BFA_ITNIM_SM_HWFAIL: bfa_sm_set_state(itnim, bfa_itnim_sm_uninit); bfa_reqq_wcancel(&itnim->reqq_wait); bfa_fcpim_delitn(itnim); break; default: bfa_sm_fault(itnim->bfa, event); } } /* * Initiate cleanup of all IOs on an IOC failure. 
*/ static void bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim) { struct bfa_tskim_s *tskim; struct bfa_ioim_s *ioim; struct list_head *qe, *qen; list_for_each_safe(qe, qen, &itnim->tsk_q) { tskim = (struct bfa_tskim_s *) qe; bfa_tskim_iocdisable(tskim); } list_for_each_safe(qe, qen, &itnim->io_q) { ioim = (struct bfa_ioim_s *) qe; bfa_ioim_iocdisable(ioim); } /* * For IO request in pending queue, we pretend an early timeout. */ list_for_each_safe(qe, qen, &itnim->pending_q) { ioim = (struct bfa_ioim_s *) qe; bfa_ioim_tov(ioim); } list_for_each_safe(qe, qen, &itnim->io_cleanup_q) { ioim = (struct bfa_ioim_s *) qe; bfa_ioim_iocdisable(ioim); } } /* * IO cleanup completion */ static void bfa_itnim_cleanp_comp(void *itnim_cbarg) { struct bfa_itnim_s *itnim = itnim_cbarg; bfa_stats(itnim, cleanup_comps); bfa_sm_send_event(itnim, BFA_ITNIM_SM_CLEANUP); } /* * Initiate cleanup of all IOs. */ static void bfa_itnim_cleanup(struct bfa_itnim_s *itnim) { struct bfa_ioim_s *ioim; struct bfa_tskim_s *tskim; struct list_head *qe, *qen; bfa_wc_init(&itnim->wc, bfa_itnim_cleanp_comp, itnim); list_for_each_safe(qe, qen, &itnim->io_q) { ioim = (struct bfa_ioim_s *) qe; /* * Move IO to a cleanup queue from active queue so that a later * TM will not pickup this IO. */ list_del(&ioim->qe); list_add_tail(&ioim->qe, &itnim->io_cleanup_q); bfa_wc_up(&itnim->wc); bfa_ioim_cleanup(ioim); } list_for_each_safe(qe, qen, &itnim->tsk_q) { tskim = (struct bfa_tskim_s *) qe; bfa_wc_up(&itnim->wc); bfa_tskim_cleanup(tskim); } bfa_wc_wait(&itnim->wc); } static void __bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete) { struct bfa_itnim_s *itnim = cbarg; if (complete) bfa_cb_itnim_online(itnim->ditn); } static void __bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete) { struct bfa_itnim_s *itnim = cbarg; if (complete) bfa_cb_itnim_offline(itnim->ditn); } static void __bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete) { struct bfa_itnim_s *itnim = cbarg; if (complete) bfa_cb_itnim_sler(itnim->ditn); } /* * Call to resume any I/O requests waiting for room in request queue. 
*/ static void bfa_itnim_qresume(void *cbarg) { struct bfa_itnim_s *itnim = cbarg; bfa_sm_send_event(itnim, BFA_ITNIM_SM_QRESUME); } /* * bfa_itnim_public */ void bfa_itnim_iodone(struct bfa_itnim_s *itnim) { bfa_wc_down(&itnim->wc); } void bfa_itnim_tskdone(struct bfa_itnim_s *itnim) { bfa_wc_down(&itnim->wc); } void bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len) { /* * ITN memory */ *km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itnim_s); } void bfa_itnim_attach(struct bfa_fcpim_s *fcpim) { struct bfa_s *bfa = fcpim->bfa; struct bfa_fcp_mod_s *fcp = fcpim->fcp; struct bfa_itnim_s *itnim; int i, j; INIT_LIST_HEAD(&fcpim->itnim_q); itnim = (struct bfa_itnim_s *) bfa_mem_kva_curp(fcp); fcpim->itnim_arr = itnim; for (i = 0; i < fcpim->num_itnims; i++, itnim++) { memset(itnim, 0, sizeof(struct bfa_itnim_s)); itnim->bfa = bfa; itnim->fcpim = fcpim; itnim->reqq = BFA_REQQ_QOS_LO; itnim->rport = BFA_RPORT_FROM_TAG(bfa, i); itnim->iotov_active = BFA_FALSE; bfa_reqq_winit(&itnim->reqq_wait, bfa_itnim_qresume, itnim); INIT_LIST_HEAD(&itnim->io_q); INIT_LIST_HEAD(&itnim->io_cleanup_q); INIT_LIST_HEAD(&itnim->pending_q); INIT_LIST_HEAD(&itnim->tsk_q); INIT_LIST_HEAD(&itnim->delay_comp_q); for (j = 0; j < BFA_IOBUCKET_MAX; j++) itnim->ioprofile.io_latency.min[j] = ~0; bfa_sm_set_state(itnim, bfa_itnim_sm_uninit); } bfa_mem_kva_curp(fcp) = (u8 *) itnim; } void bfa_itnim_iocdisable(struct bfa_itnim_s *itnim) { bfa_stats(itnim, ioc_disabled); bfa_sm_send_event(itnim, BFA_ITNIM_SM_HWFAIL); } static bfa_boolean_t bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim) { struct bfi_itn_create_req_s *m; itnim->msg_no++; /* * check for room in queue to send request now */ m = bfa_reqq_next(itnim->bfa, itnim->reqq); if (!m) { bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait); return BFA_FALSE; } bfi_h2i_set(m->mh, BFI_MC_ITN, BFI_ITN_H2I_CREATE_REQ, bfa_fn_lpu(itnim->bfa)); m->fw_handle = itnim->rport->fw_handle; m->class = FC_CLASS_3; m->seq_rec = itnim->seq_rec; m->msg_no = itnim->msg_no; bfa_stats(itnim, fw_create); /* * queue I/O message to firmware */ bfa_reqq_produce(itnim->bfa, itnim->reqq, m->mh); return BFA_TRUE; } static bfa_boolean_t bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim) { struct bfi_itn_delete_req_s *m; /* * check for room in queue to send request now */ m = bfa_reqq_next(itnim->bfa, itnim->reqq); if (!m) { bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait); return BFA_FALSE; } bfi_h2i_set(m->mh, BFI_MC_ITN, BFI_ITN_H2I_DELETE_REQ, bfa_fn_lpu(itnim->bfa)); m->fw_handle = itnim->rport->fw_handle; bfa_stats(itnim, fw_delete); /* * queue I/O message to firmware */ bfa_reqq_produce(itnim->bfa, itnim->reqq, m->mh); return BFA_TRUE; } /* * Cleanup all pending failed inflight requests. */ static void bfa_itnim_delayed_comp(struct bfa_itnim_s *itnim, bfa_boolean_t iotov) { struct bfa_ioim_s *ioim; struct list_head *qe, *qen; list_for_each_safe(qe, qen, &itnim->delay_comp_q) { ioim = (struct bfa_ioim_s *)qe; bfa_ioim_delayed_comp(ioim, iotov); } } /* * Start all pending IO requests. */ static void bfa_itnim_iotov_online(struct bfa_itnim_s *itnim) { struct bfa_ioim_s *ioim; bfa_itnim_iotov_stop(itnim); /* * Abort all inflight IO requests in the queue */ bfa_itnim_delayed_comp(itnim, BFA_FALSE); /* * Start all pending IO requests. 
*/ while (!list_empty(&itnim->pending_q)) { bfa_q_deq(&itnim->pending_q, &ioim); list_add_tail(&ioim->qe, &itnim->io_q); bfa_ioim_start(ioim); } } /* * Fail all pending IO requests */ static void bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim) { struct bfa_ioim_s *ioim; /* * Fail all inflight IO requests in the queue */ bfa_itnim_delayed_comp(itnim, BFA_TRUE); /* * Fail any pending IO requests. */ while (!list_empty(&itnim->pending_q)) { bfa_q_deq(&itnim->pending_q, &ioim); list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q); bfa_ioim_tov(ioim); } } /* * IO TOV timer callback. Fail any pending IO requests. */ static void bfa_itnim_iotov(void *itnim_arg) { struct bfa_itnim_s *itnim = itnim_arg; itnim->iotov_active = BFA_FALSE; bfa_cb_itnim_tov_begin(itnim->ditn); bfa_itnim_iotov_cleanup(itnim); bfa_cb_itnim_tov(itnim->ditn); } /* * Start IO TOV timer for failing back pending IO requests in offline state. */ static void bfa_itnim_iotov_start(struct bfa_itnim_s *itnim) { if (itnim->fcpim->path_tov > 0) { itnim->iotov_active = BFA_TRUE; WARN_ON(!bfa_itnim_hold_io(itnim)); bfa_timer_start(itnim->bfa, &itnim->timer, bfa_itnim_iotov, itnim, itnim->fcpim->path_tov); } } /* * Stop IO TOV timer. */ static void bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim) { if (itnim->iotov_active) { itnim->iotov_active = BFA_FALSE; bfa_timer_stop(&itnim->timer); } } /* * Stop IO TOV timer. */ static void bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim) { bfa_boolean_t pathtov_active = BFA_FALSE; if (itnim->iotov_active) pathtov_active = BFA_TRUE; bfa_itnim_iotov_stop(itnim); if (pathtov_active) bfa_cb_itnim_tov_begin(itnim->ditn); bfa_itnim_iotov_cleanup(itnim); if (pathtov_active) bfa_cb_itnim_tov(itnim->ditn); } static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim) { struct bfa_fcpim_s *fcpim = BFA_FCPIM(itnim->bfa); fcpim->del_itn_stats.del_itn_iocomp_aborted += itnim->stats.iocomp_aborted; fcpim->del_itn_stats.del_itn_iocomp_timedout += itnim->stats.iocomp_timedout; fcpim->del_itn_stats.del_itn_iocom_sqer_needed += itnim->stats.iocom_sqer_needed; fcpim->del_itn_stats.del_itn_iocom_res_free += itnim->stats.iocom_res_free; fcpim->del_itn_stats.del_itn_iocom_hostabrts += itnim->stats.iocom_hostabrts; fcpim->del_itn_stats.del_itn_total_ios += itnim->stats.total_ios; fcpim->del_itn_stats.del_io_iocdowns += itnim->stats.io_iocdowns; fcpim->del_itn_stats.del_tm_iocdowns += itnim->stats.tm_iocdowns; } /* * bfa_itnim_public */ /* * Itnim interrupt processing. 
*/ void bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m) { struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa); union bfi_itn_i2h_msg_u msg; struct bfa_itnim_s *itnim; bfa_trc(bfa, m->mhdr.msg_id); msg.msg = m; switch (m->mhdr.msg_id) { case BFI_ITN_I2H_CREATE_RSP: itnim = BFA_ITNIM_FROM_TAG(fcpim, msg.create_rsp->bfa_handle); WARN_ON(msg.create_rsp->status != BFA_STATUS_OK); bfa_stats(itnim, create_comps); bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP); break; case BFI_ITN_I2H_DELETE_RSP: itnim = BFA_ITNIM_FROM_TAG(fcpim, msg.delete_rsp->bfa_handle); WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK); bfa_stats(itnim, delete_comps); bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP); break; case BFI_ITN_I2H_SLER_EVENT: itnim = BFA_ITNIM_FROM_TAG(fcpim, msg.sler_event->bfa_handle); bfa_stats(itnim, sler_events); bfa_sm_send_event(itnim, BFA_ITNIM_SM_SLER); break; default: bfa_trc(bfa, m->mhdr.msg_id); WARN_ON(1); } } /* * bfa_itnim_api */ struct bfa_itnim_s * bfa_itnim_create(struct bfa_s *bfa, struct bfa_rport_s *rport, void *ditn) { struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa); struct bfa_itnim_s *itnim; bfa_itn_create(bfa, rport, bfa_itnim_isr); itnim = BFA_ITNIM_FROM_TAG(fcpim, rport->rport_tag); WARN_ON(itnim->rport != rport); itnim->ditn = ditn; bfa_stats(itnim, creates); bfa_sm_send_event(itnim, BFA_ITNIM_SM_CREATE); return itnim; } void bfa_itnim_delete(struct bfa_itnim_s *itnim) { bfa_stats(itnim, deletes); bfa_sm_send_event(itnim, BFA_ITNIM_SM_DELETE); } void bfa_itnim_online(struct bfa_itnim_s *itnim, bfa_boolean_t seq_rec) { itnim->seq_rec = seq_rec; bfa_stats(itnim, onlines); bfa_sm_send_event(itnim, BFA_ITNIM_SM_ONLINE); } void bfa_itnim_offline(struct bfa_itnim_s *itnim) { bfa_stats(itnim, offlines); bfa_sm_send_event(itnim, BFA_ITNIM_SM_OFFLINE); } /* * Return true if itnim is considered offline for holding off IO request. * IO is not held if itnim is being deleted. */ bfa_boolean_t bfa_itnim_hold_io(struct bfa_itnim_s *itnim) { return itnim->fcpim->path_tov && itnim->iotov_active && (bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwcreate) || bfa_sm_cmp_state(itnim, bfa_itnim_sm_sler) || bfa_sm_cmp_state(itnim, bfa_itnim_sm_cleanup_offline) || bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwdelete) || bfa_sm_cmp_state(itnim, bfa_itnim_sm_offline) || bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable)); } #define bfa_io_lat_clock_res_div HZ #define bfa_io_lat_clock_res_mul 1000 bfa_status_t bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim, struct bfa_itnim_ioprofile_s *ioprofile) { struct bfa_fcpim_s *fcpim; if (!itnim) return BFA_STATUS_NO_FCPIM_NEXUS; fcpim = BFA_FCPIM(itnim->bfa); if (!fcpim->io_profile) return BFA_STATUS_IOPROFILE_OFF; itnim->ioprofile.index = BFA_IOBUCKET_MAX; /* unsigned 32-bit time_t overflow here in y2106 */ itnim->ioprofile.io_profile_start_time = bfa_io_profile_start_time(itnim->bfa); itnim->ioprofile.clock_res_mul = bfa_io_lat_clock_res_mul; itnim->ioprofile.clock_res_div = bfa_io_lat_clock_res_div; *ioprofile = itnim->ioprofile; return BFA_STATUS_OK; } void bfa_itnim_clear_stats(struct bfa_itnim_s *itnim) { int j; if (!itnim) return; memset(&itnim->stats, 0, sizeof(itnim->stats)); memset(&itnim->ioprofile, 0, sizeof(itnim->ioprofile)); for (j = 0; j < BFA_IOBUCKET_MAX; j++) itnim->ioprofile.io_latency.min[j] = ~0; } /* * BFA IO module state machine functions */ /* * IO is not started (unallocated). 
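 * (On BFA_IOIM_SM_START the IO is either queued to firmware, parked on
 * the ITN pending queue, or completed with a path TOV error, depending
 * on whether the ITN is online or holding IO.)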
*/ static void bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) { switch (event) { case BFA_IOIM_SM_START: if (!bfa_itnim_is_online(ioim->itnim)) { if (!bfa_itnim_hold_io(ioim->itnim)) { bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); list_del(&ioim->qe); list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q); bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_pathtov, ioim); } else { list_del(&ioim->qe); list_add_tail(&ioim->qe, &ioim->itnim->pending_q); } break; } if (ioim->nsges > BFI_SGE_INLINE) { if (!bfa_ioim_sgpg_alloc(ioim)) { bfa_sm_set_state(ioim, bfa_ioim_sm_sgalloc); return; } } if (!bfa_ioim_send_ioreq(ioim)) { bfa_sm_set_state(ioim, bfa_ioim_sm_qfull); break; } bfa_sm_set_state(ioim, bfa_ioim_sm_active); break; case BFA_IOIM_SM_IOTOV: bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); bfa_ioim_move_to_comp_q(ioim); bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_pathtov, ioim); break; case BFA_IOIM_SM_ABORT: /* * IO in pending queue can get abort requests. Complete abort * requests immediately. */ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); WARN_ON(!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim)); bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort, ioim); break; default: bfa_sm_fault(ioim->bfa, event); } } /* * IO is waiting for SG pages. */ static void bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) { bfa_trc(ioim->bfa, ioim->iotag); bfa_trc(ioim->bfa, event); switch (event) { case BFA_IOIM_SM_SGALLOCED: if (!bfa_ioim_send_ioreq(ioim)) { bfa_sm_set_state(ioim, bfa_ioim_sm_qfull); break; } bfa_sm_set_state(ioim, bfa_ioim_sm_active); break; case BFA_IOIM_SM_CLEANUP: bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe); bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed, ioim); bfa_ioim_notify_cleanup(ioim); break; case BFA_IOIM_SM_ABORT: bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe); bfa_ioim_move_to_comp_q(ioim); bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort, ioim); break; case BFA_IOIM_SM_HWFAIL: bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe); bfa_ioim_move_to_comp_q(ioim); bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed, ioim); break; default: bfa_sm_fault(ioim->bfa, event); } } /* * IO is active. 
*/ static void bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) { switch (event) { case BFA_IOIM_SM_COMP_GOOD: bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); bfa_ioim_move_to_comp_q(ioim); bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_good_comp, ioim); break; case BFA_IOIM_SM_COMP: bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); bfa_ioim_move_to_comp_q(ioim); bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp, ioim); break; case BFA_IOIM_SM_DONE: bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free); bfa_ioim_move_to_comp_q(ioim); bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp, ioim); break; case BFA_IOIM_SM_ABORT: ioim->iosp->abort_explicit = BFA_TRUE; ioim->io_cbfn = __bfa_cb_ioim_abort; if (bfa_ioim_send_abort(ioim)) bfa_sm_set_state(ioim, bfa_ioim_sm_abort); else { bfa_sm_set_state(ioim, bfa_ioim_sm_abort_qfull); bfa_stats(ioim->itnim, qwait); bfa_reqq_wait(ioim->bfa, ioim->reqq, &ioim->iosp->reqq_wait); } break; case BFA_IOIM_SM_CLEANUP: ioim->iosp->abort_explicit = BFA_FALSE; ioim->io_cbfn = __bfa_cb_ioim_failed; if (bfa_ioim_send_abort(ioim)) bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup); else { bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull); bfa_stats(ioim->itnim, qwait); bfa_reqq_wait(ioim->bfa, ioim->reqq, &ioim->iosp->reqq_wait); } break; case BFA_IOIM_SM_HWFAIL: bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); bfa_ioim_move_to_comp_q(ioim); bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed, ioim); break; case BFA_IOIM_SM_SQRETRY: if (bfa_ioim_maxretry_reached(ioim)) { /* max retry reached, free IO */ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free); bfa_ioim_move_to_comp_q(ioim); bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed, ioim); break; } /* waiting for IO tag resource free */ bfa_sm_set_state(ioim, bfa_ioim_sm_cmnd_retry); break; default: bfa_sm_fault(ioim->bfa, event); } } /* * IO is retried with new tag. */ static void bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) { switch (event) { case BFA_IOIM_SM_FREE: /* abts and rrq done. Now retry the IO with new tag */ bfa_ioim_update_iotag(ioim); if (!bfa_ioim_send_ioreq(ioim)) { bfa_sm_set_state(ioim, bfa_ioim_sm_qfull); break; } bfa_sm_set_state(ioim, bfa_ioim_sm_active); break; case BFA_IOIM_SM_CLEANUP: ioim->iosp->abort_explicit = BFA_FALSE; ioim->io_cbfn = __bfa_cb_ioim_failed; if (bfa_ioim_send_abort(ioim)) bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup); else { bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull); bfa_stats(ioim->itnim, qwait); bfa_reqq_wait(ioim->bfa, ioim->reqq, &ioim->iosp->reqq_wait); } break; case BFA_IOIM_SM_HWFAIL: bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); bfa_ioim_move_to_comp_q(ioim); bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed, ioim); break; case BFA_IOIM_SM_ABORT: /* in this state IO abort is done. * Waiting for IO tag resource free. */ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free); bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort, ioim); break; default: bfa_sm_fault(ioim->bfa, event); } } /* * IO is being aborted, waiting for completion from firmware. 
*/ static void bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) { bfa_trc(ioim->bfa, ioim->iotag); bfa_trc(ioim->bfa, event); switch (event) { case BFA_IOIM_SM_COMP_GOOD: case BFA_IOIM_SM_COMP: case BFA_IOIM_SM_DONE: case BFA_IOIM_SM_FREE: break; case BFA_IOIM_SM_ABORT_DONE: bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free); bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort, ioim); break; case BFA_IOIM_SM_ABORT_COMP: bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); bfa_ioim_move_to_comp_q(ioim); bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort, ioim); break; case BFA_IOIM_SM_COMP_UTAG: bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); bfa_ioim_move_to_comp_q(ioim); bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort, ioim); break; case BFA_IOIM_SM_CLEANUP: WARN_ON(ioim->iosp->abort_explicit != BFA_TRUE); ioim->iosp->abort_explicit = BFA_FALSE; if (bfa_ioim_send_abort(ioim)) bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup); else { bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull); bfa_stats(ioim->itnim, qwait); bfa_reqq_wait(ioim->bfa, ioim->reqq, &ioim->iosp->reqq_wait); } break; case BFA_IOIM_SM_HWFAIL: bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); bfa_ioim_move_to_comp_q(ioim); bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed, ioim); break; default: bfa_sm_fault(ioim->bfa, event); } } /* * IO is being cleaned up (implicit abort), waiting for completion from * firmware. */ static void bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) { bfa_trc(ioim->bfa, ioim->iotag); bfa_trc(ioim->bfa, event); switch (event) { case BFA_IOIM_SM_COMP_GOOD: case BFA_IOIM_SM_COMP: case BFA_IOIM_SM_DONE: case BFA_IOIM_SM_FREE: break; case BFA_IOIM_SM_ABORT: /* * IO is already being aborted implicitly */ ioim->io_cbfn = __bfa_cb_ioim_abort; break; case BFA_IOIM_SM_ABORT_DONE: bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free); bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim); bfa_ioim_notify_cleanup(ioim); break; case BFA_IOIM_SM_ABORT_COMP: bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim); bfa_ioim_notify_cleanup(ioim); break; case BFA_IOIM_SM_COMP_UTAG: bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim); bfa_ioim_notify_cleanup(ioim); break; case BFA_IOIM_SM_HWFAIL: bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); bfa_ioim_move_to_comp_q(ioim); bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed, ioim); break; case BFA_IOIM_SM_CLEANUP: /* * IO can be in cleanup state already due to TM command. * 2nd cleanup request comes from ITN offline event. 
*/ break; default: bfa_sm_fault(ioim->bfa, event); } } /* * IO is waiting for room in request CQ */ static void bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) { bfa_trc(ioim->bfa, ioim->iotag); bfa_trc(ioim->bfa, event); switch (event) { case BFA_IOIM_SM_QRESUME: bfa_sm_set_state(ioim, bfa_ioim_sm_active); bfa_ioim_send_ioreq(ioim); break; case BFA_IOIM_SM_ABORT: bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); bfa_reqq_wcancel(&ioim->iosp->reqq_wait); bfa_ioim_move_to_comp_q(ioim); bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort, ioim); break; case BFA_IOIM_SM_CLEANUP: bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); bfa_reqq_wcancel(&ioim->iosp->reqq_wait); bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed, ioim); bfa_ioim_notify_cleanup(ioim); break; case BFA_IOIM_SM_HWFAIL: bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); bfa_reqq_wcancel(&ioim->iosp->reqq_wait); bfa_ioim_move_to_comp_q(ioim); bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed, ioim); break; default: bfa_sm_fault(ioim->bfa, event); } } /* * Active IO is being aborted, waiting for room in request CQ. */ static void bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) { bfa_trc(ioim->bfa, ioim->iotag); bfa_trc(ioim->bfa, event); switch (event) { case BFA_IOIM_SM_QRESUME: bfa_sm_set_state(ioim, bfa_ioim_sm_abort); bfa_ioim_send_abort(ioim); break; case BFA_IOIM_SM_CLEANUP: WARN_ON(ioim->iosp->abort_explicit != BFA_TRUE); ioim->iosp->abort_explicit = BFA_FALSE; bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull); break; case BFA_IOIM_SM_COMP_GOOD: case BFA_IOIM_SM_COMP: bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); bfa_reqq_wcancel(&ioim->iosp->reqq_wait); bfa_ioim_move_to_comp_q(ioim); bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort, ioim); break; case BFA_IOIM_SM_DONE: bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free); bfa_reqq_wcancel(&ioim->iosp->reqq_wait); bfa_ioim_move_to_comp_q(ioim); bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort, ioim); break; case BFA_IOIM_SM_HWFAIL: bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); bfa_reqq_wcancel(&ioim->iosp->reqq_wait); bfa_ioim_move_to_comp_q(ioim); bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed, ioim); break; default: bfa_sm_fault(ioim->bfa, event); } } /* * Active IO is being cleaned up, waiting for room in request CQ. */ static void bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) { bfa_trc(ioim->bfa, ioim->iotag); bfa_trc(ioim->bfa, event); switch (event) { case BFA_IOIM_SM_QRESUME: bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup); bfa_ioim_send_abort(ioim); break; case BFA_IOIM_SM_ABORT: /* * IO is already being cleaned up implicitly */ ioim->io_cbfn = __bfa_cb_ioim_abort; break; case BFA_IOIM_SM_COMP_GOOD: case BFA_IOIM_SM_COMP: bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); bfa_reqq_wcancel(&ioim->iosp->reqq_wait); bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim); bfa_ioim_notify_cleanup(ioim); break; case BFA_IOIM_SM_DONE: bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free); bfa_reqq_wcancel(&ioim->iosp->reqq_wait); bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim); bfa_ioim_notify_cleanup(ioim); break; case BFA_IOIM_SM_HWFAIL: bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); bfa_reqq_wcancel(&ioim->iosp->reqq_wait); bfa_ioim_move_to_comp_q(ioim); bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed, ioim); break; default: bfa_sm_fault(ioim->bfa, event); } } /* * IO bfa callback is pending. 
*/ static void bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) { switch (event) { case BFA_IOIM_SM_HCB: bfa_sm_set_state(ioim, bfa_ioim_sm_uninit); bfa_ioim_free(ioim); break; case BFA_IOIM_SM_CLEANUP: bfa_ioim_notify_cleanup(ioim); break; case BFA_IOIM_SM_HWFAIL: break; default: bfa_sm_fault(ioim->bfa, event); } } /* * IO bfa callback is pending. IO resource cannot be freed. */ static void bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) { bfa_trc(ioim->bfa, ioim->iotag); bfa_trc(ioim->bfa, event); switch (event) { case BFA_IOIM_SM_HCB: bfa_sm_set_state(ioim, bfa_ioim_sm_resfree); list_del(&ioim->qe); list_add_tail(&ioim->qe, &ioim->fcpim->ioim_resfree_q); break; case BFA_IOIM_SM_FREE: bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); break; case BFA_IOIM_SM_CLEANUP: bfa_ioim_notify_cleanup(ioim); break; case BFA_IOIM_SM_HWFAIL: bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); break; default: bfa_sm_fault(ioim->bfa, event); } } /* * IO is completed, waiting resource free from firmware. */ static void bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) { bfa_trc(ioim->bfa, ioim->iotag); bfa_trc(ioim->bfa, event); switch (event) { case BFA_IOIM_SM_FREE: bfa_sm_set_state(ioim, bfa_ioim_sm_uninit); bfa_ioim_free(ioim); break; case BFA_IOIM_SM_CLEANUP: bfa_ioim_notify_cleanup(ioim); break; case BFA_IOIM_SM_HWFAIL: break; default: bfa_sm_fault(ioim->bfa, event); } } /* * This is called from bfa_fcpim_start after the bfa_init() with flash read * is complete by driver. now invalidate the stale content of lun mask * like unit attention, rp tag and lp tag. */ void bfa_ioim_lm_init(struct bfa_s *bfa) { struct bfa_lun_mask_s *lunm_list; int i; if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG) return; lunm_list = bfa_get_lun_mask_list(bfa); for (i = 0; i < MAX_LUN_MASK_CFG; i++) { lunm_list[i].ua = BFA_IOIM_LM_UA_RESET; lunm_list[i].lp_tag = BFA_LP_TAG_INVALID; lunm_list[i].rp_tag = BFA_RPORT_TAG_INVALID; } } static void __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete) { struct bfa_ioim_s *ioim = cbarg; if (!complete) { bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB); return; } bfa_cb_ioim_good_comp(ioim->bfa->bfad, ioim->dio); } static void __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete) { struct bfa_ioim_s *ioim = cbarg; struct bfi_ioim_rsp_s *m; u8 *snsinfo = NULL; u8 sns_len = 0; s32 residue = 0; if (!complete) { bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB); return; } m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg; if (m->io_status == BFI_IOIM_STS_OK) { /* * setup sense information, if present */ if ((m->scsi_status == SAM_STAT_CHECK_CONDITION) && m->sns_len) { sns_len = m->sns_len; snsinfo = BFA_SNSINFO_FROM_TAG(ioim->fcpim->fcp, ioim->iotag); } /* * setup residue value correctly for normal completions */ if (m->resid_flags == FCP_RESID_UNDER) { residue = be32_to_cpu(m->residue); bfa_stats(ioim->itnim, iocomp_underrun); } if (m->resid_flags == FCP_RESID_OVER) { residue = be32_to_cpu(m->residue); residue = -residue; bfa_stats(ioim->itnim, iocomp_overrun); } } bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, m->io_status, m->scsi_status, sns_len, snsinfo, residue); } void bfa_fcpim_lunmask_rp_update(struct bfa_s *bfa, wwn_t lp_wwn, wwn_t rp_wwn, u16 rp_tag, u8 lp_tag) { struct bfa_lun_mask_s *lun_list; u8 i; if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG) return; lun_list = bfa_get_lun_mask_list(bfa); for (i = 0; i < MAX_LUN_MASK_CFG; i++) { if (lun_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE) { if ((lun_list[i].lp_wwn == 
lp_wwn) && (lun_list[i].rp_wwn == rp_wwn)) { lun_list[i].rp_tag = rp_tag; lun_list[i].lp_tag = lp_tag; } } } } /* * set UA for all active luns in LM DB */ static void bfa_ioim_lm_set_ua(struct bfa_s *bfa) { struct bfa_lun_mask_s *lunm_list; int i; lunm_list = bfa_get_lun_mask_list(bfa); for (i = 0; i < MAX_LUN_MASK_CFG; i++) { if (lunm_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE) continue; lunm_list[i].ua = BFA_IOIM_LM_UA_SET; } } bfa_status_t bfa_fcpim_lunmask_update(struct bfa_s *bfa, u32 update) { struct bfa_lunmask_cfg_s *lun_mask; bfa_trc(bfa, bfa_get_lun_mask_status(bfa)); if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG) return BFA_STATUS_FAILED; if (bfa_get_lun_mask_status(bfa) == update) return BFA_STATUS_NO_CHANGE; lun_mask = bfa_get_lun_mask(bfa); lun_mask->status = update; if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_ENABLED) bfa_ioim_lm_set_ua(bfa); return bfa_dconf_update(bfa); } bfa_status_t bfa_fcpim_lunmask_clear(struct bfa_s *bfa) { int i; struct bfa_lun_mask_s *lunm_list; bfa_trc(bfa, bfa_get_lun_mask_status(bfa)); if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG) return BFA_STATUS_FAILED; lunm_list = bfa_get_lun_mask_list(bfa); for (i = 0; i < MAX_LUN_MASK_CFG; i++) { if (lunm_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE) { if (lunm_list[i].rp_tag != BFA_RPORT_TAG_INVALID) bfa_rport_unset_lunmask(bfa, BFA_RPORT_FROM_TAG(bfa, lunm_list[i].rp_tag)); } } memset(lunm_list, 0, sizeof(struct bfa_lun_mask_s) * MAX_LUN_MASK_CFG); return bfa_dconf_update(bfa); } bfa_status_t bfa_fcpim_lunmask_query(struct bfa_s *bfa, void *buf) { struct bfa_lunmask_cfg_s *lun_mask; bfa_trc(bfa, bfa_get_lun_mask_status(bfa)); if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG) return BFA_STATUS_FAILED; lun_mask = bfa_get_lun_mask(bfa); memcpy(buf, lun_mask, sizeof(struct bfa_lunmask_cfg_s)); return BFA_STATUS_OK; } bfa_status_t bfa_fcpim_lunmask_add(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn, wwn_t rpwwn, struct scsi_lun lun) { struct bfa_lun_mask_s *lunm_list; struct bfa_rport_s *rp = NULL; int i, free_index = MAX_LUN_MASK_CFG + 1; struct bfa_fcs_lport_s *port = NULL; struct bfa_fcs_rport_s *rp_fcs; bfa_trc(bfa, bfa_get_lun_mask_status(bfa)); if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG) return BFA_STATUS_FAILED; port = bfa_fcs_lookup_port(&((struct bfad_s *)bfa->bfad)->bfa_fcs, vf_id, *pwwn); if (port) { *pwwn = port->port_cfg.pwwn; rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn); if (rp_fcs) rp = rp_fcs->bfa_rport; } lunm_list = bfa_get_lun_mask_list(bfa); /* if entry exists */ for (i = 0; i < MAX_LUN_MASK_CFG; i++) { if (lunm_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE) free_index = i; if ((lunm_list[i].lp_wwn == *pwwn) && (lunm_list[i].rp_wwn == rpwwn) && (scsilun_to_int((struct scsi_lun *)&lunm_list[i].lun) == scsilun_to_int((struct scsi_lun *)&lun))) return BFA_STATUS_ENTRY_EXISTS; } if (free_index > MAX_LUN_MASK_CFG) return BFA_STATUS_MAX_ENTRY_REACHED; if (rp) { lunm_list[free_index].lp_tag = bfa_lps_get_tag_from_pid(bfa, rp->rport_info.local_pid); lunm_list[free_index].rp_tag = rp->rport_tag; } else { lunm_list[free_index].lp_tag = BFA_LP_TAG_INVALID; lunm_list[free_index].rp_tag = BFA_RPORT_TAG_INVALID; } lunm_list[free_index].lp_wwn = *pwwn; lunm_list[free_index].rp_wwn = rpwwn; lunm_list[free_index].lun = lun; lunm_list[free_index].state = BFA_IOIM_LUN_MASK_ACTIVE; /* set for all luns in this rp */ for (i = 0; i < MAX_LUN_MASK_CFG; i++) { if ((lunm_list[i].lp_wwn == *pwwn) && (lunm_list[i].rp_wwn == rpwwn)) lunm_list[i].ua = BFA_IOIM_LM_UA_SET; } return 
bfa_dconf_update(bfa); } bfa_status_t bfa_fcpim_lunmask_delete(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn, wwn_t rpwwn, struct scsi_lun lun) { struct bfa_lun_mask_s *lunm_list; struct bfa_fcs_lport_s *port = NULL; int i; /* in min cfg lunm_list could be NULL but no commands should run. */ if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG) return BFA_STATUS_FAILED; bfa_trc(bfa, bfa_get_lun_mask_status(bfa)); bfa_trc(bfa, *pwwn); bfa_trc(bfa, rpwwn); bfa_trc(bfa, scsilun_to_int((struct scsi_lun *)&lun)); if (*pwwn == 0) { port = bfa_fcs_lookup_port( &((struct bfad_s *)bfa->bfad)->bfa_fcs, vf_id, *pwwn); if (port) *pwwn = port->port_cfg.pwwn; } lunm_list = bfa_get_lun_mask_list(bfa); for (i = 0; i < MAX_LUN_MASK_CFG; i++) { if ((lunm_list[i].lp_wwn == *pwwn) && (lunm_list[i].rp_wwn == rpwwn) && (scsilun_to_int((struct scsi_lun *)&lunm_list[i].lun) == scsilun_to_int((struct scsi_lun *)&lun))) { lunm_list[i].lp_wwn = 0; lunm_list[i].rp_wwn = 0; int_to_scsilun(0, &lunm_list[i].lun); lunm_list[i].state = BFA_IOIM_LUN_MASK_INACTIVE; if (lunm_list[i].rp_tag != BFA_RPORT_TAG_INVALID) { lunm_list[i].rp_tag = BFA_RPORT_TAG_INVALID; lunm_list[i].lp_tag = BFA_LP_TAG_INVALID; } return bfa_dconf_update(bfa); } } /* set for all luns in this rp */ for (i = 0; i < MAX_LUN_MASK_CFG; i++) { if ((lunm_list[i].lp_wwn == *pwwn) && (lunm_list[i].rp_wwn == rpwwn)) lunm_list[i].ua = BFA_IOIM_LM_UA_SET; } return BFA_STATUS_ENTRY_NOT_EXISTS; } static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete) { struct bfa_ioim_s *ioim = cbarg; if (!complete) { bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB); return; } bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED, 0, 0, NULL, 0); } static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete) { struct bfa_ioim_s *ioim = cbarg; bfa_stats(ioim->itnim, path_tov_expired); if (!complete) { bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB); return; } bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV, 0, 0, NULL, 0); } static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete) { struct bfa_ioim_s *ioim = cbarg; if (!complete) { bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB); return; } bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio); } static void bfa_ioim_sgpg_alloced(void *cbarg) { struct bfa_ioim_s *ioim = cbarg; ioim->nsgpgs = BFA_SGPG_NPAGE(ioim->nsges); list_splice_tail_init(&ioim->iosp->sgpg_wqe.sgpg_q, &ioim->sgpg_q); ioim->sgpg = bfa_q_first(&ioim->sgpg_q); bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED); } /* * Send I/O request to firmware. 
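* Returns BFA_FALSE and queues a request-queue wait element if no message slot is available. The first SG element is carried inline in the request; remaining elements are chained through the pre-allocated SG pages.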
*/ static bfa_boolean_t bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim) { struct bfa_itnim_s *itnim = ioim->itnim; struct bfi_ioim_req_s *m; static struct fcp_cmnd_s cmnd_z0 = { { { 0 } } }; struct bfi_sge_s *sge, *sgpge; u32 pgdlen = 0; u32 fcp_dl; u64 addr; struct scatterlist *sg; struct bfa_sgpg_s *sgpg; struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio; u32 i, sge_id, pgcumsz; enum dma_data_direction dmadir; /* * check for room in queue to send request now */ m = bfa_reqq_next(ioim->bfa, ioim->reqq); if (!m) { bfa_stats(ioim->itnim, qwait); bfa_reqq_wait(ioim->bfa, ioim->reqq, &ioim->iosp->reqq_wait); return BFA_FALSE; } /* * build i/o request message next */ m->io_tag = cpu_to_be16(ioim->iotag); m->rport_hdl = ioim->itnim->rport->fw_handle; m->io_timeout = 0; sge = &m->sges[0]; sgpg = ioim->sgpg; sge_id = 0; sgpge = NULL; pgcumsz = 0; scsi_for_each_sg(cmnd, sg, ioim->nsges, i) { if (i == 0) { /* build inline IO SG element */ addr = bfa_sgaddr_le(sg_dma_address(sg)); sge->sga = *(union bfi_addr_u *) &addr; pgdlen = sg_dma_len(sg); sge->sg_len = pgdlen; sge->flags = (ioim->nsges > BFI_SGE_INLINE) ? BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST; bfa_sge_to_be(sge); sge++; } else { if (sge_id == 0) sgpge = sgpg->sgpg->sges; addr = bfa_sgaddr_le(sg_dma_address(sg)); sgpge->sga = *(union bfi_addr_u *) &addr; sgpge->sg_len = sg_dma_len(sg); pgcumsz += sgpge->sg_len; /* set flags */ if (i < (ioim->nsges - 1) && sge_id < (BFI_SGPG_DATA_SGES - 1)) sgpge->flags = BFI_SGE_DATA; else if (i < (ioim->nsges - 1)) sgpge->flags = BFI_SGE_DATA_CPL; else sgpge->flags = BFI_SGE_DATA_LAST; bfa_sge_to_le(sgpge); sgpge++; if (i == (ioim->nsges - 1)) { sgpge->flags = BFI_SGE_PGDLEN; sgpge->sga.a32.addr_lo = 0; sgpge->sga.a32.addr_hi = 0; sgpge->sg_len = pgcumsz; bfa_sge_to_le(sgpge); } else if (++sge_id == BFI_SGPG_DATA_SGES) { sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg); sgpge->flags = BFI_SGE_LINK; sgpge->sga = sgpg->sgpg_pa; sgpge->sg_len = pgcumsz; bfa_sge_to_le(sgpge); sge_id = 0; pgcumsz = 0; } } } if (ioim->nsges > BFI_SGE_INLINE) { sge->sga = ioim->sgpg->sgpg_pa; } else { sge->sga.a32.addr_lo = 0; sge->sga.a32.addr_hi = 0; } sge->sg_len = pgdlen; sge->flags = BFI_SGE_PGDLEN; bfa_sge_to_be(sge); /* * set up I/O command parameters */ m->cmnd = cmnd_z0; int_to_scsilun(cmnd->device->lun, &m->cmnd.lun); dmadir = cmnd->sc_data_direction; if (dmadir == DMA_TO_DEVICE) m->cmnd.iodir = FCP_IODIR_WRITE; else if (dmadir == DMA_FROM_DEVICE) m->cmnd.iodir = FCP_IODIR_READ; else m->cmnd.iodir = FCP_IODIR_NONE; m->cmnd.cdb = *(struct scsi_cdb_s *) cmnd->cmnd; fcp_dl = scsi_bufflen(cmnd); m->cmnd.fcp_dl = cpu_to_be32(fcp_dl); /* * set up I/O message header */ switch (m->cmnd.iodir) { case FCP_IODIR_READ: bfi_h2i_set(m->mh, BFI_MC_IOIM_READ, 0, bfa_fn_lpu(ioim->bfa)); bfa_stats(itnim, input_reqs); ioim->itnim->stats.rd_throughput += fcp_dl; break; case FCP_IODIR_WRITE: bfi_h2i_set(m->mh, BFI_MC_IOIM_WRITE, 0, bfa_fn_lpu(ioim->bfa)); bfa_stats(itnim, output_reqs); ioim->itnim->stats.wr_throughput += fcp_dl; break; case FCP_IODIR_RW: bfa_stats(itnim, input_reqs); bfa_stats(itnim, output_reqs); fallthrough; default: bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_fn_lpu(ioim->bfa)); } if (itnim->seq_rec || (scsi_bufflen(cmnd) & (sizeof(u32) - 1))) bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_fn_lpu(ioim->bfa)); /* * queue I/O message to firmware */ bfa_reqq_produce(ioim->bfa, ioim->reqq, m->mh); return BFA_TRUE; } /* * Setup any additional SG pages needed.Inline SG element is setup * at queuing time. 
*/ static bfa_boolean_t bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim) { u16 nsgpgs; WARN_ON(ioim->nsges <= BFI_SGE_INLINE); /* * allocate SG pages needed */ nsgpgs = BFA_SGPG_NPAGE(ioim->nsges); if (!nsgpgs) return BFA_TRUE; if (bfa_sgpg_malloc(ioim->bfa, &ioim->sgpg_q, nsgpgs) != BFA_STATUS_OK) { bfa_sgpg_wait(ioim->bfa, &ioim->iosp->sgpg_wqe, nsgpgs); return BFA_FALSE; } ioim->nsgpgs = nsgpgs; ioim->sgpg = bfa_q_first(&ioim->sgpg_q); return BFA_TRUE; } /* * Send I/O abort request to firmware. */ static bfa_boolean_t bfa_ioim_send_abort(struct bfa_ioim_s *ioim) { struct bfi_ioim_abort_req_s *m; enum bfi_ioim_h2i msgop; /* * check for room in queue to send request now */ m = bfa_reqq_next(ioim->bfa, ioim->reqq); if (!m) return BFA_FALSE; /* * build i/o request message next */ if (ioim->iosp->abort_explicit) msgop = BFI_IOIM_H2I_IOABORT_REQ; else msgop = BFI_IOIM_H2I_IOCLEANUP_REQ; bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_fn_lpu(ioim->bfa)); m->io_tag = cpu_to_be16(ioim->iotag); m->abort_tag = ++ioim->abort_tag; /* * queue I/O message to firmware */ bfa_reqq_produce(ioim->bfa, ioim->reqq, m->mh); return BFA_TRUE; } /* * Call to resume any I/O requests waiting for room in request queue. */ static void bfa_ioim_qresume(void *cbarg) { struct bfa_ioim_s *ioim = cbarg; bfa_stats(ioim->itnim, qresumes); bfa_sm_send_event(ioim, BFA_IOIM_SM_QRESUME); } static void bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim) { /* * Move IO from itnim queue to fcpim global queue since itnim will be * freed. */ list_del(&ioim->qe); list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q); if (!ioim->iosp->tskim) { if (ioim->fcpim->delay_comp && ioim->itnim->iotov_active) { bfa_cb_dequeue(&ioim->hcb_qe); list_del(&ioim->qe); list_add_tail(&ioim->qe, &ioim->itnim->delay_comp_q); } bfa_itnim_iodone(ioim->itnim); } else bfa_wc_down(&ioim->iosp->tskim->wc); } static bfa_boolean_t bfa_ioim_is_abortable(struct bfa_ioim_s *ioim) { if ((bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit) && (!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim))) || (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort)) || (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort_qfull)) || (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb)) || (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb_free)) || (bfa_sm_cmp_state(ioim, bfa_ioim_sm_resfree))) return BFA_FALSE; return BFA_TRUE; } void bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov) { /* * If path tov timer expired, failback with PATHTOV status - these * IO requests are not normally retried by IO stack. * * Otherwise device cameback online and fail it with normal failed * status so that IO stack retries these failed IO requests. */ if (iotov) ioim->io_cbfn = __bfa_cb_ioim_pathtov; else { ioim->io_cbfn = __bfa_cb_ioim_failed; bfa_stats(ioim->itnim, iocom_nexus_abort); } bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim); /* * Move IO to fcpim global queue since itnim will be * freed. */ list_del(&ioim->qe); list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q); } /* * Memory allocation and initialization. 
*/ void bfa_ioim_attach(struct bfa_fcpim_s *fcpim) { struct bfa_ioim_s *ioim; struct bfa_fcp_mod_s *fcp = fcpim->fcp; struct bfa_ioim_sp_s *iosp; u16 i; /* * claim memory first */ ioim = (struct bfa_ioim_s *) bfa_mem_kva_curp(fcp); fcpim->ioim_arr = ioim; bfa_mem_kva_curp(fcp) = (u8 *) (ioim + fcpim->fcp->num_ioim_reqs); iosp = (struct bfa_ioim_sp_s *) bfa_mem_kva_curp(fcp); fcpim->ioim_sp_arr = iosp; bfa_mem_kva_curp(fcp) = (u8 *) (iosp + fcpim->fcp->num_ioim_reqs); /* * Initialize ioim free queues */ INIT_LIST_HEAD(&fcpim->ioim_resfree_q); INIT_LIST_HEAD(&fcpim->ioim_comp_q); for (i = 0; i < fcpim->fcp->num_ioim_reqs; i++, ioim++, iosp++) { /* * initialize IOIM */ memset(ioim, 0, sizeof(struct bfa_ioim_s)); ioim->iotag = i; ioim->bfa = fcpim->bfa; ioim->fcpim = fcpim; ioim->iosp = iosp; INIT_LIST_HEAD(&ioim->sgpg_q); bfa_reqq_winit(&ioim->iosp->reqq_wait, bfa_ioim_qresume, ioim); bfa_sgpg_winit(&ioim->iosp->sgpg_wqe, bfa_ioim_sgpg_alloced, ioim); bfa_sm_set_state(ioim, bfa_ioim_sm_uninit); } } void bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m) { struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa); struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m; struct bfa_ioim_s *ioim; u16 iotag; enum bfa_ioim_event evt = BFA_IOIM_SM_COMP; iotag = be16_to_cpu(rsp->io_tag); ioim = BFA_IOIM_FROM_TAG(fcpim, iotag); WARN_ON(ioim->iotag != iotag); bfa_trc(ioim->bfa, ioim->iotag); bfa_trc(ioim->bfa, rsp->io_status); bfa_trc(ioim->bfa, rsp->reuse_io_tag); if (bfa_sm_cmp_state(ioim, bfa_ioim_sm_active)) ioim->iosp->comp_rspmsg = *m; switch (rsp->io_status) { case BFI_IOIM_STS_OK: bfa_stats(ioim->itnim, iocomp_ok); if (rsp->reuse_io_tag == 0) evt = BFA_IOIM_SM_DONE; else evt = BFA_IOIM_SM_COMP; break; case BFI_IOIM_STS_TIMEDOUT: bfa_stats(ioim->itnim, iocomp_timedout); fallthrough; case BFI_IOIM_STS_ABORTED: rsp->io_status = BFI_IOIM_STS_ABORTED; bfa_stats(ioim->itnim, iocomp_aborted); if (rsp->reuse_io_tag == 0) evt = BFA_IOIM_SM_DONE; else evt = BFA_IOIM_SM_COMP; break; case BFI_IOIM_STS_PROTO_ERR: bfa_stats(ioim->itnim, iocom_proto_err); WARN_ON(!rsp->reuse_io_tag); evt = BFA_IOIM_SM_COMP; break; case BFI_IOIM_STS_SQER_NEEDED: bfa_stats(ioim->itnim, iocom_sqer_needed); WARN_ON(rsp->reuse_io_tag != 0); evt = BFA_IOIM_SM_SQRETRY; break; case BFI_IOIM_STS_RES_FREE: bfa_stats(ioim->itnim, iocom_res_free); evt = BFA_IOIM_SM_FREE; break; case BFI_IOIM_STS_HOST_ABORTED: bfa_stats(ioim->itnim, iocom_hostabrts); if (rsp->abort_tag != ioim->abort_tag) { bfa_trc(ioim->bfa, rsp->abort_tag); bfa_trc(ioim->bfa, ioim->abort_tag); return; } if (rsp->reuse_io_tag) evt = BFA_IOIM_SM_ABORT_COMP; else evt = BFA_IOIM_SM_ABORT_DONE; break; case BFI_IOIM_STS_UTAG: bfa_stats(ioim->itnim, iocom_utags); evt = BFA_IOIM_SM_COMP_UTAG; break; default: WARN_ON(1); } bfa_sm_send_event(ioim, evt); } void bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m) { struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa); struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m; struct bfa_ioim_s *ioim; u16 iotag; iotag = be16_to_cpu(rsp->io_tag); ioim = BFA_IOIM_FROM_TAG(fcpim, iotag); WARN_ON(ioim->iotag != iotag); bfa_ioim_cb_profile_comp(fcpim, ioim); bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD); } /* * Called by itnim to clean up IO while going offline. 
*/ void bfa_ioim_cleanup(struct bfa_ioim_s *ioim) { bfa_trc(ioim->bfa, ioim->iotag); bfa_stats(ioim->itnim, io_cleanups); ioim->iosp->tskim = NULL; bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP); } void bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim, struct bfa_tskim_s *tskim) { bfa_trc(ioim->bfa, ioim->iotag); bfa_stats(ioim->itnim, io_tmaborts); ioim->iosp->tskim = tskim; bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP); } /* * IOC failure handling. */ void bfa_ioim_iocdisable(struct bfa_ioim_s *ioim) { bfa_trc(ioim->bfa, ioim->iotag); bfa_stats(ioim->itnim, io_iocdowns); bfa_sm_send_event(ioim, BFA_IOIM_SM_HWFAIL); } /* * IO offline TOV popped. Fail the pending IO. */ void bfa_ioim_tov(struct bfa_ioim_s *ioim) { bfa_trc(ioim->bfa, ioim->iotag); bfa_sm_send_event(ioim, BFA_IOIM_SM_IOTOV); } /* * Allocate IOIM resource for initiator mode I/O request. */ struct bfa_ioim_s * bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio, struct bfa_itnim_s *itnim, u16 nsges) { struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa); struct bfa_ioim_s *ioim; struct bfa_iotag_s *iotag = NULL; /* * alocate IOIM resource */ bfa_q_deq(&fcpim->fcp->iotag_ioim_free_q, &iotag); if (!iotag) { bfa_stats(itnim, no_iotags); return NULL; } ioim = BFA_IOIM_FROM_TAG(fcpim, iotag->tag); ioim->dio = dio; ioim->itnim = itnim; ioim->nsges = nsges; ioim->nsgpgs = 0; bfa_stats(itnim, total_ios); fcpim->ios_active++; list_add_tail(&ioim->qe, &itnim->io_q); return ioim; } void bfa_ioim_free(struct bfa_ioim_s *ioim) { struct bfa_fcpim_s *fcpim = ioim->fcpim; struct bfa_iotag_s *iotag; if (ioim->nsgpgs > 0) bfa_sgpg_mfree(ioim->bfa, &ioim->sgpg_q, ioim->nsgpgs); bfa_stats(ioim->itnim, io_comps); fcpim->ios_active--; ioim->iotag &= BFA_IOIM_IOTAG_MASK; WARN_ON(!(ioim->iotag < (fcpim->fcp->num_ioim_reqs + fcpim->fcp->num_fwtio_reqs))); iotag = BFA_IOTAG_FROM_TAG(fcpim->fcp, ioim->iotag); if (ioim->iotag < fcpim->fcp->num_ioim_reqs) list_add_tail(&iotag->qe, &fcpim->fcp->iotag_ioim_free_q); else list_add_tail(&iotag->qe, &fcpim->fcp->iotag_tio_free_q); list_del(&ioim->qe); } void bfa_ioim_start(struct bfa_ioim_s *ioim) { bfa_ioim_cb_profile_start(ioim->fcpim, ioim); /* * Obtain the queue over which this request has to be issued */ ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ? BFA_FALSE : bfa_itnim_get_reqq(ioim); bfa_sm_send_event(ioim, BFA_IOIM_SM_START); } /* * Driver I/O abort request. */ bfa_status_t bfa_ioim_abort(struct bfa_ioim_s *ioim) { bfa_trc(ioim->bfa, ioim->iotag); if (!bfa_ioim_is_abortable(ioim)) return BFA_STATUS_FAILED; bfa_stats(ioim->itnim, io_aborts); bfa_sm_send_event(ioim, BFA_IOIM_SM_ABORT); return BFA_STATUS_OK; } /* * BFA TSKIM state machine functions */ /* * Task management command beginning state. */ static void bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) { bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event); switch (event) { case BFA_TSKIM_SM_START: bfa_sm_set_state(tskim, bfa_tskim_sm_active); bfa_tskim_gather_ios(tskim); /* * If device is offline, do not send TM on wire. Just cleanup * any pending IO requests and complete TM request. 
*/ if (!bfa_itnim_is_online(tskim->itnim)) { bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup); tskim->tsk_status = BFI_TSKIM_STS_OK; bfa_tskim_cleanup_ios(tskim); return; } if (!bfa_tskim_send(tskim)) { bfa_sm_set_state(tskim, bfa_tskim_sm_qfull); bfa_stats(tskim->itnim, tm_qwait); bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq, &tskim->reqq_wait); } break; default: bfa_sm_fault(tskim->bfa, event); } } /* * TM command is active, awaiting completion from firmware to * cleanup IO requests in TM scope. */ static void bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) { bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event); switch (event) { case BFA_TSKIM_SM_DONE: bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup); bfa_tskim_cleanup_ios(tskim); break; case BFA_TSKIM_SM_CLEANUP: bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup); if (!bfa_tskim_send_abort(tskim)) { bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup_qfull); bfa_stats(tskim->itnim, tm_qwait); bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq, &tskim->reqq_wait); } break; case BFA_TSKIM_SM_HWFAIL: bfa_sm_set_state(tskim, bfa_tskim_sm_hcb); bfa_tskim_iocdisable_ios(tskim); bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed); break; default: bfa_sm_fault(tskim->bfa, event); } } /* * An active TM is being cleaned up since ITN is offline. Awaiting cleanup * completion event from firmware. */ static void bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) { bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event); switch (event) { case BFA_TSKIM_SM_DONE: /* * Ignore and wait for ABORT completion from firmware. */ break; case BFA_TSKIM_SM_UTAG: case BFA_TSKIM_SM_CLEANUP_DONE: bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup); bfa_tskim_cleanup_ios(tskim); break; case BFA_TSKIM_SM_HWFAIL: bfa_sm_set_state(tskim, bfa_tskim_sm_hcb); bfa_tskim_iocdisable_ios(tskim); bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed); break; default: bfa_sm_fault(tskim->bfa, event); } } static void bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) { bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event); switch (event) { case BFA_TSKIM_SM_IOS_DONE: bfa_sm_set_state(tskim, bfa_tskim_sm_hcb); bfa_tskim_qcomp(tskim, __bfa_cb_tskim_done); break; case BFA_TSKIM_SM_CLEANUP: /* * Ignore, TM command completed on wire. * Notify TM conmpletion on IO cleanup completion. */ break; case BFA_TSKIM_SM_HWFAIL: bfa_sm_set_state(tskim, bfa_tskim_sm_hcb); bfa_tskim_iocdisable_ios(tskim); bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed); break; default: bfa_sm_fault(tskim->bfa, event); } } /* * Task management command is waiting for room in request CQ */ static void bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) { bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event); switch (event) { case BFA_TSKIM_SM_QRESUME: bfa_sm_set_state(tskim, bfa_tskim_sm_active); bfa_tskim_send(tskim); break; case BFA_TSKIM_SM_CLEANUP: /* * No need to send TM on wire since ITN is offline. */ bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup); bfa_reqq_wcancel(&tskim->reqq_wait); bfa_tskim_cleanup_ios(tskim); break; case BFA_TSKIM_SM_HWFAIL: bfa_sm_set_state(tskim, bfa_tskim_sm_hcb); bfa_reqq_wcancel(&tskim->reqq_wait); bfa_tskim_iocdisable_ios(tskim); bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed); break; default: bfa_sm_fault(tskim->bfa, event); } } /* * Task management command is active, awaiting for room in request CQ * to send clean up request. 
*/ static void bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) { bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event); switch (event) { case BFA_TSKIM_SM_DONE: bfa_reqq_wcancel(&tskim->reqq_wait); fallthrough; case BFA_TSKIM_SM_QRESUME: bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup); bfa_tskim_send_abort(tskim); break; case BFA_TSKIM_SM_HWFAIL: bfa_sm_set_state(tskim, bfa_tskim_sm_hcb); bfa_reqq_wcancel(&tskim->reqq_wait); bfa_tskim_iocdisable_ios(tskim); bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed); break; default: bfa_sm_fault(tskim->bfa, event); } } /* * BFA callback is pending */ static void bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) { bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event); switch (event) { case BFA_TSKIM_SM_HCB: bfa_sm_set_state(tskim, bfa_tskim_sm_uninit); bfa_tskim_free(tskim); break; case BFA_TSKIM_SM_CLEANUP: bfa_tskim_notify_comp(tskim); break; case BFA_TSKIM_SM_HWFAIL: break; default: bfa_sm_fault(tskim->bfa, event); } } static void __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete) { struct bfa_tskim_s *tskim = cbarg; if (!complete) { bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB); return; } bfa_stats(tskim->itnim, tm_success); bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk, tskim->tsk_status); } static void __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete) { struct bfa_tskim_s *tskim = cbarg; if (!complete) { bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB); return; } bfa_stats(tskim->itnim, tm_failures); bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk, BFI_TSKIM_STS_FAILED); } static bfa_boolean_t bfa_tskim_match_scope(struct bfa_tskim_s *tskim, struct scsi_lun lun) { switch (tskim->tm_cmnd) { case FCP_TM_TARGET_RESET: return BFA_TRUE; case FCP_TM_ABORT_TASK_SET: case FCP_TM_CLEAR_TASK_SET: case FCP_TM_LUN_RESET: case FCP_TM_CLEAR_ACA: return !memcmp(&tskim->lun, &lun, sizeof(lun)); default: WARN_ON(1); } return BFA_FALSE; } /* * Gather affected IO requests and task management commands. */ static void bfa_tskim_gather_ios(struct bfa_tskim_s *tskim) { struct bfa_itnim_s *itnim = tskim->itnim; struct bfa_ioim_s *ioim; struct list_head *qe, *qen; struct scsi_cmnd *cmnd; struct scsi_lun scsilun; INIT_LIST_HEAD(&tskim->io_q); /* * Gather any active IO requests first. */ list_for_each_safe(qe, qen, &itnim->io_q) { ioim = (struct bfa_ioim_s *) qe; cmnd = (struct scsi_cmnd *) ioim->dio; int_to_scsilun(cmnd->device->lun, &scsilun); if (bfa_tskim_match_scope(tskim, scsilun)) { list_del(&ioim->qe); list_add_tail(&ioim->qe, &tskim->io_q); } } /* * Failback any pending IO requests immediately. */ list_for_each_safe(qe, qen, &itnim->pending_q) { ioim = (struct bfa_ioim_s *) qe; cmnd = (struct scsi_cmnd *) ioim->dio; int_to_scsilun(cmnd->device->lun, &scsilun); if (bfa_tskim_match_scope(tskim, scsilun)) { list_del(&ioim->qe); list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q); bfa_ioim_tov(ioim); } } } /* * IO cleanup completion */ static void bfa_tskim_cleanp_comp(void *tskim_cbarg) { struct bfa_tskim_s *tskim = tskim_cbarg; bfa_stats(tskim->itnim, tm_io_comps); bfa_sm_send_event(tskim, BFA_TSKIM_SM_IOS_DONE); } /* * Gather affected IO requests and task management commands. 
*/ static void bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim) { struct bfa_ioim_s *ioim; struct list_head *qe, *qen; bfa_wc_init(&tskim->wc, bfa_tskim_cleanp_comp, tskim); list_for_each_safe(qe, qen, &tskim->io_q) { ioim = (struct bfa_ioim_s *) qe; bfa_wc_up(&tskim->wc); bfa_ioim_cleanup_tm(ioim, tskim); } bfa_wc_wait(&tskim->wc); } /* * Send task management request to firmware. */ static bfa_boolean_t bfa_tskim_send(struct bfa_tskim_s *tskim) { struct bfa_itnim_s *itnim = tskim->itnim; struct bfi_tskim_req_s *m; /* * check for room in queue to send request now */ m = bfa_reqq_next(tskim->bfa, itnim->reqq); if (!m) return BFA_FALSE; /* * build i/o request message next */ bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ, bfa_fn_lpu(tskim->bfa)); m->tsk_tag = cpu_to_be16(tskim->tsk_tag); m->itn_fhdl = tskim->itnim->rport->fw_handle; m->t_secs = tskim->tsecs; m->lun = tskim->lun; m->tm_flags = tskim->tm_cmnd; /* * queue I/O message to firmware */ bfa_reqq_produce(tskim->bfa, itnim->reqq, m->mh); return BFA_TRUE; } /* * Send abort request to cleanup an active TM to firmware. */ static bfa_boolean_t bfa_tskim_send_abort(struct bfa_tskim_s *tskim) { struct bfa_itnim_s *itnim = tskim->itnim; struct bfi_tskim_abortreq_s *m; /* * check for room in queue to send request now */ m = bfa_reqq_next(tskim->bfa, itnim->reqq); if (!m) return BFA_FALSE; /* * build i/o request message next */ bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ, bfa_fn_lpu(tskim->bfa)); m->tsk_tag = cpu_to_be16(tskim->tsk_tag); /* * queue I/O message to firmware */ bfa_reqq_produce(tskim->bfa, itnim->reqq, m->mh); return BFA_TRUE; } /* * Call to resume task management cmnd waiting for room in request queue. */ static void bfa_tskim_qresume(void *cbarg) { struct bfa_tskim_s *tskim = cbarg; bfa_stats(tskim->itnim, tm_qresumes); bfa_sm_send_event(tskim, BFA_TSKIM_SM_QRESUME); } /* * Cleanup IOs associated with a task mangement command on IOC failures. */ static void bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim) { struct bfa_ioim_s *ioim; struct list_head *qe, *qen; list_for_each_safe(qe, qen, &tskim->io_q) { ioim = (struct bfa_ioim_s *) qe; bfa_ioim_iocdisable(ioim); } } /* * Notification on completions from related ioim. */ void bfa_tskim_iodone(struct bfa_tskim_s *tskim) { bfa_wc_down(&tskim->wc); } /* * Handle IOC h/w failure notification from itnim. */ void bfa_tskim_iocdisable(struct bfa_tskim_s *tskim) { tskim->notify = BFA_FALSE; bfa_stats(tskim->itnim, tm_iocdowns); bfa_sm_send_event(tskim, BFA_TSKIM_SM_HWFAIL); } /* * Cleanup TM command and associated IOs as part of ITNIM offline. */ void bfa_tskim_cleanup(struct bfa_tskim_s *tskim) { tskim->notify = BFA_TRUE; bfa_stats(tskim->itnim, tm_cleanups); bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP); } /* * Memory allocation and initialization. 
*/ void bfa_tskim_attach(struct bfa_fcpim_s *fcpim) { struct bfa_tskim_s *tskim; struct bfa_fcp_mod_s *fcp = fcpim->fcp; u16 i; INIT_LIST_HEAD(&fcpim->tskim_free_q); INIT_LIST_HEAD(&fcpim->tskim_unused_q); tskim = (struct bfa_tskim_s *) bfa_mem_kva_curp(fcp); fcpim->tskim_arr = tskim; for (i = 0; i < fcpim->num_tskim_reqs; i++, tskim++) { /* * initialize TSKIM */ memset(tskim, 0, sizeof(struct bfa_tskim_s)); tskim->tsk_tag = i; tskim->bfa = fcpim->bfa; tskim->fcpim = fcpim; tskim->notify = BFA_FALSE; bfa_reqq_winit(&tskim->reqq_wait, bfa_tskim_qresume, tskim); bfa_sm_set_state(tskim, bfa_tskim_sm_uninit); list_add_tail(&tskim->qe, &fcpim->tskim_free_q); } bfa_mem_kva_curp(fcp) = (u8 *) tskim; } void bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m) { struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa); struct bfi_tskim_rsp_s *rsp = (struct bfi_tskim_rsp_s *) m; struct bfa_tskim_s *tskim; u16 tsk_tag = be16_to_cpu(rsp->tsk_tag); tskim = BFA_TSKIM_FROM_TAG(fcpim, tsk_tag); WARN_ON(tskim->tsk_tag != tsk_tag); tskim->tsk_status = rsp->tsk_status; /* * Firmware sends BFI_TSKIM_STS_ABORTED status for abort * requests. All other statuses are for normal completions. */ if (rsp->tsk_status == BFI_TSKIM_STS_ABORTED) { bfa_stats(tskim->itnim, tm_cleanup_comps); bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP_DONE); } else if (rsp->tsk_status == BFI_TSKIM_STS_UTAG) { bfa_sm_send_event(tskim, BFA_TSKIM_SM_UTAG); } else { bfa_stats(tskim->itnim, tm_fw_rsps); bfa_sm_send_event(tskim, BFA_TSKIM_SM_DONE); } } struct bfa_tskim_s * bfa_tskim_alloc(struct bfa_s *bfa, struct bfad_tskim_s *dtsk) { struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa); struct bfa_tskim_s *tskim; bfa_q_deq(&fcpim->tskim_free_q, &tskim); if (tskim) tskim->dtsk = dtsk; return tskim; } void bfa_tskim_free(struct bfa_tskim_s *tskim) { WARN_ON(!bfa_q_is_on_q_func(&tskim->itnim->tsk_q, &tskim->qe)); list_del(&tskim->qe); list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q); } /* * Start a task management command. * * @param[in] tskim BFA task management command instance * @param[in] itnim i-t nexus for the task management command * @param[in] lun lun, if applicable * @param[in] tm_cmnd Task management command code. * @param[in] t_secs Timeout in seconds * * @return None. */ void bfa_tskim_start(struct bfa_tskim_s *tskim, struct bfa_itnim_s *itnim, struct scsi_lun lun, enum fcp_tm_cmnd tm_cmnd, u8 tsecs) { tskim->itnim = itnim; tskim->lun = lun; tskim->tm_cmnd = tm_cmnd; tskim->tsecs = tsecs; tskim->notify = BFA_FALSE; bfa_stats(itnim, tm_cmnds); list_add_tail(&tskim->qe, &itnim->tsk_q); bfa_sm_send_event(tskim, BFA_TSKIM_SM_START); } void bfa_tskim_res_recfg(struct bfa_s *bfa, u16 num_tskim_fw) { struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa); struct list_head *qe; int i; for (i = 0; i < (fcpim->num_tskim_reqs - num_tskim_fw); i++) { bfa_q_deq_tail(&fcpim->tskim_free_q, &qe); list_add_tail(qe, &fcpim->tskim_unused_q); } } void bfa_fcp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo, struct bfa_s *bfa) { struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa); struct bfa_mem_kva_s *fcp_kva = BFA_MEM_FCP_KVA(bfa); struct bfa_mem_dma_s *seg_ptr; u16 nsegs, idx, per_seg_ios, num_io_req; u32 km_len = 0; /* * ZERO for num_ioim_reqs and num_fwtio_reqs is allowed config value. * So if the values are non zero, adjust them appropriately. 
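* The combined IO total is additionally capped at BFA_IO_MAX and split between the two request types when both are configured.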
*/ if (cfg->fwcfg.num_ioim_reqs && cfg->fwcfg.num_ioim_reqs < BFA_IOIM_MIN) cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN; else if (cfg->fwcfg.num_ioim_reqs > BFA_IOIM_MAX) cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX; if (cfg->fwcfg.num_fwtio_reqs > BFA_FWTIO_MAX) cfg->fwcfg.num_fwtio_reqs = BFA_FWTIO_MAX; num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs); if (num_io_req > BFA_IO_MAX) { if (cfg->fwcfg.num_ioim_reqs && cfg->fwcfg.num_fwtio_reqs) { cfg->fwcfg.num_ioim_reqs = BFA_IO_MAX/2; cfg->fwcfg.num_fwtio_reqs = BFA_IO_MAX/2; } else if (cfg->fwcfg.num_fwtio_reqs) cfg->fwcfg.num_fwtio_reqs = BFA_FWTIO_MAX; else cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX; } bfa_fcpim_meminfo(cfg, &km_len); num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs); km_len += num_io_req * sizeof(struct bfa_iotag_s); km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itn_s); /* dma memory */ nsegs = BFI_MEM_DMA_NSEGS(num_io_req, BFI_IOIM_SNSLEN); per_seg_ios = BFI_MEM_NREQS_SEG(BFI_IOIM_SNSLEN); bfa_mem_dma_seg_iter(fcp, seg_ptr, nsegs, idx) { if (num_io_req >= per_seg_ios) { num_io_req -= per_seg_ios; bfa_mem_dma_setup(minfo, seg_ptr, per_seg_ios * BFI_IOIM_SNSLEN); } else bfa_mem_dma_setup(minfo, seg_ptr, num_io_req * BFI_IOIM_SNSLEN); } /* kva memory */ bfa_mem_kva_setup(minfo, fcp_kva, km_len); } void bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, struct bfa_pcidev_s *pcidev) { struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa); struct bfa_mem_dma_s *seg_ptr; u16 idx, nsegs, num_io_req; fcp->max_ioim_reqs = cfg->fwcfg.num_ioim_reqs; fcp->num_ioim_reqs = cfg->fwcfg.num_ioim_reqs; fcp->num_fwtio_reqs = cfg->fwcfg.num_fwtio_reqs; fcp->num_itns = cfg->fwcfg.num_rports; fcp->bfa = bfa; /* * Setup the pool of snsbase addr's, that is passed to fw as * part of bfi_iocfc_cfg_s. */ num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs); nsegs = BFI_MEM_DMA_NSEGS(num_io_req, BFI_IOIM_SNSLEN); bfa_mem_dma_seg_iter(fcp, seg_ptr, nsegs, idx) { if (!bfa_mem_dma_virt(seg_ptr)) break; fcp->snsbase[idx].pa = bfa_mem_dma_phys(seg_ptr); fcp->snsbase[idx].kva = bfa_mem_dma_virt(seg_ptr); bfa_iocfc_set_snsbase(bfa, idx, fcp->snsbase[idx].pa); } fcp->throttle_update_required = 1; bfa_fcpim_attach(fcp, bfad, cfg, pcidev); bfa_iotag_attach(fcp); fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp); bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr + (fcp->num_itns * sizeof(struct bfa_itn_s)); memset(fcp->itn_arr, 0, (fcp->num_itns * sizeof(struct bfa_itn_s))); } void bfa_fcp_iocdisable(struct bfa_s *bfa) { struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa); bfa_fcpim_iocdisable(fcp); } void bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw, u16 max_ioim_fw) { struct bfa_fcp_mod_s *mod = BFA_FCP_MOD(bfa); struct list_head *qe; int i; /* Update io throttle value only once during driver load time */ if (!mod->throttle_update_required) return; for (i = 0; i < (mod->num_ioim_reqs - num_ioim_fw); i++) { bfa_q_deq_tail(&mod->iotag_ioim_free_q, &qe); list_add_tail(qe, &mod->iotag_unused_q); } if (mod->num_ioim_reqs != num_ioim_fw) { bfa_trc(bfa, mod->num_ioim_reqs); bfa_trc(bfa, num_ioim_fw); } mod->max_ioim_reqs = max_ioim_fw; mod->num_ioim_reqs = num_ioim_fw; mod->throttle_update_required = 0; } void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport, void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m)) { struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa); struct bfa_itn_s *itn; itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag); itn->isr = isr; } /* * Itn interrupt processing. 
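* Dispatches the firmware message to the ISR registered for the rport's ITN via bfa_itn_create().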
*/ void bfa_itn_isr(struct bfa_s *bfa, struct bfi_msg_s *m) { struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa); union bfi_itn_i2h_msg_u msg; struct bfa_itn_s *itn; msg.msg = m; itn = BFA_ITN_FROM_TAG(fcp, msg.create_rsp->bfa_handle); if (itn->isr) itn->isr(bfa, m); else WARN_ON(1); } void bfa_iotag_attach(struct bfa_fcp_mod_s *fcp) { struct bfa_iotag_s *iotag; u16 num_io_req, i; iotag = (struct bfa_iotag_s *) bfa_mem_kva_curp(fcp); fcp->iotag_arr = iotag; INIT_LIST_HEAD(&fcp->iotag_ioim_free_q); INIT_LIST_HEAD(&fcp->iotag_tio_free_q); INIT_LIST_HEAD(&fcp->iotag_unused_q); num_io_req = fcp->num_ioim_reqs + fcp->num_fwtio_reqs; for (i = 0; i < num_io_req; i++, iotag++) { memset(iotag, 0, sizeof(struct bfa_iotag_s)); iotag->tag = i; if (i < fcp->num_ioim_reqs) list_add_tail(&iotag->qe, &fcp->iotag_ioim_free_q); else list_add_tail(&iotag->qe, &fcp->iotag_tio_free_q); } bfa_mem_kva_curp(fcp) = (u8 *) iotag; } /* * To send config req, first try to use throttle value from flash * If 0, then use driver parameter * We need to use min(flash_val, drv_val) because * memory allocation was done based on this cfg'd value */ u16 bfa_fcpim_get_throttle_cfg(struct bfa_s *bfa, u16 drv_cfg_param) { u16 tmp; struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa); /* * If throttle value from flash is already in effect after driver is * loaded then until next load, always return current value instead * of actual flash value */ if (!fcp->throttle_update_required) return (u16)fcp->num_ioim_reqs; tmp = bfa_dconf_read_data_valid(bfa) ? bfa_fcpim_read_throttle(bfa) : 0; if (!tmp || (tmp > drv_cfg_param)) tmp = drv_cfg_param; return tmp; } bfa_status_t bfa_fcpim_write_throttle(struct bfa_s *bfa, u16 value) { if (!bfa_dconf_get_min_cfg(bfa)) { BFA_DCONF_MOD(bfa)->dconf->throttle_cfg.value = value; BFA_DCONF_MOD(bfa)->dconf->throttle_cfg.is_valid = 1; return BFA_STATUS_OK; } return BFA_STATUS_FAILED; } u16 bfa_fcpim_read_throttle(struct bfa_s *bfa) { struct bfa_throttle_cfg_s *throttle_cfg = &(BFA_DCONF_MOD(bfa)->dconf->throttle_cfg); return ((!bfa_dconf_get_min_cfg(bfa)) ? ((throttle_cfg->is_valid == 1) ? (throttle_cfg->value) : 0) : 0); } bfa_status_t bfa_fcpim_throttle_set(struct bfa_s *bfa, u16 value) { /* in min cfg no commands should run. */ if ((bfa_dconf_get_min_cfg(bfa) == BFA_TRUE) || (!bfa_dconf_read_data_valid(bfa))) return BFA_STATUS_FAILED; bfa_fcpim_write_throttle(bfa, value); return bfa_dconf_update(bfa); } bfa_status_t bfa_fcpim_throttle_get(struct bfa_s *bfa, void *buf) { struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa); struct bfa_defs_fcpim_throttle_s throttle; if ((bfa_dconf_get_min_cfg(bfa) == BFA_TRUE) || (!bfa_dconf_read_data_valid(bfa))) return BFA_STATUS_FAILED; memset(&throttle, 0, sizeof(struct bfa_defs_fcpim_throttle_s)); throttle.cur_value = (u16)(fcpim->fcp->num_ioim_reqs); throttle.cfg_value = bfa_fcpim_read_throttle(bfa); if (!throttle.cfg_value) throttle.cfg_value = throttle.cur_value; throttle.max_value = (u16)(fcpim->fcp->max_ioim_reqs); memcpy(buf, &throttle, sizeof(struct bfa_defs_fcpim_throttle_s)); return BFA_STATUS_OK; }
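/*
 * Rough usage outline of the initiator-mode entry points defined above,
 * assuming a bfad-style caller (illustrative sketch only; placeholder
 * variables, locking and error handling are omitted):
 *
 *	ioim = bfa_ioim_alloc(bfa, dio, itnim, nsges);
 *	if (ioim)
 *		bfa_ioim_start(ioim);
 *	...
 *	if (bfa_ioim_abort(ioim) != BFA_STATUS_OK)
 *		wait for the normal completion callback instead
 *
 *	tskim = bfa_tskim_alloc(bfa, dtsk);
 *	if (tskim)
 *		bfa_tskim_start(tskim, itnim, lun, FCP_TM_LUN_RESET, tsecs);
 */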
linux-master
drivers/scsi/bfa/bfa_fcpim.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. * Copyright (c) 2014- QLogic Corporation. * All rights reserved * www.qlogic.com * * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. */ /* * bfa_attr.c Linux driver configuration interface module. */ #include "bfad_drv.h" #include "bfad_im.h" /* * FC transport template entry, get SCSI target port ID. */ static void bfad_im_get_starget_port_id(struct scsi_target *starget) { struct Scsi_Host *shost; struct bfad_im_port_s *im_port; struct bfad_s *bfad; struct bfad_itnim_s *itnim = NULL; u32 fc_id = -1; unsigned long flags; shost = dev_to_shost(starget->dev.parent); im_port = (struct bfad_im_port_s *) shost->hostdata[0]; bfad = im_port->bfad; spin_lock_irqsave(&bfad->bfad_lock, flags); itnim = bfad_get_itnim(im_port, starget->id); if (itnim) fc_id = bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim); fc_starget_port_id(starget) = fc_id; spin_unlock_irqrestore(&bfad->bfad_lock, flags); } /* * FC transport template entry, get SCSI target nwwn. */ static void bfad_im_get_starget_node_name(struct scsi_target *starget) { struct Scsi_Host *shost; struct bfad_im_port_s *im_port; struct bfad_s *bfad; struct bfad_itnim_s *itnim = NULL; u64 node_name = 0; unsigned long flags; shost = dev_to_shost(starget->dev.parent); im_port = (struct bfad_im_port_s *) shost->hostdata[0]; bfad = im_port->bfad; spin_lock_irqsave(&bfad->bfad_lock, flags); itnim = bfad_get_itnim(im_port, starget->id); if (itnim) node_name = bfa_fcs_itnim_get_nwwn(&itnim->fcs_itnim); fc_starget_node_name(starget) = cpu_to_be64(node_name); spin_unlock_irqrestore(&bfad->bfad_lock, flags); } /* * FC transport template entry, get SCSI target pwwn. */ static void bfad_im_get_starget_port_name(struct scsi_target *starget) { struct Scsi_Host *shost; struct bfad_im_port_s *im_port; struct bfad_s *bfad; struct bfad_itnim_s *itnim = NULL; u64 port_name = 0; unsigned long flags; shost = dev_to_shost(starget->dev.parent); im_port = (struct bfad_im_port_s *) shost->hostdata[0]; bfad = im_port->bfad; spin_lock_irqsave(&bfad->bfad_lock, flags); itnim = bfad_get_itnim(im_port, starget->id); if (itnim) port_name = bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim); fc_starget_port_name(starget) = cpu_to_be64(port_name); spin_unlock_irqrestore(&bfad->bfad_lock, flags); } /* * FC transport template entry, get SCSI host port ID. */ static void bfad_im_get_host_port_id(struct Scsi_Host *shost) { struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_port_s *port = im_port->port; fc_host_port_id(shost) = bfa_hton3b(bfa_fcs_lport_get_fcid(port->fcs_port)); } /* * FC transport template entry, get SCSI host port type. */ static void bfad_im_get_host_port_type(struct Scsi_Host *shost) { struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; struct bfa_lport_attr_s port_attr; bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr); switch (port_attr.port_type) { case BFA_PORT_TYPE_NPORT: fc_host_port_type(shost) = FC_PORTTYPE_NPORT; break; case BFA_PORT_TYPE_NLPORT: fc_host_port_type(shost) = FC_PORTTYPE_NLPORT; break; case BFA_PORT_TYPE_P2P: fc_host_port_type(shost) = FC_PORTTYPE_PTP; break; case BFA_PORT_TYPE_LPORT: fc_host_port_type(shost) = FC_PORTTYPE_LPORT; break; default: fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN; break; } } /* * FC transport template entry, get SCSI host port state. 
*/ static void bfad_im_get_host_port_state(struct Scsi_Host *shost) { struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; struct bfa_port_attr_s attr; bfa_fcport_get_attr(&bfad->bfa, &attr); switch (attr.port_state) { case BFA_PORT_ST_LINKDOWN: fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN; break; case BFA_PORT_ST_LINKUP: fc_host_port_state(shost) = FC_PORTSTATE_ONLINE; break; case BFA_PORT_ST_DISABLED: case BFA_PORT_ST_STOPPED: case BFA_PORT_ST_IOCDOWN: case BFA_PORT_ST_IOCDIS: fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE; break; case BFA_PORT_ST_UNINIT: case BFA_PORT_ST_ENABLING_QWAIT: case BFA_PORT_ST_ENABLING: case BFA_PORT_ST_DISABLING_QWAIT: case BFA_PORT_ST_DISABLING: default: fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN; break; } } /* * FC transport template entry, get SCSI host active fc4s. */ static void bfad_im_get_host_active_fc4s(struct Scsi_Host *shost) { struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_port_s *port = im_port->port; memset(fc_host_active_fc4s(shost), 0, sizeof(fc_host_active_fc4s(shost))); if (port->supported_fc4s & BFA_LPORT_ROLE_FCP_IM) fc_host_active_fc4s(shost)[2] = 1; fc_host_active_fc4s(shost)[7] = 1; } /* * FC transport template entry, get SCSI host link speed. */ static void bfad_im_get_host_speed(struct Scsi_Host *shost) { struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; struct bfa_port_attr_s attr; bfa_fcport_get_attr(&bfad->bfa, &attr); switch (attr.speed) { case BFA_PORT_SPEED_10GBPS: fc_host_speed(shost) = FC_PORTSPEED_10GBIT; break; case BFA_PORT_SPEED_16GBPS: fc_host_speed(shost) = FC_PORTSPEED_16GBIT; break; case BFA_PORT_SPEED_8GBPS: fc_host_speed(shost) = FC_PORTSPEED_8GBIT; break; case BFA_PORT_SPEED_4GBPS: fc_host_speed(shost) = FC_PORTSPEED_4GBIT; break; case BFA_PORT_SPEED_2GBPS: fc_host_speed(shost) = FC_PORTSPEED_2GBIT; break; case BFA_PORT_SPEED_1GBPS: fc_host_speed(shost) = FC_PORTSPEED_1GBIT; break; default: fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; break; } } /* * FC transport template entry, get SCSI host port type. */ static void bfad_im_get_host_fabric_name(struct Scsi_Host *shost) { struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_port_s *port = im_port->port; wwn_t fabric_nwwn = 0; fabric_nwwn = bfa_fcs_lport_get_fabric_name(port->fcs_port); fc_host_fabric_name(shost) = cpu_to_be64(fabric_nwwn); } /* * FC transport template entry, get BFAD statistics. 
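* Reads the firmware port statistics synchronously and translates them into the fc_host_statistics structure; returns NULL on allocation or query failure.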
*/ static struct fc_host_statistics * bfad_im_get_stats(struct Scsi_Host *shost) { struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; struct bfad_hal_comp fcomp; union bfa_port_stats_u *fcstats; struct fc_host_statistics *hstats; bfa_status_t rc; unsigned long flags; fcstats = kzalloc(sizeof(union bfa_port_stats_u), GFP_KERNEL); if (fcstats == NULL) return NULL; hstats = &bfad->link_stats; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); memset(hstats, 0, sizeof(struct fc_host_statistics)); rc = bfa_port_get_stats(BFA_FCPORT(&bfad->bfa), fcstats, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (rc != BFA_STATUS_OK) { kfree(fcstats); return NULL; } wait_for_completion(&fcomp.comp); /* Fill the fc_host_statistics structure */ hstats->seconds_since_last_reset = fcstats->fc.secs_reset; hstats->tx_frames = fcstats->fc.tx_frames; hstats->tx_words = fcstats->fc.tx_words; hstats->rx_frames = fcstats->fc.rx_frames; hstats->rx_words = fcstats->fc.rx_words; hstats->lip_count = fcstats->fc.lip_count; hstats->nos_count = fcstats->fc.nos_count; hstats->error_frames = fcstats->fc.error_frames; hstats->dumped_frames = fcstats->fc.dropped_frames; hstats->link_failure_count = fcstats->fc.link_failures; hstats->loss_of_sync_count = fcstats->fc.loss_of_syncs; hstats->loss_of_signal_count = fcstats->fc.loss_of_signals; hstats->prim_seq_protocol_err_count = fcstats->fc.primseq_errs; hstats->invalid_crc_count = fcstats->fc.invalid_crcs; kfree(fcstats); return hstats; } /* * FC transport template entry, reset BFAD statistics. */ static void bfad_im_reset_stats(struct Scsi_Host *shost) { struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; struct bfad_hal_comp fcomp; unsigned long flags; bfa_status_t rc; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); rc = bfa_port_clear_stats(BFA_FCPORT(&bfad->bfa), bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (rc != BFA_STATUS_OK) return; wait_for_completion(&fcomp.comp); return; } /* * FC transport template entry, set rport loss timeout. 
* Update dev_loss_tmo based on the value pushed down by the stack * In case it is lesser than path_tov of driver, set it to path_tov + 1 * to ensure that the driver times out before the application */ static void bfad_im_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout) { struct bfad_itnim_data_s *itnim_data = rport->dd_data; struct bfad_itnim_s *itnim = itnim_data->itnim; struct bfad_s *bfad = itnim->im->bfad; uint16_t path_tov = bfa_fcpim_path_tov_get(&bfad->bfa); rport->dev_loss_tmo = timeout; if (timeout < path_tov) rport->dev_loss_tmo = path_tov + 1; } static int bfad_im_vport_create(struct fc_vport *fc_vport, bool disable) { char *vname = fc_vport->symbolic_name; struct Scsi_Host *shost = fc_vport->shost; struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; struct bfa_lport_cfg_s port_cfg; struct bfad_vport_s *vp; int status = 0, rc; unsigned long flags; memset(&port_cfg, 0, sizeof(port_cfg)); u64_to_wwn(fc_vport->node_name, (u8 *)&port_cfg.nwwn); u64_to_wwn(fc_vport->port_name, (u8 *)&port_cfg.pwwn); if (strlen(vname) > 0) strcpy((char *)&port_cfg.sym_name, vname); port_cfg.roles = BFA_LPORT_ROLE_FCP_IM; spin_lock_irqsave(&bfad->bfad_lock, flags); list_for_each_entry(vp, &bfad->pbc_vport_list, list_entry) { if (port_cfg.pwwn == vp->fcs_vport.lport.port_cfg.pwwn) { port_cfg.preboot_vp = vp->fcs_vport.lport.port_cfg.preboot_vp; break; } } spin_unlock_irqrestore(&bfad->bfad_lock, flags); rc = bfad_vport_create(bfad, 0, &port_cfg, &fc_vport->dev); if (rc == BFA_STATUS_OK) { struct bfad_vport_s *vport; struct bfa_fcs_vport_s *fcs_vport; struct Scsi_Host *vshost; spin_lock_irqsave(&bfad->bfad_lock, flags); fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, 0, port_cfg.pwwn); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (fcs_vport == NULL) return VPCERR_BAD_WWN; fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE); if (disable) { spin_lock_irqsave(&bfad->bfad_lock, flags); bfa_fcs_vport_stop(fcs_vport); spin_unlock_irqrestore(&bfad->bfad_lock, flags); fc_vport_set_state(fc_vport, FC_VPORT_DISABLED); } vport = fcs_vport->vport_drv; vshost = vport->drv_port.im_port->shost; fc_host_node_name(vshost) = wwn_to_u64((u8 *)&port_cfg.nwwn); fc_host_port_name(vshost) = wwn_to_u64((u8 *)&port_cfg.pwwn); fc_host_supported_classes(vshost) = FC_COS_CLASS3; memset(fc_host_supported_fc4s(vshost), 0, sizeof(fc_host_supported_fc4s(vshost))); /* For FCP type 0x08 */ if (supported_fc4s & BFA_LPORT_ROLE_FCP_IM) fc_host_supported_fc4s(vshost)[2] = 1; /* For fibre channel services type 0x20 */ fc_host_supported_fc4s(vshost)[7] = 1; fc_host_supported_speeds(vshost) = bfad_im_supported_speeds(&bfad->bfa); fc_host_maxframe_size(vshost) = bfa_fcport_get_maxfrsize(&bfad->bfa); fc_vport->dd_data = vport; vport->drv_port.im_port->fc_vport = fc_vport; } else if (rc == BFA_STATUS_INVALID_WWN) return VPCERR_BAD_WWN; else if (rc == BFA_STATUS_VPORT_EXISTS) return VPCERR_BAD_WWN; else if (rc == BFA_STATUS_VPORT_MAX) return VPCERR_NO_FABRIC_SUPP; else if (rc == BFA_STATUS_VPORT_WWN_BP) return VPCERR_BAD_WWN; else return FC_VPORT_FAILED; return status; } static int bfad_im_issue_fc_host_lip(struct Scsi_Host *shost) { struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; struct bfad_hal_comp fcomp; unsigned long flags; uint32_t status; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); status = bfa_port_disable(&bfad->bfa.modules.port, bfad_hcb_comp, &fcomp); 
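/* The LIP request is emulated by disabling and then re-enabling the port; each step is issued under bfad_lock and its completion is awaited after dropping the lock. */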
spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (status != BFA_STATUS_OK) return -EIO; wait_for_completion(&fcomp.comp); if (fcomp.status != BFA_STATUS_OK) return -EIO; spin_lock_irqsave(&bfad->bfad_lock, flags); status = bfa_port_enable(&bfad->bfa.modules.port, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (status != BFA_STATUS_OK) return -EIO; wait_for_completion(&fcomp.comp); if (fcomp.status != BFA_STATUS_OK) return -EIO; return 0; } static int bfad_im_vport_delete(struct fc_vport *fc_vport) { struct bfad_vport_s *vport = (struct bfad_vport_s *)fc_vport->dd_data; struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) vport->drv_port.im_port; struct bfad_s *bfad = im_port->bfad; struct bfa_fcs_vport_s *fcs_vport; struct Scsi_Host *vshost; wwn_t pwwn; int rc; unsigned long flags; struct completion fcomp; if (im_port->flags & BFAD_PORT_DELETE) { bfad_scsi_host_free(bfad, im_port); list_del(&vport->list_entry); kfree(vport); return 0; } vshost = vport->drv_port.im_port->shost; u64_to_wwn(fc_host_port_name(vshost), (u8 *)&pwwn); spin_lock_irqsave(&bfad->bfad_lock, flags); fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, 0, pwwn); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (fcs_vport == NULL) return VPCERR_BAD_WWN; vport->drv_port.flags |= BFAD_PORT_DELETE; vport->comp_del = &fcomp; init_completion(vport->comp_del); spin_lock_irqsave(&bfad->bfad_lock, flags); rc = bfa_fcs_vport_delete(&vport->fcs_vport); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (rc == BFA_STATUS_PBC) { vport->drv_port.flags &= ~BFAD_PORT_DELETE; vport->comp_del = NULL; return -1; } wait_for_completion(vport->comp_del); bfad_scsi_host_free(bfad, im_port); list_del(&vport->list_entry); kfree(vport); return 0; } static int bfad_im_vport_disable(struct fc_vport *fc_vport, bool disable) { struct bfad_vport_s *vport; struct bfad_s *bfad; struct bfa_fcs_vport_s *fcs_vport; struct Scsi_Host *vshost; wwn_t pwwn; unsigned long flags; vport = (struct bfad_vport_s *)fc_vport->dd_data; bfad = vport->drv_port.bfad; vshost = vport->drv_port.im_port->shost; u64_to_wwn(fc_host_port_name(vshost), (u8 *)&pwwn); spin_lock_irqsave(&bfad->bfad_lock, flags); fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, 0, pwwn); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (fcs_vport == NULL) return VPCERR_BAD_WWN; if (disable) { bfa_fcs_vport_stop(fcs_vport); fc_vport_set_state(fc_vport, FC_VPORT_DISABLED); } else { bfa_fcs_vport_start(fcs_vport); fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE); } return 0; } static void bfad_im_vport_set_symbolic_name(struct fc_vport *fc_vport) { struct bfad_vport_s *vport = (struct bfad_vport_s *)fc_vport->dd_data; struct bfad_im_port_s *im_port = (struct bfad_im_port_s *)vport->drv_port.im_port; struct bfad_s *bfad = im_port->bfad; struct Scsi_Host *vshost = vport->drv_port.im_port->shost; char *sym_name = fc_vport->symbolic_name; struct bfa_fcs_vport_s *fcs_vport; wwn_t pwwn; unsigned long flags; u64_to_wwn(fc_host_port_name(vshost), (u8 *)&pwwn); spin_lock_irqsave(&bfad->bfad_lock, flags); fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, 0, pwwn); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (fcs_vport == NULL) return; spin_lock_irqsave(&bfad->bfad_lock, flags); if (strlen(sym_name) > 0) bfa_fcs_lport_set_symname(&fcs_vport->lport, sym_name); spin_unlock_irqrestore(&bfad->bfad_lock, flags); } struct fc_function_template bfad_im_fc_function_template = { /* Target dynamic attributes */ .get_starget_port_id = bfad_im_get_starget_port_id, 
.show_starget_port_id = 1, .get_starget_node_name = bfad_im_get_starget_node_name, .show_starget_node_name = 1, .get_starget_port_name = bfad_im_get_starget_port_name, .show_starget_port_name = 1, /* Host dynamic attribute */ .get_host_port_id = bfad_im_get_host_port_id, .show_host_port_id = 1, /* Host fixed attributes */ .show_host_node_name = 1, .show_host_port_name = 1, .show_host_supported_classes = 1, .show_host_supported_fc4s = 1, .show_host_supported_speeds = 1, .show_host_maxframe_size = 1, /* More host dynamic attributes */ .show_host_port_type = 1, .get_host_port_type = bfad_im_get_host_port_type, .show_host_port_state = 1, .get_host_port_state = bfad_im_get_host_port_state, .show_host_active_fc4s = 1, .get_host_active_fc4s = bfad_im_get_host_active_fc4s, .show_host_speed = 1, .get_host_speed = bfad_im_get_host_speed, .show_host_fabric_name = 1, .get_host_fabric_name = bfad_im_get_host_fabric_name, .show_host_symbolic_name = 1, /* Statistics */ .get_fc_host_stats = bfad_im_get_stats, .reset_fc_host_stats = bfad_im_reset_stats, /* Allocation length for host specific data */ .dd_fcrport_size = sizeof(struct bfad_itnim_data_s *), /* Remote port fixed attributes */ .show_rport_maxframe_size = 1, .show_rport_supported_classes = 1, .show_rport_dev_loss_tmo = 1, .set_rport_dev_loss_tmo = bfad_im_set_rport_loss_tmo, .issue_fc_host_lip = bfad_im_issue_fc_host_lip, .vport_create = bfad_im_vport_create, .vport_delete = bfad_im_vport_delete, .vport_disable = bfad_im_vport_disable, .set_vport_symbolic_name = bfad_im_vport_set_symbolic_name, .bsg_request = bfad_im_bsg_request, .bsg_timeout = bfad_im_bsg_timeout, }; struct fc_function_template bfad_im_vport_fc_function_template = { /* Target dynamic attributes */ .get_starget_port_id = bfad_im_get_starget_port_id, .show_starget_port_id = 1, .get_starget_node_name = bfad_im_get_starget_node_name, .show_starget_node_name = 1, .get_starget_port_name = bfad_im_get_starget_port_name, .show_starget_port_name = 1, /* Host dynamic attribute */ .get_host_port_id = bfad_im_get_host_port_id, .show_host_port_id = 1, /* Host fixed attributes */ .show_host_node_name = 1, .show_host_port_name = 1, .show_host_supported_classes = 1, .show_host_supported_fc4s = 1, .show_host_supported_speeds = 1, .show_host_maxframe_size = 1, /* More host dynamic attributes */ .show_host_port_type = 1, .get_host_port_type = bfad_im_get_host_port_type, .show_host_port_state = 1, .get_host_port_state = bfad_im_get_host_port_state, .show_host_active_fc4s = 1, .get_host_active_fc4s = bfad_im_get_host_active_fc4s, .show_host_speed = 1, .get_host_speed = bfad_im_get_host_speed, .show_host_fabric_name = 1, .get_host_fabric_name = bfad_im_get_host_fabric_name, .show_host_symbolic_name = 1, /* Statistics */ .get_fc_host_stats = bfad_im_get_stats, .reset_fc_host_stats = bfad_im_reset_stats, /* Allocation length for host specific data */ .dd_fcrport_size = sizeof(struct bfad_itnim_data_s *), /* Remote port fixed attributes */ .show_rport_maxframe_size = 1, .show_rport_supported_classes = 1, .show_rport_dev_loss_tmo = 1, .set_rport_dev_loss_tmo = bfad_im_set_rport_loss_tmo, }; /* * Scsi_Host_attrs SCSI host attributes */ static ssize_t bfad_im_serial_num_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN]; bfa_get_adapter_serial_num(&bfad->bfa, serial_num); return 
sysfs_emit(buf, "%s\n", serial_num); } static ssize_t bfad_im_model_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; char model[BFA_ADAPTER_MODEL_NAME_LEN]; bfa_get_adapter_model(&bfad->bfa, model); return sysfs_emit(buf, "%s\n", model); } static ssize_t bfad_im_model_desc_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; char model[BFA_ADAPTER_MODEL_NAME_LEN]; char model_descr[BFA_ADAPTER_MODEL_DESCR_LEN]; int nports = 0; bfa_get_adapter_model(&bfad->bfa, model); nports = bfa_get_nports(&bfad->bfa); if (!strcmp(model, "QLogic-425")) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "QLogic BR-series 4Gbps PCIe dual port FC HBA"); else if (!strcmp(model, "QLogic-825")) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "QLogic BR-series 8Gbps PCIe dual port FC HBA"); else if (!strcmp(model, "QLogic-42B")) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "QLogic BR-series 4Gbps PCIe dual port FC HBA for HP"); else if (!strcmp(model, "QLogic-82B")) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "QLogic BR-series 8Gbps PCIe dual port FC HBA for HP"); else if (!strcmp(model, "QLogic-1010")) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "QLogic BR-series 10Gbps single port CNA"); else if (!strcmp(model, "QLogic-1020")) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "QLogic BR-series 10Gbps dual port CNA"); else if (!strcmp(model, "QLogic-1007")) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "QLogic BR-series 10Gbps CNA for IBM Blade Center"); else if (!strcmp(model, "QLogic-415")) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "QLogic BR-series 4Gbps PCIe single port FC HBA"); else if (!strcmp(model, "QLogic-815")) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "QLogic BR-series 8Gbps PCIe single port FC HBA"); else if (!strcmp(model, "QLogic-41B")) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "QLogic BR-series 4Gbps PCIe single port FC HBA for HP"); else if (!strcmp(model, "QLogic-81B")) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "QLogic BR-series 8Gbps PCIe single port FC HBA for HP"); else if (!strcmp(model, "QLogic-804")) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "QLogic BR-series 8Gbps FC HBA for HP Bladesystem C-class"); else if (!strcmp(model, "QLogic-1741")) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "QLogic BR-series 10Gbps CNA for Dell M-Series Blade Servers"); else if (strstr(model, "QLogic-1860")) { if (nports == 1 && bfa_ioc_is_cna(&bfad->bfa.ioc)) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "QLogic BR-series 10Gbps single port CNA"); else if (nports == 1 && !bfa_ioc_is_cna(&bfad->bfa.ioc)) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "QLogic BR-series 16Gbps PCIe single port FC HBA"); else if (nports == 2 && bfa_ioc_is_cna(&bfad->bfa.ioc)) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "QLogic BR-series 10Gbps dual port CNA"); else if (nports == 2 && !bfa_ioc_is_cna(&bfad->bfa.ioc)) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "QLogic BR-series 16Gbps PCIe dual port FC HBA"); } else if (!strcmp(model, "QLogic-1867")) { if (nports == 1 && !bfa_ioc_is_cna(&bfad->bfa.ioc)) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "QLogic BR-series 
16Gbps PCIe single port FC HBA for IBM"); else if (nports == 2 && !bfa_ioc_is_cna(&bfad->bfa.ioc)) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "QLogic BR-series 16Gbps PCIe dual port FC HBA for IBM"); } else snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "Invalid Model"); return sysfs_emit(buf, "%s\n", model_descr); } static ssize_t bfad_im_node_name_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_port_s *port = im_port->port; u64 nwwn; nwwn = bfa_fcs_lport_get_nwwn(port->fcs_port); return sysfs_emit(buf, "0x%llx\n", cpu_to_be64(nwwn)); } static ssize_t bfad_im_symbolic_name_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; struct bfa_lport_attr_s port_attr; char symname[BFA_SYMNAME_MAXLEN]; bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr); strscpy(symname, port_attr.port_cfg.sym_name.symname, BFA_SYMNAME_MAXLEN); return sysfs_emit(buf, "%s\n", symname); } static ssize_t bfad_im_hw_version_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; char hw_ver[BFA_VERSION_LEN]; bfa_get_pci_chip_rev(&bfad->bfa, hw_ver); return sysfs_emit(buf, "%s\n", hw_ver); } static ssize_t bfad_im_drv_version_show(struct device *dev, struct device_attribute *attr, char *buf) { return sysfs_emit(buf, "%s\n", BFAD_DRIVER_VERSION); } static ssize_t bfad_im_optionrom_version_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; char optrom_ver[BFA_VERSION_LEN]; bfa_get_adapter_optrom_ver(&bfad->bfa, optrom_ver); return sysfs_emit(buf, "%s\n", optrom_ver); } static ssize_t bfad_im_fw_version_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; char fw_ver[BFA_VERSION_LEN]; bfa_get_adapter_fw_ver(&bfad->bfa, fw_ver); return sysfs_emit(buf, "%s\n", fw_ver); } static ssize_t bfad_im_num_of_ports_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; return sysfs_emit(buf, "%d\n", bfa_get_nports(&bfad->bfa)); } static ssize_t bfad_im_drv_name_show(struct device *dev, struct device_attribute *attr, char *buf) { return sysfs_emit(buf, "%s\n", BFAD_DRIVER_NAME); } static ssize_t bfad_im_num_of_discovered_ports_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_port_s *port = im_port->port; struct bfad_s *bfad = im_port->bfad; int nrports = 2048; struct bfa_rport_qualifier_s *rports = NULL; unsigned long flags; rports = kcalloc(nrports, sizeof(struct bfa_rport_qualifier_s), GFP_ATOMIC); if (rports == NULL) 
return sysfs_emit(buf, "Failed\n"); spin_lock_irqsave(&bfad->bfad_lock, flags); bfa_fcs_lport_get_rport_quals(port->fcs_port, rports, &nrports); spin_unlock_irqrestore(&bfad->bfad_lock, flags); kfree(rports); return sysfs_emit(buf, "%d\n", nrports); } static DEVICE_ATTR(serial_number, S_IRUGO, bfad_im_serial_num_show, NULL); static DEVICE_ATTR(model, S_IRUGO, bfad_im_model_show, NULL); static DEVICE_ATTR(model_description, S_IRUGO, bfad_im_model_desc_show, NULL); static DEVICE_ATTR(node_name, S_IRUGO, bfad_im_node_name_show, NULL); static DEVICE_ATTR(symbolic_name, S_IRUGO, bfad_im_symbolic_name_show, NULL); static DEVICE_ATTR(hardware_version, S_IRUGO, bfad_im_hw_version_show, NULL); static DEVICE_ATTR(driver_version, S_IRUGO, bfad_im_drv_version_show, NULL); static DEVICE_ATTR(option_rom_version, S_IRUGO, bfad_im_optionrom_version_show, NULL); static DEVICE_ATTR(firmware_version, S_IRUGO, bfad_im_fw_version_show, NULL); static DEVICE_ATTR(number_of_ports, S_IRUGO, bfad_im_num_of_ports_show, NULL); static DEVICE_ATTR(driver_name, S_IRUGO, bfad_im_drv_name_show, NULL); static DEVICE_ATTR(number_of_discovered_ports, S_IRUGO, bfad_im_num_of_discovered_ports_show, NULL); static struct attribute *bfad_im_host_attrs[] = { &dev_attr_serial_number.attr, &dev_attr_model.attr, &dev_attr_model_description.attr, &dev_attr_node_name.attr, &dev_attr_symbolic_name.attr, &dev_attr_hardware_version.attr, &dev_attr_driver_version.attr, &dev_attr_option_rom_version.attr, &dev_attr_firmware_version.attr, &dev_attr_number_of_ports.attr, &dev_attr_driver_name.attr, &dev_attr_number_of_discovered_ports.attr, NULL, }; static const struct attribute_group bfad_im_host_attr_group = { .attrs = bfad_im_host_attrs }; const struct attribute_group *bfad_im_host_groups[] = { &bfad_im_host_attr_group, NULL }; static struct attribute *bfad_im_vport_attrs[] = { &dev_attr_serial_number.attr, &dev_attr_model.attr, &dev_attr_model_description.attr, &dev_attr_node_name.attr, &dev_attr_symbolic_name.attr, &dev_attr_hardware_version.attr, &dev_attr_driver_version.attr, &dev_attr_option_rom_version.attr, &dev_attr_firmware_version.attr, &dev_attr_number_of_ports.attr, &dev_attr_driver_name.attr, &dev_attr_number_of_discovered_ports.attr, NULL, }; static const struct attribute_group bfad_im_vport_attr_group = { .attrs = bfad_im_vport_attrs }; const struct attribute_group *bfad_im_vport_groups[] = { &bfad_im_vport_attr_group, NULL };
linux-master
drivers/scsi/bfa/bfad_attr.c
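The scsi_host attributes registered in bfad_attr.c above (serial_number, model, node_name, firmware_version, and so on) surface as read-only files in the adapter's Scsi_Host sysfs directory. Below is a minimal userspace sketch, not part of the driver, that dumps them; the "host0" path component is a placeholder and depends on how the adapter was enumerated on a given system.

/* read_bfa_host_attrs.c: userspace sketch only; assumes the BFA adapter is host0. */
#include <stdio.h>
#include <string.h>

/* Placeholder path; substitute the actual host number on the target system. */
#define BFA_HOST_SYSFS "/sys/class/scsi_host/host0/"

static const char *attrs[] = {
	"driver_name", "driver_version", "model", "model_description",
	"serial_number", "node_name", "symbolic_name", "hardware_version",
	"option_rom_version", "firmware_version", "number_of_ports",
	"number_of_discovered_ports",
};

int main(void)
{
	char path[256], line[256];
	size_t i;

	for (i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++) {
		FILE *f;

		/* Each attribute is a one-line text file produced by the
		 * corresponding *_show() callback via sysfs_emit(). */
		snprintf(path, sizeof(path), BFA_HOST_SYSFS "%s", attrs[i]);
		f = fopen(path, "r");
		if (!f) {
			perror(path);
			continue;
		}
		if (fgets(line, sizeof(line), f)) {
			line[strcspn(line, "\n")] = '\0';
			printf("%-28s %s\n", attrs[i], line);
		}
		fclose(f);
	}
	return 0;
}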
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. * Copyright (c) 2014- QLogic Corporation. * All rights reserved * www.qlogic.com * * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. */ #include <linux/debugfs.h> #include <linux/export.h> #include "bfad_drv.h" #include "bfad_im.h" /* * BFA debugfs interface * * To access the interface, the debugfs file system should be mounted * if not already mounted using: * mount -t debugfs none /sys/kernel/debug * * BFA Hierarchy: * - bfa/pci_dev:<pci_name> * where the pci_name corresponds to the one under /sys/bus/pci/drivers/bfa * * Debugging services available per pci_dev: * fwtrc: To collect current firmware trace. * drvtrc: To collect current driver trace. * fwsave: To collect last saved fw trace as a result of firmware crash. * regwr: To write one word to chip register. * regrd: To read one or more words from chip register. */ struct bfad_debug_info { char *debug_buffer; void *i_private; int buffer_len; }; static int bfad_debugfs_open_drvtrc(struct inode *inode, struct file *file) { struct bfad_port_s *port = inode->i_private; struct bfad_s *bfad = port->bfad; struct bfad_debug_info *debug; debug = kzalloc(sizeof(struct bfad_debug_info), GFP_KERNEL); if (!debug) return -ENOMEM; debug->debug_buffer = (void *) bfad->trcmod; debug->buffer_len = sizeof(struct bfa_trc_mod_s); file->private_data = debug; return 0; } static int bfad_debugfs_open_fwtrc(struct inode *inode, struct file *file) { struct bfad_port_s *port = inode->i_private; struct bfad_s *bfad = port->bfad; struct bfad_debug_info *fw_debug; unsigned long flags; int rc; fw_debug = kzalloc(sizeof(struct bfad_debug_info), GFP_KERNEL); if (!fw_debug) return -ENOMEM; fw_debug->buffer_len = sizeof(struct bfa_trc_mod_s); fw_debug->debug_buffer = vzalloc(fw_debug->buffer_len); if (!fw_debug->debug_buffer) { kfree(fw_debug); printk(KERN_INFO "bfad[%d]: Failed to allocate fwtrc buffer\n", bfad->inst_no); return -ENOMEM; } spin_lock_irqsave(&bfad->bfad_lock, flags); rc = bfa_ioc_debug_fwtrc(&bfad->bfa.ioc, fw_debug->debug_buffer, &fw_debug->buffer_len); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (rc != BFA_STATUS_OK) { vfree(fw_debug->debug_buffer); fw_debug->debug_buffer = NULL; kfree(fw_debug); printk(KERN_INFO "bfad[%d]: Failed to collect fwtrc\n", bfad->inst_no); return -ENOMEM; } file->private_data = fw_debug; return 0; } static int bfad_debugfs_open_fwsave(struct inode *inode, struct file *file) { struct bfad_port_s *port = inode->i_private; struct bfad_s *bfad = port->bfad; struct bfad_debug_info *fw_debug; unsigned long flags; int rc; fw_debug = kzalloc(sizeof(struct bfad_debug_info), GFP_KERNEL); if (!fw_debug) return -ENOMEM; fw_debug->buffer_len = sizeof(struct bfa_trc_mod_s); fw_debug->debug_buffer = vzalloc(fw_debug->buffer_len); if (!fw_debug->debug_buffer) { kfree(fw_debug); printk(KERN_INFO "bfad[%d]: Failed to allocate fwsave buffer\n", bfad->inst_no); return -ENOMEM; } spin_lock_irqsave(&bfad->bfad_lock, flags); rc = bfa_ioc_debug_fwsave(&bfad->bfa.ioc, fw_debug->debug_buffer, &fw_debug->buffer_len); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (rc != BFA_STATUS_OK) { vfree(fw_debug->debug_buffer); fw_debug->debug_buffer = NULL; kfree(fw_debug); printk(KERN_INFO "bfad[%d]: Failed to collect fwsave\n", bfad->inst_no); return -ENOMEM; } file->private_data = fw_debug; return 0; } static int bfad_debugfs_open_reg(struct inode *inode, struct file *file) { struct bfad_debug_info *reg_debug; reg_debug =
kzalloc(sizeof(struct bfad_debug_info), GFP_KERNEL); if (!reg_debug) return -ENOMEM; reg_debug->i_private = inode->i_private; file->private_data = reg_debug; return 0; } /* Changes the current file position */ static loff_t bfad_debugfs_lseek(struct file *file, loff_t offset, int orig) { struct bfad_debug_info *debug = file->private_data; return fixed_size_llseek(file, offset, orig, debug->buffer_len); } static ssize_t bfad_debugfs_read(struct file *file, char __user *buf, size_t nbytes, loff_t *pos) { struct bfad_debug_info *debug = file->private_data; if (!debug || !debug->debug_buffer) return 0; return simple_read_from_buffer(buf, nbytes, pos, debug->debug_buffer, debug->buffer_len); } #define BFA_REG_CT_ADDRSZ (0x40000) #define BFA_REG_CB_ADDRSZ (0x20000) #define BFA_REG_ADDRSZ(__ioc) \ ((u32)(bfa_asic_id_ctc(bfa_ioc_devid(__ioc)) ? \ BFA_REG_CT_ADDRSZ : BFA_REG_CB_ADDRSZ)) #define BFA_REG_ADDRMSK(__ioc) (BFA_REG_ADDRSZ(__ioc) - 1) static bfa_status_t bfad_reg_offset_check(struct bfa_s *bfa, u32 offset, u32 len) { u8 area; /* check [16:15] */ area = (offset >> 15) & 0x7; if (area == 0) { /* PCIe core register */ if ((offset + (len<<2)) > 0x8000) /* 8k dwords or 32KB */ return BFA_STATUS_EINVAL; } else if (area == 0x1) { /* CB 32 KB memory page */ if ((offset + (len<<2)) > 0x10000) /* 8k dwords or 32KB */ return BFA_STATUS_EINVAL; } else { /* CB register space 64KB */ if ((offset + (len<<2)) > BFA_REG_ADDRMSK(&bfa->ioc)) return BFA_STATUS_EINVAL; } return BFA_STATUS_OK; } static ssize_t bfad_debugfs_read_regrd(struct file *file, char __user *buf, size_t nbytes, loff_t *pos) { struct bfad_debug_info *regrd_debug = file->private_data; struct bfad_port_s *port = (struct bfad_port_s *)regrd_debug->i_private; struct bfad_s *bfad = port->bfad; ssize_t rc; if (!bfad->regdata) return 0; rc = simple_read_from_buffer(buf, nbytes, pos, bfad->regdata, bfad->reglen); if ((*pos + nbytes) >= bfad->reglen) { kfree(bfad->regdata); bfad->regdata = NULL; bfad->reglen = 0; } return rc; } static ssize_t bfad_debugfs_write_regrd(struct file *file, const char __user *buf, size_t nbytes, loff_t *ppos) { struct bfad_debug_info *regrd_debug = file->private_data; struct bfad_port_s *port = (struct bfad_port_s *)regrd_debug->i_private; struct bfad_s *bfad = port->bfad; struct bfa_s *bfa = &bfad->bfa; struct bfa_ioc_s *ioc = &bfa->ioc; int addr, rc, i; u32 len; u32 *regbuf; void __iomem *rb, *reg_addr; unsigned long flags; void *kern_buf; kern_buf = memdup_user(buf, nbytes); if (IS_ERR(kern_buf)) return PTR_ERR(kern_buf); rc = sscanf(kern_buf, "%x:%x", &addr, &len); if (rc < 2 || len > (UINT_MAX >> 2)) { printk(KERN_INFO "bfad[%d]: %s failed to read user buf\n", bfad->inst_no, __func__); kfree(kern_buf); return -EINVAL; } kfree(kern_buf); kfree(bfad->regdata); bfad->regdata = NULL; bfad->reglen = 0; bfad->regdata = kzalloc(len << 2, GFP_KERNEL); if (!bfad->regdata) { printk(KERN_INFO "bfad[%d]: Failed to allocate regrd buffer\n", bfad->inst_no); return -ENOMEM; } bfad->reglen = len << 2; rb = bfa_ioc_bar0(ioc); addr &= BFA_REG_ADDRMSK(ioc); /* offset and len sanity check */ rc = bfad_reg_offset_check(bfa, addr, len); if (rc) { printk(KERN_INFO "bfad[%d]: Failed reg offset check\n", bfad->inst_no); kfree(bfad->regdata); bfad->regdata = NULL; bfad->reglen = 0; return -EINVAL; } reg_addr = rb + addr; regbuf = (u32 *)bfad->regdata; spin_lock_irqsave(&bfad->bfad_lock, flags); for (i = 0; i < len; i++) { *regbuf = readl(reg_addr); regbuf++; reg_addr += sizeof(u32); } spin_unlock_irqrestore(&bfad->bfad_lock, flags); 
return nbytes; } static ssize_t bfad_debugfs_write_regwr(struct file *file, const char __user *buf, size_t nbytes, loff_t *ppos) { struct bfad_debug_info *debug = file->private_data; struct bfad_port_s *port = (struct bfad_port_s *)debug->i_private; struct bfad_s *bfad = port->bfad; struct bfa_s *bfa = &bfad->bfa; struct bfa_ioc_s *ioc = &bfa->ioc; int addr, val, rc; void __iomem *reg_addr; unsigned long flags; void *kern_buf; kern_buf = memdup_user(buf, nbytes); if (IS_ERR(kern_buf)) return PTR_ERR(kern_buf); rc = sscanf(kern_buf, "%x:%x", &addr, &val); if (rc < 2) { printk(KERN_INFO "bfad[%d]: %s failed to read user buf\n", bfad->inst_no, __func__); kfree(kern_buf); return -EINVAL; } kfree(kern_buf); addr &= BFA_REG_ADDRMSK(ioc); /* offset only 17 bit and word align */ /* offset and len sanity check */ rc = bfad_reg_offset_check(bfa, addr, 1); if (rc) { printk(KERN_INFO "bfad[%d]: Failed reg offset check\n", bfad->inst_no); return -EINVAL; } reg_addr = (bfa_ioc_bar0(ioc)) + addr; spin_lock_irqsave(&bfad->bfad_lock, flags); writel(val, reg_addr); spin_unlock_irqrestore(&bfad->bfad_lock, flags); return nbytes; } static int bfad_debugfs_release(struct inode *inode, struct file *file) { struct bfad_debug_info *debug = file->private_data; if (!debug) return 0; file->private_data = NULL; kfree(debug); return 0; } static int bfad_debugfs_release_fwtrc(struct inode *inode, struct file *file) { struct bfad_debug_info *fw_debug = file->private_data; if (!fw_debug) return 0; vfree(fw_debug->debug_buffer); file->private_data = NULL; kfree(fw_debug); return 0; } static const struct file_operations bfad_debugfs_op_drvtrc = { .owner = THIS_MODULE, .open = bfad_debugfs_open_drvtrc, .llseek = bfad_debugfs_lseek, .read = bfad_debugfs_read, .release = bfad_debugfs_release, }; static const struct file_operations bfad_debugfs_op_fwtrc = { .owner = THIS_MODULE, .open = bfad_debugfs_open_fwtrc, .llseek = bfad_debugfs_lseek, .read = bfad_debugfs_read, .release = bfad_debugfs_release_fwtrc, }; static const struct file_operations bfad_debugfs_op_fwsave = { .owner = THIS_MODULE, .open = bfad_debugfs_open_fwsave, .llseek = bfad_debugfs_lseek, .read = bfad_debugfs_read, .release = bfad_debugfs_release_fwtrc, }; static const struct file_operations bfad_debugfs_op_regrd = { .owner = THIS_MODULE, .open = bfad_debugfs_open_reg, .llseek = bfad_debugfs_lseek, .read = bfad_debugfs_read_regrd, .write = bfad_debugfs_write_regrd, .release = bfad_debugfs_release, }; static const struct file_operations bfad_debugfs_op_regwr = { .owner = THIS_MODULE, .open = bfad_debugfs_open_reg, .llseek = bfad_debugfs_lseek, .write = bfad_debugfs_write_regwr, .release = bfad_debugfs_release, }; struct bfad_debugfs_entry { const char *name; umode_t mode; const struct file_operations *fops; }; static const struct bfad_debugfs_entry bfad_debugfs_files[] = { { "drvtrc", S_IFREG|S_IRUGO, &bfad_debugfs_op_drvtrc, }, { "fwtrc", S_IFREG|S_IRUGO, &bfad_debugfs_op_fwtrc, }, { "fwsave", S_IFREG|S_IRUGO, &bfad_debugfs_op_fwsave, }, { "regrd", S_IFREG|S_IRUGO|S_IWUSR, &bfad_debugfs_op_regrd, }, { "regwr", S_IFREG|S_IWUSR, &bfad_debugfs_op_regwr, }, }; static struct dentry *bfa_debugfs_root; static atomic_t bfa_debugfs_port_count; inline void bfad_debugfs_init(struct bfad_port_s *port) { struct bfad_s *bfad = port->bfad; const struct bfad_debugfs_entry *file; char name[64]; int i; if (!bfa_debugfs_enable) return; /* Setup the BFA debugfs root directory*/ if (!bfa_debugfs_root) { bfa_debugfs_root = debugfs_create_dir("bfa", NULL); 
atomic_set(&bfa_debugfs_port_count, 0); } /* Setup the pci_dev debugfs directory for the port */ snprintf(name, sizeof(name), "pci_dev:%s", bfad->pci_name); if (!port->port_debugfs_root) { port->port_debugfs_root = debugfs_create_dir(name, bfa_debugfs_root); atomic_inc(&bfa_debugfs_port_count); for (i = 0; i < ARRAY_SIZE(bfad_debugfs_files); i++) { file = &bfad_debugfs_files[i]; bfad->bfad_dentry_files[i] = debugfs_create_file(file->name, file->mode, port->port_debugfs_root, port, file->fops); } } return; } inline void bfad_debugfs_exit(struct bfad_port_s *port) { struct bfad_s *bfad = port->bfad; int i; for (i = 0; i < ARRAY_SIZE(bfad_debugfs_files); i++) { if (bfad->bfad_dentry_files[i]) { debugfs_remove(bfad->bfad_dentry_files[i]); bfad->bfad_dentry_files[i] = NULL; } } /* Remove the pci_dev debugfs directory for the port */ if (port->port_debugfs_root) { debugfs_remove(port->port_debugfs_root); port->port_debugfs_root = NULL; atomic_dec(&bfa_debugfs_port_count); } /* Remove the BFA debugfs root directory */ if (atomic_read(&bfa_debugfs_port_count) == 0) { debugfs_remove(bfa_debugfs_root); bfa_debugfs_root = NULL; } }
linux-master
drivers/scsi/bfa/bfad_debugfs.c
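As a usage illustration of the regrd interface implemented in bfad_debugfs.c above: userspace writes "offset:length" in hex (length counted in 32-bit words, matching the sscanf(kern_buf, "%x:%x", &addr, &len) in bfad_debugfs_write_regrd) and then reads the captured register dump back from the same file. The sketch below is hypothetical and not part of the driver; the pci_dev directory name and the offset/length values are placeholders, and the terminating NUL is written along with the request since the driver parses the raw copied buffer with sscanf.

/* bfa_regrd.c: userspace sketch of the regrd read protocol described above. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>
#include <fcntl.h>
#include <unistd.h>

/* Placeholder path; the pci_dev name matches the one under /sys/bus/pci/drivers/bfa. */
#define REGRD_PATH "/sys/kernel/debug/bfa/pci_dev:0000:03:00.0/regrd"

int main(void)
{
	const char req[] = "0x0:0x8";	/* offset 0x0, 8 words (placeholder values) */
	uint32_t regs[8];
	ssize_t n, i;
	int fd;

	fd = open(REGRD_PATH, O_RDWR);
	if (fd < 0) {
		perror(REGRD_PATH);
		return 1;
	}
	/* Include the terminating NUL so the driver's sscanf sees a C string. */
	if (write(fd, req, sizeof(req)) < 0) {
		perror("write");
		close(fd);
		return 1;
	}
	/* write() advanced the file offset; rewind before reading the dump. */
	if (lseek(fd, 0, SEEK_SET) < 0) {
		perror("lseek");
		close(fd);
		return 1;
	}
	n = read(fd, regs, sizeof(regs));
	for (i = 0; i < n / (ssize_t)sizeof(uint32_t); i++)
		printf("0x%04zx: 0x%08" PRIx32 "\n", (size_t)(i * 4), regs[i]);
	close(fd);
	return 0;
}

The regwr file follows the same "%x:%x" convention, taking a single offset:value pair per write.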
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. * Copyright (c) 2014- QLogic Corporation. * All rights reserved * www.qlogic.com * * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. */ #include "bfad_drv.h" #include "bfad_im.h" #include "bfa_ioc.h" #include "bfi_reg.h" #include "bfa_defs.h" #include "bfa_defs_svc.h" #include "bfi.h" BFA_TRC_FILE(CNA, IOC); /* * IOC local definitions */ #define BFA_IOC_TOV 3000 /* msecs */ #define BFA_IOC_HWSEM_TOV 500 /* msecs */ #define BFA_IOC_HB_TOV 500 /* msecs */ #define BFA_IOC_TOV_RECOVER BFA_IOC_HB_TOV #define BFA_IOC_POLL_TOV BFA_TIMER_FREQ #define bfa_ioc_timer_start(__ioc) \ bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \ bfa_ioc_timeout, (__ioc), BFA_IOC_TOV) #define bfa_ioc_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->ioc_timer) #define bfa_hb_timer_start(__ioc) \ bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->hb_timer, \ bfa_ioc_hb_check, (__ioc), BFA_IOC_HB_TOV) #define bfa_hb_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->hb_timer) #define BFA_DBG_FWTRC_OFF(_fn) (BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn)) #define bfa_ioc_state_disabled(__sm) \ (((__sm) == BFI_IOC_UNINIT) || \ ((__sm) == BFI_IOC_INITING) || \ ((__sm) == BFI_IOC_HWINIT) || \ ((__sm) == BFI_IOC_DISABLED) || \ ((__sm) == BFI_IOC_FAIL) || \ ((__sm) == BFI_IOC_CFG_DISABLED)) /* * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. */ #define bfa_ioc_firmware_lock(__ioc) \ ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc)) #define bfa_ioc_firmware_unlock(__ioc) \ ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc)) #define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc)) #define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc)) #define bfa_ioc_notify_fail(__ioc) \ ((__ioc)->ioc_hwif->ioc_notify_fail(__ioc)) #define bfa_ioc_sync_start(__ioc) \ ((__ioc)->ioc_hwif->ioc_sync_start(__ioc)) #define bfa_ioc_sync_join(__ioc) \ ((__ioc)->ioc_hwif->ioc_sync_join(__ioc)) #define bfa_ioc_sync_leave(__ioc) \ ((__ioc)->ioc_hwif->ioc_sync_leave(__ioc)) #define bfa_ioc_sync_ack(__ioc) \ ((__ioc)->ioc_hwif->ioc_sync_ack(__ioc)) #define bfa_ioc_sync_complete(__ioc) \ ((__ioc)->ioc_hwif->ioc_sync_complete(__ioc)) #define bfa_ioc_set_cur_ioc_fwstate(__ioc, __fwstate) \ ((__ioc)->ioc_hwif->ioc_set_fwstate(__ioc, __fwstate)) #define bfa_ioc_get_cur_ioc_fwstate(__ioc) \ ((__ioc)->ioc_hwif->ioc_get_fwstate(__ioc)) #define bfa_ioc_set_alt_ioc_fwstate(__ioc, __fwstate) \ ((__ioc)->ioc_hwif->ioc_set_alt_fwstate(__ioc, __fwstate)) #define bfa_ioc_get_alt_ioc_fwstate(__ioc) \ ((__ioc)->ioc_hwif->ioc_get_alt_fwstate(__ioc)) #define bfa_ioc_mbox_cmd_pending(__ioc) \ (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \ readl((__ioc)->ioc_regs.hfn_mbox_cmd)) bfa_boolean_t bfa_auto_recover = BFA_TRUE; /* * forward declarations */ static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc); static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force); static void bfa_ioc_timeout(void *ioc); static void bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc); static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc); static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc); static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc); static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc); static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc); static void bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc); static void bfa_ioc_recover(struct bfa_ioc_s *ioc); static void bfa_ioc_event_notify(struct bfa_ioc_s *ioc , enum bfa_ioc_event_e 
event); static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc); static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc); static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc); static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc); static enum bfi_ioc_img_ver_cmp_e bfa_ioc_fw_ver_patch_cmp( struct bfi_ioc_image_hdr_s *base_fwhdr, struct bfi_ioc_image_hdr_s *fwhdr_to_cmp); static enum bfi_ioc_img_ver_cmp_e bfa_ioc_flash_fwver_cmp( struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *base_fwhdr); /* * IOC state machine definitions/declarations */ enum ioc_event { IOC_E_RESET = 1, /* IOC reset request */ IOC_E_ENABLE = 2, /* IOC enable request */ IOC_E_DISABLE = 3, /* IOC disable request */ IOC_E_DETACH = 4, /* driver detach cleanup */ IOC_E_ENABLED = 5, /* f/w enabled */ IOC_E_FWRSP_GETATTR = 6, /* IOC get attribute response */ IOC_E_DISABLED = 7, /* f/w disabled */ IOC_E_PFFAILED = 8, /* failure notice by iocpf sm */ IOC_E_HBFAIL = 9, /* heartbeat failure */ IOC_E_HWERROR = 10, /* hardware error interrupt */ IOC_E_TIMEOUT = 11, /* timeout */ IOC_E_HWFAILED = 12, /* PCI mapping failure notice */ }; bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event); bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event); bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event); bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event); bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event); bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc_s, enum ioc_event); bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event); bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event); bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event); bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc_s, enum ioc_event); static struct bfa_sm_table_s ioc_sm_table[] = { {BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT}, {BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET}, {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING}, {BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR}, {BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL}, {BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL}, {BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL}, {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING}, {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED}, {BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL}, }; /* * IOCPF state machine definitions/declarations */ #define bfa_iocpf_timer_start(__ioc) \ bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \ bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV) #define bfa_iocpf_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->ioc_timer) #define bfa_iocpf_poll_timer_start(__ioc) \ bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \ bfa_iocpf_poll_timeout, (__ioc), BFA_IOC_POLL_TOV) #define bfa_sem_timer_start(__ioc) \ bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer, \ bfa_iocpf_sem_timeout, (__ioc), BFA_IOC_HWSEM_TOV) #define bfa_sem_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->sem_timer) /* * Forward declareations for iocpf state machine */ static void bfa_iocpf_timeout(void *ioc_arg); static void bfa_iocpf_sem_timeout(void *ioc_arg); static void bfa_iocpf_poll_timeout(void *ioc_arg); /* * IOCPF state machine events */ enum iocpf_event { IOCPF_E_ENABLE = 1, /* IOCPF enable request */ IOCPF_E_DISABLE = 2, /* IOCPF disable request */ IOCPF_E_STOP = 3, /* stop on driver detach */ IOCPF_E_FWREADY = 4, /* f/w initialization done */ IOCPF_E_FWRSP_ENABLE = 5, /* enable f/w response */ IOCPF_E_FWRSP_DISABLE = 6, /* disable f/w response */ IOCPF_E_FAIL = 7, /* 
failure notice by ioc sm */ IOCPF_E_INITFAIL = 8, /* init fail notice by ioc sm */ IOCPF_E_GETATTRFAIL = 9, /* init fail notice by ioc sm */ IOCPF_E_SEMLOCKED = 10, /* h/w semaphore is locked */ IOCPF_E_TIMEOUT = 11, /* f/w response timeout */ IOCPF_E_SEM_ERROR = 12, /* h/w sem mapping error */ }; /* * IOCPF states */ enum bfa_iocpf_state { BFA_IOCPF_RESET = 1, /* IOC is in reset state */ BFA_IOCPF_SEMWAIT = 2, /* Waiting for IOC h/w semaphore */ BFA_IOCPF_HWINIT = 3, /* IOC h/w is being initialized */ BFA_IOCPF_READY = 4, /* IOCPF is initialized */ BFA_IOCPF_INITFAIL = 5, /* IOCPF failed */ BFA_IOCPF_FAIL = 6, /* IOCPF failed */ BFA_IOCPF_DISABLING = 7, /* IOCPF is being disabled */ BFA_IOCPF_DISABLED = 8, /* IOCPF is disabled */ BFA_IOCPF_FWMISMATCH = 9, /* IOC f/w different from drivers */ }; bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf_s, enum iocpf_event); bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf_s, enum iocpf_event); bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf_s, enum iocpf_event); bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf_s, enum iocpf_event); bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf_s, enum iocpf_event); bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf_s, enum iocpf_event); bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf_s, enum iocpf_event); bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf_s, enum iocpf_event); bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf_s, enum iocpf_event); bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf_s, enum iocpf_event); bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf_s, enum iocpf_event); bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf_s, enum iocpf_event); bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf_s, enum iocpf_event); bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event); static struct bfa_sm_table_s iocpf_sm_table[] = { {BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET}, {BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH}, {BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH}, {BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT}, {BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT}, {BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT}, {BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY}, {BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL}, {BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL}, {BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL}, {BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL}, {BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING}, {BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING}, {BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED}, }; /* * IOC State Machine */ /* * Beginning state. IOC uninit state. */ static void bfa_ioc_sm_uninit_entry(struct bfa_ioc_s *ioc) { } /* * IOC is in uninit state. */ static void bfa_ioc_sm_uninit(struct bfa_ioc_s *ioc, enum ioc_event event) { bfa_trc(ioc, event); switch (event) { case IOC_E_RESET: bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); break; default: bfa_sm_fault(ioc, event); } } /* * Reset entry actions -- initialize state machine */ static void bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc) { bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset); } /* * IOC is in reset state. 
*/ static void bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event) { bfa_trc(ioc, event); switch (event) { case IOC_E_ENABLE: bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling); break; case IOC_E_DISABLE: bfa_ioc_disable_comp(ioc); break; case IOC_E_DETACH: bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); break; default: bfa_sm_fault(ioc, event); } } static void bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc) { bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE); } /* * Host IOC function is being enabled, awaiting response from firmware. * Semaphore is acquired. */ static void bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event) { bfa_trc(ioc, event); switch (event) { case IOC_E_ENABLED: bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr); break; case IOC_E_PFFAILED: /* !!! fall through !!! */ case IOC_E_HWERROR: ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); bfa_fsm_set_state(ioc, bfa_ioc_sm_fail); if (event != IOC_E_PFFAILED) bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL); break; case IOC_E_HWFAILED: ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail); break; case IOC_E_DISABLE: bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); break; case IOC_E_DETACH: bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP); break; case IOC_E_ENABLE: break; default: bfa_sm_fault(ioc, event); } } static void bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc) { bfa_ioc_timer_start(ioc); bfa_ioc_send_getattr(ioc); } /* * IOC configuration in progress. Timer is active. */ static void bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event) { bfa_trc(ioc, event); switch (event) { case IOC_E_FWRSP_GETATTR: bfa_ioc_timer_stop(ioc); bfa_fsm_set_state(ioc, bfa_ioc_sm_op); break; case IOC_E_PFFAILED: case IOC_E_HWERROR: bfa_ioc_timer_stop(ioc); fallthrough; case IOC_E_TIMEOUT: ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); bfa_fsm_set_state(ioc, bfa_ioc_sm_fail); if (event != IOC_E_PFFAILED) bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL); break; case IOC_E_DISABLE: bfa_ioc_timer_stop(ioc); bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); break; case IOC_E_ENABLE: break; default: bfa_sm_fault(ioc, event); } } static void bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc) { struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK); bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED); bfa_ioc_hb_monitor(ioc); BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n"); bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE); } static void bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event) { bfa_trc(ioc, event); switch (event) { case IOC_E_ENABLE: break; case IOC_E_DISABLE: bfa_hb_timer_stop(ioc); bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); break; case IOC_E_PFFAILED: case IOC_E_HWERROR: bfa_hb_timer_stop(ioc); fallthrough; case IOC_E_HBFAIL: if (ioc->iocpf.auto_recover) bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry); else bfa_fsm_set_state(ioc, bfa_ioc_sm_fail); bfa_ioc_fail_notify(ioc); if (event != IOC_E_PFFAILED) bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL); break; default: bfa_sm_fault(ioc, event); } } static void bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc) { struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE); BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n"); bfa_ioc_aen_post(ioc, BFA_IOC_AEN_DISABLE); } /* * IOC is being disabled */ static void bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event) { 
bfa_trc(ioc, event); switch (event) { case IOC_E_DISABLED: bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); break; case IOC_E_HWERROR: /* * No state change. Will move to disabled state * after iocpf sm completes failure processing and * moves to disabled state. */ bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL); break; case IOC_E_HWFAILED: bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail); bfa_ioc_disable_comp(ioc); break; default: bfa_sm_fault(ioc, event); } } /* * IOC disable completion entry. */ static void bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc) { bfa_ioc_disable_comp(ioc); } static void bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event) { bfa_trc(ioc, event); switch (event) { case IOC_E_ENABLE: bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling); break; case IOC_E_DISABLE: ioc->cbfn->disable_cbfn(ioc->bfa); break; case IOC_E_DETACH: bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP); break; default: bfa_sm_fault(ioc, event); } } static void bfa_ioc_sm_fail_retry_entry(struct bfa_ioc_s *ioc) { bfa_trc(ioc, 0); } /* * Hardware initialization retry. */ static void bfa_ioc_sm_fail_retry(struct bfa_ioc_s *ioc, enum ioc_event event) { bfa_trc(ioc, event); switch (event) { case IOC_E_ENABLED: bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr); break; case IOC_E_PFFAILED: case IOC_E_HWERROR: /* * Initialization retry failed. */ ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); bfa_fsm_set_state(ioc, bfa_ioc_sm_fail); if (event != IOC_E_PFFAILED) bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL); break; case IOC_E_HWFAILED: ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail); break; case IOC_E_ENABLE: break; case IOC_E_DISABLE: bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); break; case IOC_E_DETACH: bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP); break; default: bfa_sm_fault(ioc, event); } } static void bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc) { bfa_trc(ioc, 0); } /* * IOC failure. */ static void bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event) { bfa_trc(ioc, event); switch (event) { case IOC_E_ENABLE: ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); break; case IOC_E_DISABLE: bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); break; case IOC_E_DETACH: bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP); break; case IOC_E_HWERROR: case IOC_E_HWFAILED: /* * HB failure / HW error notification, ignore. */ break; default: bfa_sm_fault(ioc, event); } } static void bfa_ioc_sm_hwfail_entry(struct bfa_ioc_s *ioc) { bfa_trc(ioc, 0); } static void bfa_ioc_sm_hwfail(struct bfa_ioc_s *ioc, enum ioc_event event) { bfa_trc(ioc, event); switch (event) { case IOC_E_ENABLE: ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); break; case IOC_E_DISABLE: ioc->cbfn->disable_cbfn(ioc->bfa); break; case IOC_E_DETACH: bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); break; case IOC_E_HWERROR: /* Ignore - already in hwfail state */ break; default: bfa_sm_fault(ioc, event); } } /* * IOCPF State Machine */ /* * Reset entry actions -- initialize state machine */ static void bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf) { iocpf->fw_mismatch_notified = BFA_FALSE; iocpf->auto_recover = bfa_auto_recover; } /* * Beginning state. IOC is in reset state. 
*/ static void bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event) { struct bfa_ioc_s *ioc = iocpf->ioc; bfa_trc(ioc, event); switch (event) { case IOCPF_E_ENABLE: bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck); break; case IOCPF_E_STOP: break; default: bfa_sm_fault(ioc, event); } } /* * Semaphore should be acquired for version check. */ static void bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf) { struct bfi_ioc_image_hdr_s fwhdr; u32 r32, fwstate, pgnum, loff = 0; int i; /* * Spin on init semaphore to serialize. */ r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg); while (r32 & 0x1) { udelay(20); r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg); } /* h/w sem init */ fwstate = bfa_ioc_get_cur_ioc_fwstate(iocpf->ioc); if (fwstate == BFI_IOC_UNINIT) { writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg); goto sem_get; } bfa_ioc_fwver_get(iocpf->ioc, &fwhdr); if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) { writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg); goto sem_get; } /* * Clear fwver hdr */ pgnum = PSS_SMEM_PGNUM(iocpf->ioc->ioc_regs.smem_pg0, loff); writel(pgnum, iocpf->ioc->ioc_regs.host_page_num_fn); for (i = 0; i < sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32); i++) { bfa_mem_write(iocpf->ioc->ioc_regs.smem_page_start, loff, 0); loff += sizeof(u32); } bfa_trc(iocpf->ioc, fwstate); bfa_trc(iocpf->ioc, swab32(fwhdr.exec)); bfa_ioc_set_cur_ioc_fwstate(iocpf->ioc, BFI_IOC_UNINIT); bfa_ioc_set_alt_ioc_fwstate(iocpf->ioc, BFI_IOC_UNINIT); /* * Unlock the hw semaphore. Should be here only once per boot. */ bfa_ioc_ownership_reset(iocpf->ioc); /* * unlock init semaphore. */ writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg); sem_get: bfa_ioc_hw_sem_get(iocpf->ioc); } /* * Awaiting h/w semaphore to continue with version check. */ static void bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event) { struct bfa_ioc_s *ioc = iocpf->ioc; bfa_trc(ioc, event); switch (event) { case IOCPF_E_SEMLOCKED: if (bfa_ioc_firmware_lock(ioc)) { if (bfa_ioc_sync_start(ioc)) { bfa_ioc_sync_join(ioc); bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit); } else { bfa_ioc_firmware_unlock(ioc); writel(1, ioc->ioc_regs.ioc_sem_reg); bfa_sem_timer_start(ioc); } } else { writel(1, ioc->ioc_regs.ioc_sem_reg); bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch); } break; case IOCPF_E_SEM_ERROR: bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail); bfa_fsm_send_event(ioc, IOC_E_HWFAILED); break; case IOCPF_E_DISABLE: bfa_sem_timer_stop(ioc); bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); bfa_fsm_send_event(ioc, IOC_E_DISABLED); break; case IOCPF_E_STOP: bfa_sem_timer_stop(ioc); bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); break; default: bfa_sm_fault(ioc, event); } } /* * Notify enable completion callback. */ static void bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf) { /* * Call only the first time sm enters fwmismatch state. */ if (iocpf->fw_mismatch_notified == BFA_FALSE) bfa_ioc_pf_fwmismatch(iocpf->ioc); iocpf->fw_mismatch_notified = BFA_TRUE; bfa_iocpf_timer_start(iocpf->ioc); } /* * Awaiting firmware version match. 
*/ static void bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event) { struct bfa_ioc_s *ioc = iocpf->ioc; bfa_trc(ioc, event); switch (event) { case IOCPF_E_TIMEOUT: bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck); break; case IOCPF_E_DISABLE: bfa_iocpf_timer_stop(ioc); bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); bfa_fsm_send_event(ioc, IOC_E_DISABLED); break; case IOCPF_E_STOP: bfa_iocpf_timer_stop(ioc); bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); break; default: bfa_sm_fault(ioc, event); } } /* * Request for semaphore. */ static void bfa_iocpf_sm_semwait_entry(struct bfa_iocpf_s *iocpf) { bfa_ioc_hw_sem_get(iocpf->ioc); } /* * Awaiting semaphore for h/w initialzation. */ static void bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event) { struct bfa_ioc_s *ioc = iocpf->ioc; bfa_trc(ioc, event); switch (event) { case IOCPF_E_SEMLOCKED: if (bfa_ioc_sync_complete(ioc)) { bfa_ioc_sync_join(ioc); bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit); } else { writel(1, ioc->ioc_regs.ioc_sem_reg); bfa_sem_timer_start(ioc); } break; case IOCPF_E_SEM_ERROR: bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail); bfa_fsm_send_event(ioc, IOC_E_HWFAILED); break; case IOCPF_E_DISABLE: bfa_sem_timer_stop(ioc); bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); break; default: bfa_sm_fault(ioc, event); } } static void bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf) { iocpf->poll_time = 0; bfa_ioc_hwinit(iocpf->ioc, BFA_FALSE); } /* * Hardware is being initialized. Interrupts are enabled. * Holding hardware semaphore lock. */ static void bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event) { struct bfa_ioc_s *ioc = iocpf->ioc; bfa_trc(ioc, event); switch (event) { case IOCPF_E_FWREADY: bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling); break; case IOCPF_E_TIMEOUT: writel(1, ioc->ioc_regs.ioc_sem_reg); bfa_fsm_send_event(ioc, IOC_E_PFFAILED); bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync); break; case IOCPF_E_DISABLE: bfa_iocpf_timer_stop(ioc); bfa_ioc_sync_leave(ioc); writel(1, ioc->ioc_regs.ioc_sem_reg); bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); break; default: bfa_sm_fault(ioc, event); } } static void bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf) { bfa_iocpf_timer_start(iocpf->ioc); /* * Enable Interrupts before sending fw IOC ENABLE cmd. */ iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa); bfa_ioc_send_enable(iocpf->ioc); } /* * Host IOC function is being enabled, awaiting response from firmware. * Semaphore is acquired. 
*/ static void bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event) { struct bfa_ioc_s *ioc = iocpf->ioc; bfa_trc(ioc, event); switch (event) { case IOCPF_E_FWRSP_ENABLE: bfa_iocpf_timer_stop(ioc); writel(1, ioc->ioc_regs.ioc_sem_reg); bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready); break; case IOCPF_E_INITFAIL: bfa_iocpf_timer_stop(ioc); fallthrough; case IOCPF_E_TIMEOUT: writel(1, ioc->ioc_regs.ioc_sem_reg); if (event == IOCPF_E_TIMEOUT) bfa_fsm_send_event(ioc, IOC_E_PFFAILED); bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync); break; case IOCPF_E_DISABLE: bfa_iocpf_timer_stop(ioc); writel(1, ioc->ioc_regs.ioc_sem_reg); bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling); break; default: bfa_sm_fault(ioc, event); } } static void bfa_iocpf_sm_ready_entry(struct bfa_iocpf_s *iocpf) { bfa_fsm_send_event(iocpf->ioc, IOC_E_ENABLED); } static void bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event) { struct bfa_ioc_s *ioc = iocpf->ioc; bfa_trc(ioc, event); switch (event) { case IOCPF_E_DISABLE: bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling); break; case IOCPF_E_GETATTRFAIL: bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync); break; case IOCPF_E_FAIL: bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync); break; default: bfa_sm_fault(ioc, event); } } static void bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf) { bfa_iocpf_timer_start(iocpf->ioc); bfa_ioc_send_disable(iocpf->ioc); } /* * IOC is being disabled */ static void bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event) { struct bfa_ioc_s *ioc = iocpf->ioc; bfa_trc(ioc, event); switch (event) { case IOCPF_E_FWRSP_DISABLE: bfa_iocpf_timer_stop(ioc); bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); break; case IOCPF_E_FAIL: bfa_iocpf_timer_stop(ioc); fallthrough; case IOCPF_E_TIMEOUT: bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL); bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); break; case IOCPF_E_FWRSP_ENABLE: break; default: bfa_sm_fault(ioc, event); } } static void bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf_s *iocpf) { bfa_ioc_hw_sem_get(iocpf->ioc); } /* * IOC hb ack request is being removed. */ static void bfa_iocpf_sm_disabling_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event) { struct bfa_ioc_s *ioc = iocpf->ioc; bfa_trc(ioc, event); switch (event) { case IOCPF_E_SEMLOCKED: bfa_ioc_sync_leave(ioc); writel(1, ioc->ioc_regs.ioc_sem_reg); bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); break; case IOCPF_E_SEM_ERROR: bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail); bfa_fsm_send_event(ioc, IOC_E_HWFAILED); break; case IOCPF_E_FAIL: break; default: bfa_sm_fault(ioc, event); } } /* * IOC disable completion entry. */ static void bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf) { bfa_ioc_mbox_flush(iocpf->ioc); bfa_fsm_send_event(iocpf->ioc, IOC_E_DISABLED); } static void bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event) { struct bfa_ioc_s *ioc = iocpf->ioc; bfa_trc(ioc, event); switch (event) { case IOCPF_E_ENABLE: bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait); break; case IOCPF_E_STOP: bfa_ioc_firmware_unlock(ioc); bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); break; default: bfa_sm_fault(ioc, event); } } static void bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf_s *iocpf) { bfa_ioc_debug_save_ftrc(iocpf->ioc); bfa_ioc_hw_sem_get(iocpf->ioc); } /* * Hardware initialization failed. 
*/ static void bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event) { struct bfa_ioc_s *ioc = iocpf->ioc; bfa_trc(ioc, event); switch (event) { case IOCPF_E_SEMLOCKED: bfa_ioc_notify_fail(ioc); bfa_ioc_sync_leave(ioc); bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL); writel(1, ioc->ioc_regs.ioc_sem_reg); bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail); break; case IOCPF_E_SEM_ERROR: bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail); bfa_fsm_send_event(ioc, IOC_E_HWFAILED); break; case IOCPF_E_DISABLE: bfa_sem_timer_stop(ioc); bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); break; case IOCPF_E_STOP: bfa_sem_timer_stop(ioc); bfa_ioc_firmware_unlock(ioc); bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); break; case IOCPF_E_FAIL: break; default: bfa_sm_fault(ioc, event); } } static void bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf) { bfa_trc(iocpf->ioc, 0); } /* * Hardware initialization failed. */ static void bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event) { struct bfa_ioc_s *ioc = iocpf->ioc; bfa_trc(ioc, event); switch (event) { case IOCPF_E_DISABLE: bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); break; case IOCPF_E_STOP: bfa_ioc_firmware_unlock(ioc); bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); break; default: bfa_sm_fault(ioc, event); } } static void bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf_s *iocpf) { /* * Mark IOC as failed in hardware and stop firmware. */ bfa_ioc_lpu_stop(iocpf->ioc); /* * Flush any queued up mailbox requests. */ bfa_ioc_mbox_flush(iocpf->ioc); bfa_ioc_hw_sem_get(iocpf->ioc); } static void bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event) { struct bfa_ioc_s *ioc = iocpf->ioc; bfa_trc(ioc, event); switch (event) { case IOCPF_E_SEMLOCKED: bfa_ioc_sync_ack(ioc); bfa_ioc_notify_fail(ioc); if (!iocpf->auto_recover) { bfa_ioc_sync_leave(ioc); bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL); writel(1, ioc->ioc_regs.ioc_sem_reg); bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail); } else { if (bfa_ioc_sync_complete(ioc)) bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit); else { writel(1, ioc->ioc_regs.ioc_sem_reg); bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait); } } break; case IOCPF_E_SEM_ERROR: bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail); bfa_fsm_send_event(ioc, IOC_E_HWFAILED); break; case IOCPF_E_DISABLE: bfa_sem_timer_stop(ioc); bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); break; case IOCPF_E_FAIL: break; default: bfa_sm_fault(ioc, event); } } static void bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf) { bfa_trc(iocpf->ioc, 0); } /* * IOC is in failed state. */ static void bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event) { struct bfa_ioc_s *ioc = iocpf->ioc; bfa_trc(ioc, event); switch (event) { case IOCPF_E_DISABLE: bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); break; default: bfa_sm_fault(ioc, event); } } /* * BFA IOC private functions */ /* * Notify common modules registered for notification. 
*/ static void bfa_ioc_event_notify(struct bfa_ioc_s *ioc, enum bfa_ioc_event_e event) { struct bfa_ioc_notify_s *notify; struct list_head *qe; list_for_each(qe, &ioc->notify_q) { notify = (struct bfa_ioc_notify_s *)qe; notify->cbfn(notify->cbarg, event); } } static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc) { ioc->cbfn->disable_cbfn(ioc->bfa); bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED); } bfa_boolean_t bfa_ioc_sem_get(void __iomem *sem_reg) { u32 r32; int cnt = 0; #define BFA_SEM_SPINCNT 3000 r32 = readl(sem_reg); while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) { cnt++; udelay(2); r32 = readl(sem_reg); } if (!(r32 & 1)) return BFA_TRUE; return BFA_FALSE; } static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc) { u32 r32; /* * First read to the semaphore register will return 0, subsequent reads * will return 1. Semaphore is released by writing 1 to the register */ r32 = readl(ioc->ioc_regs.ioc_sem_reg); if (r32 == ~0) { WARN_ON(r32 == ~0); bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR); return; } if (!(r32 & 1)) { bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED); return; } bfa_sem_timer_start(ioc); } /* * Initialize LPU local memory (aka secondary memory / SRAM) */ static void bfa_ioc_lmem_init(struct bfa_ioc_s *ioc) { u32 pss_ctl; int i; #define PSS_LMEM_INIT_TIME 10000 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg); pss_ctl &= ~__PSS_LMEM_RESET; pss_ctl |= __PSS_LMEM_INIT_EN; /* * i2c workaround 12.5khz clock */ pss_ctl |= __PSS_I2C_CLK_DIV(3UL); writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg); /* * wait for memory initialization to be complete */ i = 0; do { pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg); i++; } while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME)); /* * If memory initialization is not successful, IOC timeout will catch * such failures. */ WARN_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE)); bfa_trc(ioc, pss_ctl); pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN); writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg); } static void bfa_ioc_lpu_start(struct bfa_ioc_s *ioc) { u32 pss_ctl; /* * Take processor out of reset. */ pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg); pss_ctl &= ~__PSS_LPU0_RESET; writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg); } static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc) { u32 pss_ctl; /* * Put processors in reset. */ pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg); pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET); writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg); } /* * Get driver and firmware versions. */ void bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr) { u32 pgnum; u32 loff = 0; int i; u32 *fwsig = (u32 *) fwhdr; pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff); writel(pgnum, ioc->ioc_regs.host_page_num_fn); for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32)); i++) { fwsig[i] = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff); loff += sizeof(u32); } } /* * Returns TRUE if driver is willing to work with current smem f/w version. */ bfa_boolean_t bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *smem_fwhdr) { struct bfi_ioc_image_hdr_s *drv_fwhdr; enum bfi_ioc_img_ver_cmp_e smem_flash_cmp, drv_smem_cmp; drv_fwhdr = (struct bfi_ioc_image_hdr_s *) bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0); /* * If smem is incompatible or old, driver should not work with it. 
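* The driver header is first compared against the smem header at patch granularity; only when smem is neither incompatible nor older is the flash image consulted to decide which copy to run with.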
*/ drv_smem_cmp = bfa_ioc_fw_ver_patch_cmp(drv_fwhdr, smem_fwhdr); if (drv_smem_cmp == BFI_IOC_IMG_VER_INCOMP || drv_smem_cmp == BFI_IOC_IMG_VER_OLD) { return BFA_FALSE; } /* * IF Flash has a better F/W than smem do not work with smem. * If smem f/w == flash f/w, as smem f/w not old | incmp, work with it. * If Flash is old or incomp work with smem iff smem f/w == drv f/w. */ smem_flash_cmp = bfa_ioc_flash_fwver_cmp(ioc, smem_fwhdr); if (smem_flash_cmp == BFI_IOC_IMG_VER_BETTER) { return BFA_FALSE; } else if (smem_flash_cmp == BFI_IOC_IMG_VER_SAME) { return BFA_TRUE; } else { return (drv_smem_cmp == BFI_IOC_IMG_VER_SAME) ? BFA_TRUE : BFA_FALSE; } } /* * Return true if current running version is valid. Firmware signature and * execution context (driver/bios) must match. */ static bfa_boolean_t bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env) { struct bfi_ioc_image_hdr_s fwhdr; bfa_ioc_fwver_get(ioc, &fwhdr); if (swab32(fwhdr.bootenv) != boot_env) { bfa_trc(ioc, fwhdr.bootenv); bfa_trc(ioc, boot_env); return BFA_FALSE; } return bfa_ioc_fwver_cmp(ioc, &fwhdr); } static bfa_boolean_t bfa_ioc_fwver_md5_check(struct bfi_ioc_image_hdr_s *fwhdr_1, struct bfi_ioc_image_hdr_s *fwhdr_2) { int i; for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) if (fwhdr_1->md5sum[i] != fwhdr_2->md5sum[i]) return BFA_FALSE; return BFA_TRUE; } /* * Returns TRUE if major minor and maintainence are same. * If patch versions are same, check for MD5 Checksum to be same. */ static bfa_boolean_t bfa_ioc_fw_ver_compatible(struct bfi_ioc_image_hdr_s *drv_fwhdr, struct bfi_ioc_image_hdr_s *fwhdr_to_cmp) { if (drv_fwhdr->signature != fwhdr_to_cmp->signature) return BFA_FALSE; if (drv_fwhdr->fwver.major != fwhdr_to_cmp->fwver.major) return BFA_FALSE; if (drv_fwhdr->fwver.minor != fwhdr_to_cmp->fwver.minor) return BFA_FALSE; if (drv_fwhdr->fwver.maint != fwhdr_to_cmp->fwver.maint) return BFA_FALSE; if (drv_fwhdr->fwver.patch == fwhdr_to_cmp->fwver.patch && drv_fwhdr->fwver.phase == fwhdr_to_cmp->fwver.phase && drv_fwhdr->fwver.build == fwhdr_to_cmp->fwver.build) { return bfa_ioc_fwver_md5_check(drv_fwhdr, fwhdr_to_cmp); } return BFA_TRUE; } static bfa_boolean_t bfa_ioc_flash_fwver_valid(struct bfi_ioc_image_hdr_s *flash_fwhdr) { if (flash_fwhdr->fwver.major == 0 || flash_fwhdr->fwver.major == 0xFF) return BFA_FALSE; return BFA_TRUE; } static bfa_boolean_t fwhdr_is_ga(struct bfi_ioc_image_hdr_s *fwhdr) { if (fwhdr->fwver.phase == 0 && fwhdr->fwver.build == 0) return BFA_TRUE; return BFA_FALSE; } /* * Returns TRUE if both are compatible and patch of fwhdr_to_cmp is better. */ static enum bfi_ioc_img_ver_cmp_e bfa_ioc_fw_ver_patch_cmp(struct bfi_ioc_image_hdr_s *base_fwhdr, struct bfi_ioc_image_hdr_s *fwhdr_to_cmp) { if (bfa_ioc_fw_ver_compatible(base_fwhdr, fwhdr_to_cmp) == BFA_FALSE) return BFI_IOC_IMG_VER_INCOMP; if (fwhdr_to_cmp->fwver.patch > base_fwhdr->fwver.patch) return BFI_IOC_IMG_VER_BETTER; else if (fwhdr_to_cmp->fwver.patch < base_fwhdr->fwver.patch) return BFI_IOC_IMG_VER_OLD; /* * GA takes priority over internal builds of the same patch stream. * At this point major minor maint and patch numbers are same. 
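* A GA build (phase and build both zero) outranks any non-GA build at the same patch level; otherwise the phase and then the build number decide, and fully matching versions fall back to the MD5 comparison already performed by the compatibility check.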
*/ if (fwhdr_is_ga(base_fwhdr) == BFA_TRUE) { if (fwhdr_is_ga(fwhdr_to_cmp)) return BFI_IOC_IMG_VER_SAME; else return BFI_IOC_IMG_VER_OLD; } else { if (fwhdr_is_ga(fwhdr_to_cmp)) return BFI_IOC_IMG_VER_BETTER; } if (fwhdr_to_cmp->fwver.phase > base_fwhdr->fwver.phase) return BFI_IOC_IMG_VER_BETTER; else if (fwhdr_to_cmp->fwver.phase < base_fwhdr->fwver.phase) return BFI_IOC_IMG_VER_OLD; if (fwhdr_to_cmp->fwver.build > base_fwhdr->fwver.build) return BFI_IOC_IMG_VER_BETTER; else if (fwhdr_to_cmp->fwver.build < base_fwhdr->fwver.build) return BFI_IOC_IMG_VER_OLD; /* * All Version Numbers are equal. * Md5 check to be done as a part of compatibility check. */ return BFI_IOC_IMG_VER_SAME; } #define BFA_FLASH_PART_FWIMG_ADDR 0x100000 /* fw image address */ bfa_status_t bfa_ioc_flash_img_get_chnk(struct bfa_ioc_s *ioc, u32 off, u32 *fwimg) { return bfa_flash_raw_read(ioc->pcidev.pci_bar_kva, BFA_FLASH_PART_FWIMG_ADDR + (off * sizeof(u32)), (char *)fwimg, BFI_FLASH_CHUNK_SZ); } static enum bfi_ioc_img_ver_cmp_e bfa_ioc_flash_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *base_fwhdr) { struct bfi_ioc_image_hdr_s *flash_fwhdr; bfa_status_t status; u32 fwimg[BFI_FLASH_CHUNK_SZ_WORDS]; status = bfa_ioc_flash_img_get_chnk(ioc, 0, fwimg); if (status != BFA_STATUS_OK) return BFI_IOC_IMG_VER_INCOMP; flash_fwhdr = (struct bfi_ioc_image_hdr_s *) fwimg; if (bfa_ioc_flash_fwver_valid(flash_fwhdr) == BFA_TRUE) return bfa_ioc_fw_ver_patch_cmp(base_fwhdr, flash_fwhdr); else return BFI_IOC_IMG_VER_INCOMP; } /* * Invalidate fwver signature */ bfa_status_t bfa_ioc_fwsig_invalidate(struct bfa_ioc_s *ioc) { u32 pgnum; u32 loff = 0; enum bfi_ioc_state ioc_fwstate; ioc_fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc); if (!bfa_ioc_state_disabled(ioc_fwstate)) return BFA_STATUS_ADAPTER_ENABLED; pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff); writel(pgnum, ioc->ioc_regs.host_page_num_fn); bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, BFA_IOC_FW_INV_SIGN); return BFA_STATUS_OK; } /* * Conditionally flush any pending message from firmware at start. */ static void bfa_ioc_msgflush(struct bfa_ioc_s *ioc) { u32 r32; r32 = readl(ioc->ioc_regs.lpu_mbox_cmd); if (r32) writel(1, ioc->ioc_regs.lpu_mbox_cmd); } static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force) { enum bfi_ioc_state ioc_fwstate; bfa_boolean_t fwvalid; u32 boot_type; u32 boot_env; ioc_fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc); if (force) ioc_fwstate = BFI_IOC_UNINIT; bfa_trc(ioc, ioc_fwstate); boot_type = BFI_FWBOOT_TYPE_NORMAL; boot_env = BFI_FWBOOT_ENV_OS; /* * check if firmware is valid */ fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ? BFA_FALSE : bfa_ioc_fwver_valid(ioc, boot_env); if (!fwvalid) { if (bfa_ioc_boot(ioc, boot_type, boot_env) == BFA_STATUS_OK) bfa_ioc_poll_fwinit(ioc); return; } /* * If hardware initialization is in progress (initialized by other IOC), * just wait for an initialization completion interrupt. */ if (ioc_fwstate == BFI_IOC_INITING) { bfa_ioc_poll_fwinit(ioc); return; } /* * If IOC function is disabled and firmware version is same, * just re-enable IOC. * * If option rom, IOC must not be in operational state. With * convergence, IOC will be in operational state when 2nd driver * is loaded. */ if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) { /* * When using MSI-X any pending firmware ready event should * be flushed. Otherwise MSI-X interrupts are not delivered. 
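* After the flush, IOCPF_E_FWREADY is sent straight to the IOCPF state machine since the firmware that is already running does not need to be downloaded again.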
*/ bfa_ioc_msgflush(ioc); bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY); return; } /* * Initialize the h/w for any other states. */ if (bfa_ioc_boot(ioc, boot_type, boot_env) == BFA_STATUS_OK) bfa_ioc_poll_fwinit(ioc); } static void bfa_ioc_timeout(void *ioc_arg) { struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg; bfa_trc(ioc, 0); bfa_fsm_send_event(ioc, IOC_E_TIMEOUT); } void bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len) { u32 *msgp = (u32 *) ioc_msg; u32 i; bfa_trc(ioc, msgp[0]); bfa_trc(ioc, len); WARN_ON(len > BFI_IOC_MSGLEN_MAX); /* * first write msg to mailbox registers */ for (i = 0; i < len / sizeof(u32); i++) writel(cpu_to_le32(msgp[i]), ioc->ioc_regs.hfn_mbox + i * sizeof(u32)); for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++) writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32)); /* * write 1 to mailbox CMD to trigger LPU event */ writel(1, ioc->ioc_regs.hfn_mbox_cmd); (void) readl(ioc->ioc_regs.hfn_mbox_cmd); } static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc) { struct bfi_ioc_ctrl_req_s enable_req; bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ, bfa_ioc_portid(ioc)); enable_req.clscode = cpu_to_be16(ioc->clscode); /* unsigned 32-bit time_t overflow in y2106 */ enable_req.tv_sec = be32_to_cpu(ktime_get_real_seconds()); bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s)); } static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc) { struct bfi_ioc_ctrl_req_s disable_req; bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ, bfa_ioc_portid(ioc)); disable_req.clscode = cpu_to_be16(ioc->clscode); /* unsigned 32-bit time_t overflow in y2106 */ disable_req.tv_sec = be32_to_cpu(ktime_get_real_seconds()); bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s)); } static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc) { struct bfi_ioc_getattr_req_s attr_req; bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ, bfa_ioc_portid(ioc)); bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa); bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req)); } static void bfa_ioc_hb_check(void *cbarg) { struct bfa_ioc_s *ioc = cbarg; u32 hb_count; hb_count = readl(ioc->ioc_regs.heartbeat); if (ioc->hb_count == hb_count) { bfa_ioc_recover(ioc); return; } else { ioc->hb_count = hb_count; } bfa_ioc_mbox_poll(ioc); bfa_hb_timer_start(ioc); } static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc) { ioc->hb_count = readl(ioc->ioc_regs.heartbeat); bfa_hb_timer_start(ioc); } /* * Initiate a full firmware download. 
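* The image is read either from adapter flash (BFI_FWBOOT_TYPE_FLASH), one BFI_FLASH_CHUNK_SZ chunk at a time, or from the driver-embedded image, and is written word by word into SMEM with page-wrap handling; the boot type, boot env and device mode words are then stored at their fixed SMEM offsets.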
*/ static bfa_status_t bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env) { u32 *fwimg; u32 pgnum; u32 loff = 0; u32 chunkno = 0; u32 i; u32 asicmode; u32 fwimg_size; u32 fwimg_buf[BFI_FLASH_CHUNK_SZ_WORDS]; bfa_status_t status; if (boot_env == BFI_FWBOOT_ENV_OS && boot_type == BFI_FWBOOT_TYPE_FLASH) { fwimg_size = BFI_FLASH_IMAGE_SZ/sizeof(u32); status = bfa_ioc_flash_img_get_chnk(ioc, BFA_IOC_FLASH_CHUNK_ADDR(chunkno), fwimg_buf); if (status != BFA_STATUS_OK) return status; fwimg = fwimg_buf; } else { fwimg_size = bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)); fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), BFA_IOC_FLASH_CHUNK_ADDR(chunkno)); } bfa_trc(ioc, fwimg_size); pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff); writel(pgnum, ioc->ioc_regs.host_page_num_fn); for (i = 0; i < fwimg_size; i++) { if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) { chunkno = BFA_IOC_FLASH_CHUNK_NO(i); if (boot_env == BFI_FWBOOT_ENV_OS && boot_type == BFI_FWBOOT_TYPE_FLASH) { status = bfa_ioc_flash_img_get_chnk(ioc, BFA_IOC_FLASH_CHUNK_ADDR(chunkno), fwimg_buf); if (status != BFA_STATUS_OK) return status; fwimg = fwimg_buf; } else { fwimg = bfa_cb_image_get_chunk( bfa_ioc_asic_gen(ioc), BFA_IOC_FLASH_CHUNK_ADDR(chunkno)); } } /* * write smem */ bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]); loff += sizeof(u32); /* * handle page offset wrap around */ loff = PSS_SMEM_PGOFF(loff); if (loff == 0) { pgnum++; writel(pgnum, ioc->ioc_regs.host_page_num_fn); } } writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0), ioc->ioc_regs.host_page_num_fn); /* * Set boot type, env and device mode at the end. */ if (boot_env == BFI_FWBOOT_ENV_OS && boot_type == BFI_FWBOOT_TYPE_FLASH) { boot_type = BFI_FWBOOT_TYPE_NORMAL; } asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode, ioc->port0_mode, ioc->port1_mode); bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_DEVMODE_OFF, swab32(asicmode)); bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_TYPE_OFF, swab32(boot_type)); bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_ENV_OFF, swab32(boot_env)); return BFA_STATUS_OK; } /* * Update BFA configuration from firmware configuration. */ static void bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc) { struct bfi_ioc_attr_s *attr = ioc->attr; attr->adapter_prop = be32_to_cpu(attr->adapter_prop); attr->card_type = be32_to_cpu(attr->card_type); attr->maxfrsize = be16_to_cpu(attr->maxfrsize); ioc->fcmode = (attr->port_mode == BFI_PORT_MODE_FC); attr->mfg_year = be16_to_cpu(attr->mfg_year); bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR); } /* * Attach time initialization of mbox logic. */ static void bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc) { struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; int mc; INIT_LIST_HEAD(&mod->cmd_q); for (mc = 0; mc < BFI_MC_MAX; mc++) { mod->mbhdlr[mc].cbfn = NULL; mod->mbhdlr[mc].cbarg = ioc->bfa; } } /* * Mbox poll timer -- restarts any pending mailbox requests. */ static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc) { struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; struct bfa_mbox_cmd_s *cmd; u32 stat; /* * If no command pending, do nothing */ if (list_empty(&mod->cmd_q)) return; /* * If previous command is not yet fetched by firmware, do nothing */ stat = readl(ioc->ioc_regs.hfn_mbox_cmd); if (stat) return; /* * Enqueue command to firmware. */ bfa_q_deq(&mod->cmd_q, &cmd); bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg)); } /* * Cleanup any pending requests. 
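* Dequeues and drops every command still sitting on the mailbox command queue; no completion callbacks are invoked for the discarded requests.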
*/ static void bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc) { struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; struct bfa_mbox_cmd_s *cmd; while (!list_empty(&mod->cmd_q)) bfa_q_deq(&mod->cmd_q, &cmd); } /* * Read data from SMEM to host through PCI memmap * * @param[in] ioc memory for IOC * @param[in] tbuf app memory to store data from smem * @param[in] soff smem offset * @param[in] sz size of smem in bytes */ static bfa_status_t bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz) { u32 pgnum, loff; __be32 r32; int i, len; u32 *buf = tbuf; pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff); loff = PSS_SMEM_PGOFF(soff); bfa_trc(ioc, pgnum); bfa_trc(ioc, loff); bfa_trc(ioc, sz); /* * Hold semaphore to serialize pll init and fwtrc. */ if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) { bfa_trc(ioc, 0); return BFA_STATUS_FAILED; } writel(pgnum, ioc->ioc_regs.host_page_num_fn); len = sz/sizeof(u32); bfa_trc(ioc, len); for (i = 0; i < len; i++) { r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff); buf[i] = swab32(r32); loff += sizeof(u32); /* * handle page offset wrap around */ loff = PSS_SMEM_PGOFF(loff); if (loff == 0) { pgnum++; writel(pgnum, ioc->ioc_regs.host_page_num_fn); } } writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0), ioc->ioc_regs.host_page_num_fn); /* * release semaphore. */ readl(ioc->ioc_regs.ioc_init_sem_reg); writel(1, ioc->ioc_regs.ioc_init_sem_reg); bfa_trc(ioc, pgnum); return BFA_STATUS_OK; } /* * Clear SMEM data from host through PCI memmap * * @param[in] ioc memory for IOC * @param[in] soff smem offset * @param[in] sz size of smem in bytes */ static bfa_status_t bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz) { int i, len; u32 pgnum, loff; pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff); loff = PSS_SMEM_PGOFF(soff); bfa_trc(ioc, pgnum); bfa_trc(ioc, loff); bfa_trc(ioc, sz); /* * Hold semaphore to serialize pll init and fwtrc. */ if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) { bfa_trc(ioc, 0); return BFA_STATUS_FAILED; } writel(pgnum, ioc->ioc_regs.host_page_num_fn); len = sz/sizeof(u32); /* len in words */ bfa_trc(ioc, len); for (i = 0; i < len; i++) { bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0); loff += sizeof(u32); /* * handle page offset wrap around */ loff = PSS_SMEM_PGOFF(loff); if (loff == 0) { pgnum++; writel(pgnum, ioc->ioc_regs.host_page_num_fn); } } writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0), ioc->ioc_regs.host_page_num_fn); /* * release semaphore. */ readl(ioc->ioc_regs.ioc_init_sem_reg); writel(1, ioc->ioc_regs.ioc_init_sem_reg); bfa_trc(ioc, pgnum); return BFA_STATUS_OK; } static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc) { struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; /* * Notify driver and common modules registered for notification. */ ioc->cbfn->hbfail_cbfn(ioc->bfa); bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED); bfa_ioc_debug_save_ftrc(ioc); BFA_LOG(KERN_CRIT, bfad, bfa_log_level, "Heart Beat of IOC has failed\n"); bfa_ioc_aen_post(ioc, BFA_IOC_AEN_HBFAIL); } static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc) { struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; /* * Provide enable completion callback. */ ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); BFA_LOG(KERN_WARNING, bfad, bfa_log_level, "Running firmware version is incompatible " "with the driver version\n"); bfa_ioc_aen_post(ioc, BFA_IOC_AEN_FWMISMATCH); } bfa_status_t bfa_ioc_pll_init(struct bfa_ioc_s *ioc) { /* * Hold semaphore so that nobody can access the chip during init. 
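* The init semaphore serializes PLL setup and LMEM initialization against SMEM accesses such as bfa_ioc_smem_read(); it is released at the end with a dummy read followed by writing 1 to the register.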
*/ bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg); bfa_ioc_pll_init_asic(ioc); ioc->pllinit = BFA_TRUE; /* * Initialize LMEM */ bfa_ioc_lmem_init(ioc); /* * release semaphore. */ readl(ioc->ioc_regs.ioc_init_sem_reg); writel(1, ioc->ioc_regs.ioc_init_sem_reg); return BFA_STATUS_OK; } /* * Interface used by diag module to do firmware boot with memory test * as the entry vector. */ bfa_status_t bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env) { struct bfi_ioc_image_hdr_s *drv_fwhdr; bfa_status_t status; bfa_ioc_stats(ioc, ioc_boots); if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK) return BFA_STATUS_FAILED; if (boot_env == BFI_FWBOOT_ENV_OS && boot_type == BFI_FWBOOT_TYPE_NORMAL) { drv_fwhdr = (struct bfi_ioc_image_hdr_s *) bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0); /* * Work with Flash iff flash f/w is better than driver f/w. * Otherwise push drivers firmware. */ if (bfa_ioc_flash_fwver_cmp(ioc, drv_fwhdr) == BFI_IOC_IMG_VER_BETTER) boot_type = BFI_FWBOOT_TYPE_FLASH; } /* * Initialize IOC state of all functions on a chip reset. */ if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) { bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_MEMTEST); bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_MEMTEST); } else { bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_INITING); bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_INITING); } bfa_ioc_msgflush(ioc); status = bfa_ioc_download_fw(ioc, boot_type, boot_env); if (status == BFA_STATUS_OK) bfa_ioc_lpu_start(ioc); else { WARN_ON(boot_type == BFI_FWBOOT_TYPE_MEMTEST); bfa_iocpf_timeout(ioc); } return status; } /* * Enable/disable IOC failure auto recovery. */ void bfa_ioc_auto_recover(bfa_boolean_t auto_recover) { bfa_auto_recover = auto_recover; } bfa_boolean_t bfa_ioc_is_operational(struct bfa_ioc_s *ioc) { return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op); } bfa_boolean_t bfa_ioc_is_initialized(struct bfa_ioc_s *ioc) { u32 r32 = bfa_ioc_get_cur_ioc_fwstate(ioc); return ((r32 != BFI_IOC_UNINIT) && (r32 != BFI_IOC_INITING) && (r32 != BFI_IOC_MEMTEST)); } bfa_boolean_t bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg) { __be32 *msgp = mbmsg; u32 r32; int i; r32 = readl(ioc->ioc_regs.lpu_mbox_cmd); if ((r32 & 1) == 0) return BFA_FALSE; /* * read the MBOX msg */ for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32)); i++) { r32 = readl(ioc->ioc_regs.lpu_mbox + i * sizeof(u32)); msgp[i] = cpu_to_be32(r32); } /* * turn off mailbox interrupt by clearing mailbox status */ writel(1, ioc->ioc_regs.lpu_mbox_cmd); readl(ioc->ioc_regs.lpu_mbox_cmd); return BFA_TRUE; } void bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m) { union bfi_ioc_i2h_msg_u *msg; struct bfa_iocpf_s *iocpf = &ioc->iocpf; msg = (union bfi_ioc_i2h_msg_u *) m; bfa_ioc_stats(ioc, ioc_isrs); switch (msg->mh.msg_id) { case BFI_IOC_I2H_HBEAT: break; case BFI_IOC_I2H_ENABLE_REPLY: ioc->port_mode = ioc->port_mode_cfg = (enum bfa_mode_s)msg->fw_event.port_mode; ioc->ad_cap_bm = msg->fw_event.cap_bm; bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE); break; case BFI_IOC_I2H_DISABLE_REPLY: bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE); break; case BFI_IOC_I2H_GETATTR_REPLY: bfa_ioc_getattr_reply(ioc); break; default: bfa_trc(ioc, msg->mh.msg_id); WARN_ON(1); } } /* * IOC attach time initialization and setup. 
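* Wires up the driver callbacks, timer module and mailbox module, initializes the notify queue, and drives the IOC state machine to uninit before posting IOC_E_RESET.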
* * @param[in] ioc memory for IOC * @param[in] bfa driver instance structure */ void bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn, struct bfa_timer_mod_s *timer_mod) { ioc->bfa = bfa; ioc->cbfn = cbfn; ioc->timer_mod = timer_mod; ioc->fcmode = BFA_FALSE; ioc->pllinit = BFA_FALSE; ioc->dbg_fwsave_once = BFA_TRUE; ioc->iocpf.ioc = ioc; bfa_ioc_mbox_attach(ioc); INIT_LIST_HEAD(&ioc->notify_q); bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); bfa_fsm_send_event(ioc, IOC_E_RESET); } /* * Driver detach time IOC cleanup. */ void bfa_ioc_detach(struct bfa_ioc_s *ioc) { bfa_fsm_send_event(ioc, IOC_E_DETACH); INIT_LIST_HEAD(&ioc->notify_q); } /* * Setup IOC PCI properties. * * @param[in] pcidev PCI device information for this IOC */ void bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev, enum bfi_pcifn_class clscode) { ioc->clscode = clscode; ioc->pcidev = *pcidev; /* * Initialize IOC and device personality */ ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC; ioc->asic_mode = BFI_ASIC_MODE_FC; switch (pcidev->device_id) { case BFA_PCI_DEVICE_ID_FC_8G1P: case BFA_PCI_DEVICE_ID_FC_8G2P: ioc->asic_gen = BFI_ASIC_GEN_CB; ioc->fcmode = BFA_TRUE; ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA; ioc->ad_cap_bm = BFA_CM_HBA; break; case BFA_PCI_DEVICE_ID_CT: ioc->asic_gen = BFI_ASIC_GEN_CT; ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH; ioc->asic_mode = BFI_ASIC_MODE_ETH; ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA; ioc->ad_cap_bm = BFA_CM_CNA; break; case BFA_PCI_DEVICE_ID_CT_FC: ioc->asic_gen = BFI_ASIC_GEN_CT; ioc->fcmode = BFA_TRUE; ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA; ioc->ad_cap_bm = BFA_CM_HBA; break; case BFA_PCI_DEVICE_ID_CT2: case BFA_PCI_DEVICE_ID_CT2_QUAD: ioc->asic_gen = BFI_ASIC_GEN_CT2; if (clscode == BFI_PCIFN_CLASS_FC && pcidev->ssid == BFA_PCI_CT2_SSID_FC) { ioc->asic_mode = BFI_ASIC_MODE_FC16; ioc->fcmode = BFA_TRUE; ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA; ioc->ad_cap_bm = BFA_CM_HBA; } else { ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH; ioc->asic_mode = BFI_ASIC_MODE_ETH; if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) { ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA; ioc->ad_cap_bm = BFA_CM_CNA; } else { ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_NIC; ioc->ad_cap_bm = BFA_CM_NIC; } } break; default: WARN_ON(1); } /* * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c */ if (ioc->asic_gen == BFI_ASIC_GEN_CB) bfa_ioc_set_cb_hwif(ioc); else if (ioc->asic_gen == BFI_ASIC_GEN_CT) bfa_ioc_set_ct_hwif(ioc); else { WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2); bfa_ioc_set_ct2_hwif(ioc); bfa_ioc_ct2_poweron(ioc); } bfa_ioc_map_port(ioc); bfa_ioc_reg_init(ioc); } /* * Initialize IOC dma memory * * @param[in] dm_kva kernel virtual address of IOC dma memory * @param[in] dm_pa physical address of IOC dma memory */ void bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa) { /* * dma memory for firmware attribute */ ioc->attr_dma.kva = dm_kva; ioc->attr_dma.pa = dm_pa; ioc->attr = (struct bfi_ioc_attr_s *) dm_kva; } void bfa_ioc_enable(struct bfa_ioc_s *ioc) { bfa_ioc_stats(ioc, ioc_enables); ioc->dbg_fwsave_once = BFA_TRUE; bfa_fsm_send_event(ioc, IOC_E_ENABLE); } void bfa_ioc_disable(struct bfa_ioc_s *ioc) { bfa_ioc_stats(ioc, ioc_disables); bfa_fsm_send_event(ioc, IOC_E_DISABLE); } void bfa_ioc_suspend(struct bfa_ioc_s *ioc) { ioc->dbg_fwsave_once = BFA_TRUE; bfa_fsm_send_event(ioc, IOC_E_HWERROR); } /* * Initialize memory for saving firmware trace. 
Driver must initialize * trace memory before call bfa_ioc_enable(). */ void bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave) { ioc->dbg_fwsave = dbg_fwsave; ioc->dbg_fwsave_len = BFA_DBG_FWTRC_LEN; } /* * Register mailbox message handler functions * * @param[in] ioc IOC instance * @param[in] mcfuncs message class handler functions */ void bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs) { struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; int mc; for (mc = 0; mc < BFI_MC_MAX; mc++) mod->mbhdlr[mc].cbfn = mcfuncs[mc]; } /* * Register mailbox message handler function, to be called by common modules */ void bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc, bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg) { struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; mod->mbhdlr[mc].cbfn = cbfn; mod->mbhdlr[mc].cbarg = cbarg; } /* * Queue a mailbox command request to firmware. Waits if mailbox is busy. * Responsibility of caller to serialize * * @param[in] ioc IOC instance * @param[i] cmd Mailbox command */ void bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd) { struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; u32 stat; /* * If a previous command is pending, queue new command */ if (!list_empty(&mod->cmd_q)) { list_add_tail(&cmd->qe, &mod->cmd_q); return; } /* * If mailbox is busy, queue command for poll timer */ stat = readl(ioc->ioc_regs.hfn_mbox_cmd); if (stat) { list_add_tail(&cmd->qe, &mod->cmd_q); return; } /* * mailbox is free -- queue command to firmware */ bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg)); } /* * Handle mailbox interrupts */ void bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc) { struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; struct bfi_mbmsg_s m; int mc; if (bfa_ioc_msgget(ioc, &m)) { /* * Treat IOC message class as special. */ mc = m.mh.msg_class; if (mc == BFI_MC_IOC) { bfa_ioc_isr(ioc, &m); return; } if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL)) return; mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m); } bfa_ioc_lpu_read_stat(ioc); /* * Try to send pending mailbox commands */ bfa_ioc_mbox_poll(ioc); } void bfa_ioc_error_isr(struct bfa_ioc_s *ioc) { bfa_ioc_stats(ioc, ioc_hbfails); ioc->stats.hb_count = ioc->hb_count; bfa_fsm_send_event(ioc, IOC_E_HWERROR); } /* * return true if IOC is disabled */ bfa_boolean_t bfa_ioc_is_disabled(struct bfa_ioc_s *ioc) { return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) || bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled); } /* * return true if IOC firmware is different. */ bfa_boolean_t bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc) { return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) || bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_fwcheck) || bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_mismatch); } /* * Check if adapter is disabled -- both IOCs should be in a disabled * state. */ bfa_boolean_t bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc) { u32 ioc_state; if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled)) return BFA_FALSE; ioc_state = bfa_ioc_get_cur_ioc_fwstate(ioc); if (!bfa_ioc_state_disabled(ioc_state)) return BFA_FALSE; if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) { ioc_state = bfa_ioc_get_cur_ioc_fwstate(ioc); if (!bfa_ioc_state_disabled(ioc_state)) return BFA_FALSE; } return BFA_TRUE; } /* * Reset IOC fwstate registers. 
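* Both the current and the alternate firmware state registers are forced back to BFI_IOC_UNINIT.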
*/ void bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc) { bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_UNINIT); bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_UNINIT); } #define BFA_MFG_NAME "QLogic" void bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc, struct bfa_adapter_attr_s *ad_attr) { struct bfi_ioc_attr_s *ioc_attr; ioc_attr = ioc->attr; bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num); bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver); bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver); bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer); memcpy(&ad_attr->vpd, &ioc_attr->vpd, sizeof(struct bfa_mfg_vpd_s)); ad_attr->nports = bfa_ioc_get_nports(ioc); ad_attr->max_speed = bfa_ioc_speed_sup(ioc); bfa_ioc_get_adapter_model(ioc, ad_attr->model); /* For now, model descr uses same model string */ bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr); ad_attr->card_type = ioc_attr->card_type; ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type); if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop)) ad_attr->prototype = 1; else ad_attr->prototype = 0; ad_attr->pwwn = ioc->attr->pwwn; ad_attr->mac = bfa_ioc_get_mac(ioc); ad_attr->pcie_gen = ioc_attr->pcie_gen; ad_attr->pcie_lanes = ioc_attr->pcie_lanes; ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig; ad_attr->asic_rev = ioc_attr->asic_rev; bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver); ad_attr->cna_capable = bfa_ioc_is_cna(ioc); ad_attr->trunk_capable = (ad_attr->nports > 1) && !bfa_ioc_is_cna(ioc) && !ad_attr->is_mezz; ad_attr->mfg_day = ioc_attr->mfg_day; ad_attr->mfg_month = ioc_attr->mfg_month; ad_attr->mfg_year = ioc_attr->mfg_year; memcpy(ad_attr->uuid, ioc_attr->uuid, BFA_ADAPTER_UUID_LEN); } enum bfa_ioc_type_e bfa_ioc_get_type(struct bfa_ioc_s *ioc) { if (ioc->clscode == BFI_PCIFN_CLASS_ETH) return BFA_IOC_TYPE_LL; WARN_ON(ioc->clscode != BFI_PCIFN_CLASS_FC); return (ioc->attr->port_mode == BFI_PORT_MODE_FC) ? 
BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE; } void bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num) { memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN); memcpy((void *)serial_num, (void *)ioc->attr->brcd_serialnum, BFA_ADAPTER_SERIAL_NUM_LEN); } void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver) { memset((void *)fw_ver, 0, BFA_VERSION_LEN); memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN); } void bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev) { WARN_ON(!chip_rev); memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN); chip_rev[0] = 'R'; chip_rev[1] = 'e'; chip_rev[2] = 'v'; chip_rev[3] = '-'; chip_rev[4] = ioc->attr->asic_rev; chip_rev[5] = '\0'; } void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver) { memset((void *)optrom_ver, 0, BFA_VERSION_LEN); memcpy(optrom_ver, ioc->attr->optrom_version, BFA_VERSION_LEN); } void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer) { memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN); strscpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN); } void bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model) { struct bfi_ioc_attr_s *ioc_attr; u8 nports = bfa_ioc_get_nports(ioc); WARN_ON(!model); memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN); ioc_attr = ioc->attr; if (bfa_asic_id_ct2(ioc->pcidev.device_id) && (!bfa_mfg_is_mezz(ioc_attr->card_type))) snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u-%u%s", BFA_MFG_NAME, ioc_attr->card_type, nports, "p"); else snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u", BFA_MFG_NAME, ioc_attr->card_type); } enum bfa_ioc_state bfa_ioc_get_state(struct bfa_ioc_s *ioc) { enum bfa_iocpf_state iocpf_st; enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm); if (ioc_st == BFA_IOC_ENABLING || ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) { iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm); switch (iocpf_st) { case BFA_IOCPF_SEMWAIT: ioc_st = BFA_IOC_SEMWAIT; break; case BFA_IOCPF_HWINIT: ioc_st = BFA_IOC_HWINIT; break; case BFA_IOCPF_FWMISMATCH: ioc_st = BFA_IOC_FWMISMATCH; break; case BFA_IOCPF_FAIL: ioc_st = BFA_IOC_FAIL; break; case BFA_IOCPF_INITFAIL: ioc_st = BFA_IOC_INITFAIL; break; default: break; } } return ioc_st; } void bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr) { memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s)); ioc_attr->state = bfa_ioc_get_state(ioc); ioc_attr->port_id = bfa_ioc_portid(ioc); ioc_attr->port_mode = ioc->port_mode; ioc_attr->port_mode_cfg = ioc->port_mode_cfg; ioc_attr->cap_bm = ioc->ad_cap_bm; ioc_attr->ioc_type = bfa_ioc_get_type(ioc); bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr); ioc_attr->pci_attr.device_id = bfa_ioc_devid(ioc); ioc_attr->pci_attr.pcifn = bfa_ioc_pcifn(ioc); ioc_attr->def_fn = (bfa_ioc_pcifn(ioc) == bfa_ioc_portid(ioc)); bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev); } mac_t bfa_ioc_get_mac(struct bfa_ioc_s *ioc) { /* * Check the IOC type and return the appropriate MAC */ if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE) return ioc->attr->fcoe_mac; else return ioc->attr->mac; } mac_t bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc) { mac_t m; m = ioc->attr->mfg_mac; if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type)) m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc); else bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]), bfa_ioc_pcifn(ioc)); return m; } /* * Send AEN notification */ void bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event) { 
struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; struct bfa_aen_entry_s *aen_entry; enum bfa_ioc_type_e ioc_type; bfad_get_aen_entry(bfad, aen_entry); if (!aen_entry) return; ioc_type = bfa_ioc_get_type(ioc); switch (ioc_type) { case BFA_IOC_TYPE_FC: aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn; break; case BFA_IOC_TYPE_FCoE: aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn; aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc); break; case BFA_IOC_TYPE_LL: aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc); break; default: WARN_ON(ioc_type != BFA_IOC_TYPE_FC); break; } /* Send the AEN notification */ aen_entry->aen_data.ioc.ioc_type = ioc_type; bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq, BFA_AEN_CAT_IOC, event); } /* * Retrieve saved firmware trace from a prior IOC failure. */ bfa_status_t bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen) { int tlen; if (ioc->dbg_fwsave_len == 0) return BFA_STATUS_ENOFSAVE; tlen = *trclen; if (tlen > ioc->dbg_fwsave_len) tlen = ioc->dbg_fwsave_len; memcpy(trcdata, ioc->dbg_fwsave, tlen); *trclen = tlen; return BFA_STATUS_OK; } /* * Retrieve saved firmware trace from a prior IOC failure. */ bfa_status_t bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen) { u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc)); int tlen; bfa_status_t status; bfa_trc(ioc, *trclen); tlen = *trclen; if (tlen > BFA_DBG_FWTRC_LEN) tlen = BFA_DBG_FWTRC_LEN; status = bfa_ioc_smem_read(ioc, trcdata, loff, tlen); *trclen = tlen; return status; } static void bfa_ioc_send_fwsync(struct bfa_ioc_s *ioc) { struct bfa_mbox_cmd_s cmd; struct bfi_ioc_ctrl_req_s *req = (struct bfi_ioc_ctrl_req_s *) cmd.msg; bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC, bfa_ioc_portid(ioc)); req->clscode = cpu_to_be16(ioc->clscode); bfa_ioc_mbox_queue(ioc, &cmd); } static void bfa_ioc_fwsync(struct bfa_ioc_s *ioc) { u32 fwsync_iter = 1000; bfa_ioc_send_fwsync(ioc); /* * After sending a fw sync mbox command wait for it to * take effect. We will not wait for a response because * 1. fw_sync mbox cmd doesn't have a response. * 2. Even if we implement that, interrupts might not * be enabled when we call this function. * So, just keep checking if any mbox cmd is pending, and * after waiting for a reasonable amount of time, go ahead. * It is possible that fw has crashed and the mbox command * is never acknowledged. */ while (bfa_ioc_mbox_cmd_pending(ioc) && fwsync_iter > 0) fwsync_iter--; } /* * Dump firmware smem */ bfa_status_t bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf, u32 *offset, int *buflen) { u32 loff; int dlen; bfa_status_t status; u32 smem_len = BFA_IOC_FW_SMEM_SIZE(ioc); if (*offset >= smem_len) { *offset = *buflen = 0; return BFA_STATUS_EINVAL; } loff = *offset; dlen = *buflen; /* * First smem read, sync smem before proceeding * No need to sync before reading every chunk. 
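* bfa_ioc_fwsync() queues a BFI_IOC_H2I_DBG_SYNC mailbox command and then busy-waits (bounded by fwsync_iter) for the mailbox to drain, since that command carries no response.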
*/ if (loff == 0) bfa_ioc_fwsync(ioc); if ((loff + dlen) >= smem_len) dlen = smem_len - loff; status = bfa_ioc_smem_read(ioc, buf, loff, dlen); if (status != BFA_STATUS_OK) { *offset = *buflen = 0; return status; } *offset += dlen; if (*offset >= smem_len) *offset = 0; *buflen = dlen; return status; } /* * Firmware statistics */ bfa_status_t bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats) { u32 loff = BFI_IOC_FWSTATS_OFF + \ BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc)); int tlen; bfa_status_t status; if (ioc->stats_busy) { bfa_trc(ioc, ioc->stats_busy); return BFA_STATUS_DEVBUSY; } ioc->stats_busy = BFA_TRUE; tlen = sizeof(struct bfa_fw_stats_s); status = bfa_ioc_smem_read(ioc, stats, loff, tlen); ioc->stats_busy = BFA_FALSE; return status; } bfa_status_t bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc) { u32 loff = BFI_IOC_FWSTATS_OFF + \ BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc)); int tlen; bfa_status_t status; if (ioc->stats_busy) { bfa_trc(ioc, ioc->stats_busy); return BFA_STATUS_DEVBUSY; } ioc->stats_busy = BFA_TRUE; tlen = sizeof(struct bfa_fw_stats_s); status = bfa_ioc_smem_clr(ioc, loff, tlen); ioc->stats_busy = BFA_FALSE; return status; } /* * Save firmware trace if configured. */ void bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc) { int tlen; if (ioc->dbg_fwsave_once) { ioc->dbg_fwsave_once = BFA_FALSE; if (ioc->dbg_fwsave_len) { tlen = ioc->dbg_fwsave_len; bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen); } } } /* * Firmware failure detected. Start recovery actions. */ static void bfa_ioc_recover(struct bfa_ioc_s *ioc) { bfa_ioc_stats(ioc, ioc_hbfails); ioc->stats.hb_count = ioc->hb_count; bfa_fsm_send_event(ioc, IOC_E_HBFAIL); } /* * BFA IOC PF private functions */ static void bfa_iocpf_timeout(void *ioc_arg) { struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg; bfa_trc(ioc, 0); bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT); } static void bfa_iocpf_sem_timeout(void *ioc_arg) { struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg; bfa_ioc_hw_sem_get(ioc); } static void bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc) { u32 fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc); bfa_trc(ioc, fwstate); if (fwstate == BFI_IOC_DISABLED) { bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY); return; } if (ioc->iocpf.poll_time >= (3 * BFA_IOC_TOV)) bfa_iocpf_timeout(ioc); else { ioc->iocpf.poll_time += BFA_IOC_POLL_TOV; bfa_iocpf_poll_timer_start(ioc); } } static void bfa_iocpf_poll_timeout(void *ioc_arg) { struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg; bfa_ioc_poll_fwinit(ioc); } /* * bfa timer function */ void bfa_timer_beat(struct bfa_timer_mod_s *mod) { struct list_head *qh = &mod->timer_q; struct list_head *qe, *qe_next; struct bfa_timer_s *elem; struct list_head timedout_q; INIT_LIST_HEAD(&timedout_q); qe = bfa_q_next(qh); while (qe != qh) { qe_next = bfa_q_next(qe); elem = (struct bfa_timer_s *) qe; if (elem->timeout <= BFA_TIMER_FREQ) { elem->timeout = 0; list_del(&elem->qe); list_add_tail(&elem->qe, &timedout_q); } else { elem->timeout -= BFA_TIMER_FREQ; } qe = qe_next; /* go to next elem */ } /* * Pop all the timeout entries */ while (!list_empty(&timedout_q)) { bfa_q_deq(&timedout_q, &elem); elem->timercb(elem->arg); } } /* * Should be called with lock protection */ void bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer, void (*timercb) (void *), void *arg, unsigned int timeout) { WARN_ON(timercb == NULL); WARN_ON(bfa_q_is_on_q(&mod->timer_q, timer)); timer->timeout = timeout; timer->timercb = timercb; timer->arg = arg; list_add_tail(&timer->qe, 
&mod->timer_q); } /* * Should be called with lock protection */ void bfa_timer_stop(struct bfa_timer_s *timer) { WARN_ON(list_empty(&timer->qe)); list_del(&timer->qe); } /* * ASIC block related */ static void bfa_ablk_config_swap(struct bfa_ablk_cfg_s *cfg) { struct bfa_ablk_cfg_inst_s *cfg_inst; int i, j; u16 be16; for (i = 0; i < BFA_ABLK_MAX; i++) { cfg_inst = &cfg->inst[i]; for (j = 0; j < BFA_ABLK_MAX_PFS; j++) { be16 = cfg_inst->pf_cfg[j].pers; cfg_inst->pf_cfg[j].pers = be16_to_cpu(be16); be16 = cfg_inst->pf_cfg[j].num_qpairs; cfg_inst->pf_cfg[j].num_qpairs = be16_to_cpu(be16); be16 = cfg_inst->pf_cfg[j].num_vectors; cfg_inst->pf_cfg[j].num_vectors = be16_to_cpu(be16); be16 = cfg_inst->pf_cfg[j].bw_min; cfg_inst->pf_cfg[j].bw_min = be16_to_cpu(be16); be16 = cfg_inst->pf_cfg[j].bw_max; cfg_inst->pf_cfg[j].bw_max = be16_to_cpu(be16); } } } static void bfa_ablk_isr(void *cbarg, struct bfi_mbmsg_s *msg) { struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg; struct bfi_ablk_i2h_rsp_s *rsp = (struct bfi_ablk_i2h_rsp_s *)msg; bfa_ablk_cbfn_t cbfn; WARN_ON(msg->mh.msg_class != BFI_MC_ABLK); bfa_trc(ablk->ioc, msg->mh.msg_id); switch (msg->mh.msg_id) { case BFI_ABLK_I2H_QUERY: if (rsp->status == BFA_STATUS_OK) { memcpy(ablk->cfg, ablk->dma_addr.kva, sizeof(struct bfa_ablk_cfg_s)); bfa_ablk_config_swap(ablk->cfg); ablk->cfg = NULL; } break; case BFI_ABLK_I2H_ADPT_CONFIG: case BFI_ABLK_I2H_PORT_CONFIG: /* update config port mode */ ablk->ioc->port_mode_cfg = rsp->port_mode; break; case BFI_ABLK_I2H_PF_DELETE: case BFI_ABLK_I2H_PF_UPDATE: case BFI_ABLK_I2H_OPTROM_ENABLE: case BFI_ABLK_I2H_OPTROM_DISABLE: /* No-op */ break; case BFI_ABLK_I2H_PF_CREATE: *(ablk->pcifn) = rsp->pcifn; ablk->pcifn = NULL; break; default: WARN_ON(1); } ablk->busy = BFA_FALSE; if (ablk->cbfn) { cbfn = ablk->cbfn; ablk->cbfn = NULL; cbfn(ablk->cbarg, rsp->status); } } static void bfa_ablk_notify(void *cbarg, enum bfa_ioc_event_e event) { struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg; bfa_trc(ablk->ioc, event); switch (event) { case BFA_IOC_E_ENABLED: WARN_ON(ablk->busy != BFA_FALSE); break; case BFA_IOC_E_DISABLED: case BFA_IOC_E_FAILED: /* Fail any pending requests */ ablk->pcifn = NULL; if (ablk->busy) { if (ablk->cbfn) ablk->cbfn(ablk->cbarg, BFA_STATUS_FAILED); ablk->cbfn = NULL; ablk->busy = BFA_FALSE; } break; default: WARN_ON(1); break; } } u32 bfa_ablk_meminfo(void) { return BFA_ROUNDUP(sizeof(struct bfa_ablk_cfg_s), BFA_DMA_ALIGN_SZ); } void bfa_ablk_memclaim(struct bfa_ablk_s *ablk, u8 *dma_kva, u64 dma_pa) { ablk->dma_addr.kva = dma_kva; ablk->dma_addr.pa = dma_pa; } void bfa_ablk_attach(struct bfa_ablk_s *ablk, struct bfa_ioc_s *ioc) { ablk->ioc = ioc; bfa_ioc_mbox_regisr(ablk->ioc, BFI_MC_ABLK, bfa_ablk_isr, ablk); bfa_q_qe_init(&ablk->ioc_notify); bfa_ioc_notify_init(&ablk->ioc_notify, bfa_ablk_notify, ablk); list_add_tail(&ablk->ioc_notify.qe, &ablk->ioc->notify_q); } bfa_status_t bfa_ablk_query(struct bfa_ablk_s *ablk, struct bfa_ablk_cfg_s *ablk_cfg, bfa_ablk_cbfn_t cbfn, void *cbarg) { struct bfi_ablk_h2i_query_s *m; WARN_ON(!ablk_cfg); if (!bfa_ioc_is_operational(ablk->ioc)) { bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE); return BFA_STATUS_IOC_FAILURE; } if (ablk->busy) { bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY); return BFA_STATUS_DEVBUSY; } ablk->cfg = ablk_cfg; ablk->cbfn = cbfn; ablk->cbarg = cbarg; ablk->busy = BFA_TRUE; m = (struct bfi_ablk_h2i_query_s *)ablk->mb.msg; bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_QUERY, bfa_ioc_portid(ablk->ioc)); bfa_dma_be_addr_set(m->addr, 
ablk->dma_addr.pa); bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb); return BFA_STATUS_OK; } bfa_status_t bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn, u8 port, enum bfi_pcifn_class personality, u16 bw_min, u16 bw_max, bfa_ablk_cbfn_t cbfn, void *cbarg) { struct bfi_ablk_h2i_pf_req_s *m; if (!bfa_ioc_is_operational(ablk->ioc)) { bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE); return BFA_STATUS_IOC_FAILURE; } if (ablk->busy) { bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY); return BFA_STATUS_DEVBUSY; } ablk->pcifn = pcifn; ablk->cbfn = cbfn; ablk->cbarg = cbarg; ablk->busy = BFA_TRUE; m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg; bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_CREATE, bfa_ioc_portid(ablk->ioc)); m->pers = cpu_to_be16((u16)personality); m->bw_min = cpu_to_be16(bw_min); m->bw_max = cpu_to_be16(bw_max); m->port = port; bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb); return BFA_STATUS_OK; } bfa_status_t bfa_ablk_pf_delete(struct bfa_ablk_s *ablk, int pcifn, bfa_ablk_cbfn_t cbfn, void *cbarg) { struct bfi_ablk_h2i_pf_req_s *m; if (!bfa_ioc_is_operational(ablk->ioc)) { bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE); return BFA_STATUS_IOC_FAILURE; } if (ablk->busy) { bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY); return BFA_STATUS_DEVBUSY; } ablk->cbfn = cbfn; ablk->cbarg = cbarg; ablk->busy = BFA_TRUE; m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg; bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_DELETE, bfa_ioc_portid(ablk->ioc)); m->pcifn = (u8)pcifn; bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb); return BFA_STATUS_OK; } bfa_status_t bfa_ablk_adapter_config(struct bfa_ablk_s *ablk, enum bfa_mode_s mode, int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg) { struct bfi_ablk_h2i_cfg_req_s *m; if (!bfa_ioc_is_operational(ablk->ioc)) { bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE); return BFA_STATUS_IOC_FAILURE; } if (ablk->busy) { bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY); return BFA_STATUS_DEVBUSY; } ablk->cbfn = cbfn; ablk->cbarg = cbarg; ablk->busy = BFA_TRUE; m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg; bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_ADPT_CONFIG, bfa_ioc_portid(ablk->ioc)); m->mode = (u8)mode; m->max_pf = (u8)max_pf; m->max_vf = (u8)max_vf; bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb); return BFA_STATUS_OK; } bfa_status_t bfa_ablk_port_config(struct bfa_ablk_s *ablk, int port, enum bfa_mode_s mode, int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg) { struct bfi_ablk_h2i_cfg_req_s *m; if (!bfa_ioc_is_operational(ablk->ioc)) { bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE); return BFA_STATUS_IOC_FAILURE; } if (ablk->busy) { bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY); return BFA_STATUS_DEVBUSY; } ablk->cbfn = cbfn; ablk->cbarg = cbarg; ablk->busy = BFA_TRUE; m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg; bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PORT_CONFIG, bfa_ioc_portid(ablk->ioc)); m->port = (u8)port; m->mode = (u8)mode; m->max_pf = (u8)max_pf; m->max_vf = (u8)max_vf; bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb); return BFA_STATUS_OK; } bfa_status_t bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, u16 bw_min, u16 bw_max, bfa_ablk_cbfn_t cbfn, void *cbarg) { struct bfi_ablk_h2i_pf_req_s *m; if (!bfa_ioc_is_operational(ablk->ioc)) { bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE); return BFA_STATUS_IOC_FAILURE; } if (ablk->busy) { bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY); return BFA_STATUS_DEVBUSY; } ablk->cbfn = cbfn; ablk->cbarg = cbarg; ablk->busy = BFA_TRUE; m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg; bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_UPDATE, 
bfa_ioc_portid(ablk->ioc)); m->pcifn = (u8)pcifn; m->bw_min = cpu_to_be16(bw_min); m->bw_max = cpu_to_be16(bw_max); bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb); return BFA_STATUS_OK; } bfa_status_t bfa_ablk_optrom_en(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg) { struct bfi_ablk_h2i_optrom_s *m; if (!bfa_ioc_is_operational(ablk->ioc)) { bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE); return BFA_STATUS_IOC_FAILURE; } if (ablk->busy) { bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY); return BFA_STATUS_DEVBUSY; } ablk->cbfn = cbfn; ablk->cbarg = cbarg; ablk->busy = BFA_TRUE; m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg; bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_ENABLE, bfa_ioc_portid(ablk->ioc)); bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb); return BFA_STATUS_OK; } bfa_status_t bfa_ablk_optrom_dis(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg) { struct bfi_ablk_h2i_optrom_s *m; if (!bfa_ioc_is_operational(ablk->ioc)) { bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE); return BFA_STATUS_IOC_FAILURE; } if (ablk->busy) { bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY); return BFA_STATUS_DEVBUSY; } ablk->cbfn = cbfn; ablk->cbarg = cbarg; ablk->busy = BFA_TRUE; m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg; bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_DISABLE, bfa_ioc_portid(ablk->ioc)); bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb); return BFA_STATUS_OK; } /* * SFP module specific */ /* forward declarations */ static void bfa_sfp_getdata_send(struct bfa_sfp_s *sfp); static void bfa_sfp_media_get(struct bfa_sfp_s *sfp); static bfa_status_t bfa_sfp_speed_valid(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed); static void bfa_cb_sfp_show(struct bfa_sfp_s *sfp) { bfa_trc(sfp, sfp->lock); if (sfp->cbfn) sfp->cbfn(sfp->cbarg, sfp->status); sfp->lock = 0; sfp->cbfn = NULL; } static void bfa_cb_sfp_state_query(struct bfa_sfp_s *sfp) { bfa_trc(sfp, sfp->portspeed); if (sfp->media) { bfa_sfp_media_get(sfp); if (sfp->state_query_cbfn) sfp->state_query_cbfn(sfp->state_query_cbarg, sfp->status); sfp->media = NULL; } if (sfp->portspeed) { sfp->status = bfa_sfp_speed_valid(sfp, sfp->portspeed); if (sfp->state_query_cbfn) sfp->state_query_cbfn(sfp->state_query_cbarg, sfp->status); sfp->portspeed = BFA_PORT_SPEED_UNKNOWN; } sfp->state_query_lock = 0; sfp->state_query_cbfn = NULL; } /* * IOC event handler. 
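* On IOC disable or failure, any in-flight sfp show request and any pending state query are completed with BFA_STATUS_IOC_FAILURE so their callers are not left hanging.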
*/ static void bfa_sfp_notify(void *sfp_arg, enum bfa_ioc_event_e event) { struct bfa_sfp_s *sfp = sfp_arg; bfa_trc(sfp, event); bfa_trc(sfp, sfp->lock); bfa_trc(sfp, sfp->state_query_lock); switch (event) { case BFA_IOC_E_DISABLED: case BFA_IOC_E_FAILED: if (sfp->lock) { sfp->status = BFA_STATUS_IOC_FAILURE; bfa_cb_sfp_show(sfp); } if (sfp->state_query_lock) { sfp->status = BFA_STATUS_IOC_FAILURE; bfa_cb_sfp_state_query(sfp); } break; default: break; } } /* * SFP's State Change Notification post to AEN */ static void bfa_sfp_scn_aen_post(struct bfa_sfp_s *sfp, struct bfi_sfp_scn_s *rsp) { struct bfad_s *bfad = (struct bfad_s *)sfp->ioc->bfa->bfad; struct bfa_aen_entry_s *aen_entry; enum bfa_port_aen_event aen_evt = 0; bfa_trc(sfp, (((u64)rsp->pomlvl) << 16) | (((u64)rsp->sfpid) << 8) | ((u64)rsp->event)); bfad_get_aen_entry(bfad, aen_entry); if (!aen_entry) return; aen_entry->aen_data.port.ioc_type = bfa_ioc_get_type(sfp->ioc); aen_entry->aen_data.port.pwwn = sfp->ioc->attr->pwwn; aen_entry->aen_data.port.mac = bfa_ioc_get_mac(sfp->ioc); switch (rsp->event) { case BFA_SFP_SCN_INSERTED: aen_evt = BFA_PORT_AEN_SFP_INSERT; break; case BFA_SFP_SCN_REMOVED: aen_evt = BFA_PORT_AEN_SFP_REMOVE; break; case BFA_SFP_SCN_FAILED: aen_evt = BFA_PORT_AEN_SFP_ACCESS_ERROR; break; case BFA_SFP_SCN_UNSUPPORT: aen_evt = BFA_PORT_AEN_SFP_UNSUPPORT; break; case BFA_SFP_SCN_POM: aen_evt = BFA_PORT_AEN_SFP_POM; aen_entry->aen_data.port.level = rsp->pomlvl; break; default: bfa_trc(sfp, rsp->event); WARN_ON(1); } /* Send the AEN notification */ bfad_im_post_vendor_event(aen_entry, bfad, ++sfp->ioc->ioc_aen_seq, BFA_AEN_CAT_PORT, aen_evt); } /* * SFP get data send */ static void bfa_sfp_getdata_send(struct bfa_sfp_s *sfp) { struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg; bfa_trc(sfp, req->memtype); /* build host command */ bfi_h2i_set(req->mh, BFI_MC_SFP, BFI_SFP_H2I_SHOW, bfa_ioc_portid(sfp->ioc)); /* send mbox cmd */ bfa_ioc_mbox_queue(sfp->ioc, &sfp->mbcmd); } /* * SFP is valid, read sfp data */ static void bfa_sfp_getdata(struct bfa_sfp_s *sfp, enum bfi_sfp_mem_e memtype) { struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg; WARN_ON(sfp->lock != 0); bfa_trc(sfp, sfp->state); sfp->lock = 1; sfp->memtype = memtype; req->memtype = memtype; /* Setup SG list */ bfa_alen_set(&req->alen, sizeof(struct sfp_mem_s), sfp->dbuf_pa); bfa_sfp_getdata_send(sfp); } /* * SFP scn handler */ static void bfa_sfp_scn(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg) { struct bfi_sfp_scn_s *rsp = (struct bfi_sfp_scn_s *) msg; switch (rsp->event) { case BFA_SFP_SCN_INSERTED: sfp->state = BFA_SFP_STATE_INSERTED; sfp->data_valid = 0; bfa_sfp_scn_aen_post(sfp, rsp); break; case BFA_SFP_SCN_REMOVED: sfp->state = BFA_SFP_STATE_REMOVED; sfp->data_valid = 0; bfa_sfp_scn_aen_post(sfp, rsp); break; case BFA_SFP_SCN_FAILED: sfp->state = BFA_SFP_STATE_FAILED; sfp->data_valid = 0; bfa_sfp_scn_aen_post(sfp, rsp); break; case BFA_SFP_SCN_UNSUPPORT: sfp->state = BFA_SFP_STATE_UNSUPPORT; bfa_sfp_scn_aen_post(sfp, rsp); if (!sfp->lock) bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL); break; case BFA_SFP_SCN_POM: bfa_sfp_scn_aen_post(sfp, rsp); break; case BFA_SFP_SCN_VALID: sfp->state = BFA_SFP_STATE_VALID; if (!sfp->lock) bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL); break; default: bfa_trc(sfp, rsp->event); WARN_ON(1); } } /* * SFP show complete */ static void bfa_sfp_show_comp(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg) { struct bfi_sfp_rsp_s *rsp = (struct bfi_sfp_rsp_s *) msg; if (!sfp->lock) { /* * receiving response 
after ioc failure */ bfa_trc(sfp, sfp->lock); return; } bfa_trc(sfp, rsp->status); if (rsp->status == BFA_STATUS_OK) { sfp->data_valid = 1; if (sfp->state == BFA_SFP_STATE_VALID) sfp->status = BFA_STATUS_OK; else if (sfp->state == BFA_SFP_STATE_UNSUPPORT) sfp->status = BFA_STATUS_SFP_UNSUPP; else bfa_trc(sfp, sfp->state); } else { sfp->data_valid = 0; sfp->status = rsp->status; /* sfpshow shouldn't change sfp state */ } bfa_trc(sfp, sfp->memtype); if (sfp->memtype == BFI_SFP_MEM_DIAGEXT) { bfa_trc(sfp, sfp->data_valid); if (sfp->data_valid) { u32 size = sizeof(struct sfp_mem_s); u8 *des = (u8 *)(sfp->sfpmem); memcpy(des, sfp->dbuf_kva, size); } /* * Queue completion callback. */ bfa_cb_sfp_show(sfp); } else sfp->lock = 0; bfa_trc(sfp, sfp->state_query_lock); if (sfp->state_query_lock) { sfp->state = rsp->state; /* Complete callback */ bfa_cb_sfp_state_query(sfp); } } /* * SFP query fw sfp state */ static void bfa_sfp_state_query(struct bfa_sfp_s *sfp) { struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg; /* Should not be doing query if not in _INIT state */ WARN_ON(sfp->state != BFA_SFP_STATE_INIT); WARN_ON(sfp->state_query_lock != 0); bfa_trc(sfp, sfp->state); sfp->state_query_lock = 1; req->memtype = 0; if (!sfp->lock) bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL); } static void bfa_sfp_media_get(struct bfa_sfp_s *sfp) { enum bfa_defs_sfp_media_e *media = sfp->media; *media = BFA_SFP_MEDIA_UNKNOWN; if (sfp->state == BFA_SFP_STATE_UNSUPPORT) *media = BFA_SFP_MEDIA_UNSUPPORT; else if (sfp->state == BFA_SFP_STATE_VALID) { union sfp_xcvr_e10g_code_u e10g; struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva; u16 xmtr_tech = (sfpmem->srlid_base.xcvr[4] & 0x3) << 7 | (sfpmem->srlid_base.xcvr[5] >> 1); e10g.b = sfpmem->srlid_base.xcvr[0]; bfa_trc(sfp, e10g.b); bfa_trc(sfp, xmtr_tech); /* check fc transmitter tech */ if ((xmtr_tech & SFP_XMTR_TECH_CU) || (xmtr_tech & SFP_XMTR_TECH_CP) || (xmtr_tech & SFP_XMTR_TECH_CA)) *media = BFA_SFP_MEDIA_CU; else if ((xmtr_tech & SFP_XMTR_TECH_EL_INTRA) || (xmtr_tech & SFP_XMTR_TECH_EL_INTER)) *media = BFA_SFP_MEDIA_EL; else if ((xmtr_tech & SFP_XMTR_TECH_LL) || (xmtr_tech & SFP_XMTR_TECH_LC)) *media = BFA_SFP_MEDIA_LW; else if ((xmtr_tech & SFP_XMTR_TECH_SL) || (xmtr_tech & SFP_XMTR_TECH_SN) || (xmtr_tech & SFP_XMTR_TECH_SA)) *media = BFA_SFP_MEDIA_SW; /* Check 10G Ethernet Compilance code */ else if (e10g.r.e10g_sr) *media = BFA_SFP_MEDIA_SW; else if (e10g.r.e10g_lrm && e10g.r.e10g_lr) *media = BFA_SFP_MEDIA_LW; else if (e10g.r.e10g_unall) *media = BFA_SFP_MEDIA_UNKNOWN; else bfa_trc(sfp, 0); } else bfa_trc(sfp, sfp->state); } static bfa_status_t bfa_sfp_speed_valid(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed) { struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva; struct sfp_xcvr_s *xcvr = (struct sfp_xcvr_s *) sfpmem->srlid_base.xcvr; union sfp_xcvr_fc3_code_u fc3 = xcvr->fc3; union sfp_xcvr_e10g_code_u e10g = xcvr->e10g; if (portspeed == BFA_PORT_SPEED_10GBPS) { if (e10g.r.e10g_sr || e10g.r.e10g_lr) return BFA_STATUS_OK; else { bfa_trc(sfp, e10g.b); return BFA_STATUS_UNSUPP_SPEED; } } if (((portspeed & BFA_PORT_SPEED_16GBPS) && fc3.r.mb1600) || ((portspeed & BFA_PORT_SPEED_8GBPS) && fc3.r.mb800) || ((portspeed & BFA_PORT_SPEED_4GBPS) && fc3.r.mb400) || ((portspeed & BFA_PORT_SPEED_2GBPS) && fc3.r.mb200) || ((portspeed & BFA_PORT_SPEED_1GBPS) && fc3.r.mb100)) return BFA_STATUS_OK; else { bfa_trc(sfp, portspeed); bfa_trc(sfp, fc3.b); bfa_trc(sfp, e10g.b); return BFA_STATUS_UNSUPP_SPEED; } } /* * SFP hmbox handler */ void 
bfa_sfp_intr(void *sfparg, struct bfi_mbmsg_s *msg) { struct bfa_sfp_s *sfp = sfparg; switch (msg->mh.msg_id) { case BFI_SFP_I2H_SHOW: bfa_sfp_show_comp(sfp, msg); break; case BFI_SFP_I2H_SCN: bfa_sfp_scn(sfp, msg); break; default: bfa_trc(sfp, msg->mh.msg_id); WARN_ON(1); } } /* * Return DMA memory needed by sfp module. */ u32 bfa_sfp_meminfo(void) { return BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ); } /* * Attach virtual and physical memory for SFP. */ void bfa_sfp_attach(struct bfa_sfp_s *sfp, struct bfa_ioc_s *ioc, void *dev, struct bfa_trc_mod_s *trcmod) { sfp->dev = dev; sfp->ioc = ioc; sfp->trcmod = trcmod; sfp->cbfn = NULL; sfp->cbarg = NULL; sfp->sfpmem = NULL; sfp->lock = 0; sfp->data_valid = 0; sfp->state = BFA_SFP_STATE_INIT; sfp->state_query_lock = 0; sfp->state_query_cbfn = NULL; sfp->state_query_cbarg = NULL; sfp->media = NULL; sfp->portspeed = BFA_PORT_SPEED_UNKNOWN; sfp->is_elb = BFA_FALSE; bfa_ioc_mbox_regisr(sfp->ioc, BFI_MC_SFP, bfa_sfp_intr, sfp); bfa_q_qe_init(&sfp->ioc_notify); bfa_ioc_notify_init(&sfp->ioc_notify, bfa_sfp_notify, sfp); list_add_tail(&sfp->ioc_notify.qe, &sfp->ioc->notify_q); } /* * Claim Memory for SFP */ void bfa_sfp_memclaim(struct bfa_sfp_s *sfp, u8 *dm_kva, u64 dm_pa) { sfp->dbuf_kva = dm_kva; sfp->dbuf_pa = dm_pa; memset(sfp->dbuf_kva, 0, sizeof(struct sfp_mem_s)); dm_kva += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ); dm_pa += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ); } /* * Show SFP eeprom content * * @param[in] sfp - bfa sfp module * * @param[out] sfpmem - sfp eeprom data * */ bfa_status_t bfa_sfp_show(struct bfa_sfp_s *sfp, struct sfp_mem_s *sfpmem, bfa_cb_sfp_t cbfn, void *cbarg) { if (!bfa_ioc_is_operational(sfp->ioc)) { bfa_trc(sfp, 0); return BFA_STATUS_IOC_NON_OP; } if (sfp->lock) { bfa_trc(sfp, 0); return BFA_STATUS_DEVBUSY; } sfp->cbfn = cbfn; sfp->cbarg = cbarg; sfp->sfpmem = sfpmem; bfa_sfp_getdata(sfp, BFI_SFP_MEM_DIAGEXT); return BFA_STATUS_OK; } /* * Return SFP Media type * * @param[in] sfp - bfa sfp module * * @param[out] media - port speed from user * */ bfa_status_t bfa_sfp_media(struct bfa_sfp_s *sfp, enum bfa_defs_sfp_media_e *media, bfa_cb_sfp_t cbfn, void *cbarg) { if (!bfa_ioc_is_operational(sfp->ioc)) { bfa_trc(sfp, 0); return BFA_STATUS_IOC_NON_OP; } sfp->media = media; if (sfp->state == BFA_SFP_STATE_INIT) { if (sfp->state_query_lock) { bfa_trc(sfp, 0); return BFA_STATUS_DEVBUSY; } else { sfp->state_query_cbfn = cbfn; sfp->state_query_cbarg = cbarg; bfa_sfp_state_query(sfp); return BFA_STATUS_SFP_NOT_READY; } } bfa_sfp_media_get(sfp); return BFA_STATUS_OK; } /* * Check if user set port speed is allowed by the SFP * * @param[in] sfp - bfa sfp module * @param[in] portspeed - port speed from user * */ bfa_status_t bfa_sfp_speed(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed, bfa_cb_sfp_t cbfn, void *cbarg) { WARN_ON(portspeed == BFA_PORT_SPEED_UNKNOWN); if (!bfa_ioc_is_operational(sfp->ioc)) return BFA_STATUS_IOC_NON_OP; /* For Mezz card, all speed is allowed */ if (bfa_mfg_is_mezz(sfp->ioc->attr->card_type)) return BFA_STATUS_OK; /* Check SFP state */ sfp->portspeed = portspeed; if (sfp->state == BFA_SFP_STATE_INIT) { if (sfp->state_query_lock) { bfa_trc(sfp, 0); return BFA_STATUS_DEVBUSY; } else { sfp->state_query_cbfn = cbfn; sfp->state_query_cbarg = cbarg; bfa_sfp_state_query(sfp); return BFA_STATUS_SFP_NOT_READY; } } if (sfp->state == BFA_SFP_STATE_REMOVED || sfp->state == BFA_SFP_STATE_FAILED) { bfa_trc(sfp, sfp->state); return BFA_STATUS_NO_SFP_DEV; } if (sfp->state == 
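/*
 * Editor's note -- illustrative sketch only, not part of the driver.
 * bfa_sfp_meminfo() and bfa_sfp_memclaim() above size and carve the SFP
 * DMA buffer with BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ).
 * Assuming BFA_ROUNDUP is the usual power-of-two round-up (an assumption
 * about the macro; the definition lives elsewhere in the driver headers),
 * the idiom is:
 *
 *   #define ROUNDUP(len, align)  (((len) + (align) - 1) & ~((align) - 1))
 *
 *   // e.g. a 300-byte structure with a 256-byte DMA alignment:
 *   // ROUNDUP(300, 256) == 512, so memclaim advances both dm_kva and
 *   // dm_pa by 512 and the next module gets an aligned slice.
 */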
BFA_SFP_STATE_INSERTED) { bfa_trc(sfp, sfp->state); return BFA_STATUS_DEVBUSY; /* sfp is reading data */ } /* For eloopback, all speed is allowed */ if (sfp->is_elb) return BFA_STATUS_OK; return bfa_sfp_speed_valid(sfp, portspeed); } /* * Flash module specific */ /* * FLASH DMA buffer should be big enough to hold both MFG block and * asic block(64k) at the same time and also should be 2k aligned to * avoid write segement to cross sector boundary. */ #define BFA_FLASH_SEG_SZ 2048 #define BFA_FLASH_DMA_BUF_SZ \ BFA_ROUNDUP(0x010000 + sizeof(struct bfa_mfg_block_s), BFA_FLASH_SEG_SZ) static void bfa_flash_aen_audit_post(struct bfa_ioc_s *ioc, enum bfa_audit_aen_event event, int inst, int type) { struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; struct bfa_aen_entry_s *aen_entry; bfad_get_aen_entry(bfad, aen_entry); if (!aen_entry) return; aen_entry->aen_data.audit.pwwn = ioc->attr->pwwn; aen_entry->aen_data.audit.partition_inst = inst; aen_entry->aen_data.audit.partition_type = type; /* Send the AEN notification */ bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq, BFA_AEN_CAT_AUDIT, event); } static void bfa_flash_cb(struct bfa_flash_s *flash) { flash->op_busy = 0; if (flash->cbfn) flash->cbfn(flash->cbarg, flash->status); } static void bfa_flash_notify(void *cbarg, enum bfa_ioc_event_e event) { struct bfa_flash_s *flash = cbarg; bfa_trc(flash, event); switch (event) { case BFA_IOC_E_DISABLED: case BFA_IOC_E_FAILED: if (flash->op_busy) { flash->status = BFA_STATUS_IOC_FAILURE; flash->cbfn(flash->cbarg, flash->status); flash->op_busy = 0; } break; default: break; } } /* * Send flash attribute query request. * * @param[in] cbarg - callback argument */ static void bfa_flash_query_send(void *cbarg) { struct bfa_flash_s *flash = cbarg; struct bfi_flash_query_req_s *msg = (struct bfi_flash_query_req_s *) flash->mb.msg; bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ, bfa_ioc_portid(flash->ioc)); bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr_s), flash->dbuf_pa); bfa_ioc_mbox_queue(flash->ioc, &flash->mb); } /* * Send flash write request. * * @param[in] cbarg - callback argument */ static void bfa_flash_write_send(struct bfa_flash_s *flash) { struct bfi_flash_write_req_s *msg = (struct bfi_flash_write_req_s *) flash->mb.msg; u32 len; msg->type = be32_to_cpu(flash->type); msg->instance = flash->instance; msg->offset = be32_to_cpu(flash->addr_off + flash->offset); len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ? flash->residue : BFA_FLASH_DMA_BUF_SZ; msg->length = be32_to_cpu(len); /* indicate if it's the last msg of the whole write operation */ msg->last = (len == flash->residue) ? 1 : 0; bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ, bfa_ioc_portid(flash->ioc)); bfa_alen_set(&msg->alen, len, flash->dbuf_pa); memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len); bfa_ioc_mbox_queue(flash->ioc, &flash->mb); flash->residue -= len; flash->offset += len; } /* * Send flash read request. * * @param[in] cbarg - callback argument */ static void bfa_flash_read_send(void *cbarg) { struct bfa_flash_s *flash = cbarg; struct bfi_flash_read_req_s *msg = (struct bfi_flash_read_req_s *) flash->mb.msg; u32 len; msg->type = be32_to_cpu(flash->type); msg->instance = flash->instance; msg->offset = be32_to_cpu(flash->addr_off + flash->offset); len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ? 
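/*
 * Editor's note -- illustrative sketch only, not part of the driver.
 * This is the chunking step of bfa_flash_write_send(): the total request
 * is tracked in residue/offset, each mailbox message carries at most one
 * DMA buffer's worth, and msg->last is set only on the final chunk.
 * The same bookkeeping in isolation (the buffer size is illustrative):
 *
 *   enum { CHUNK = 0x8000 };            // stand-in for the DMA buffer size
 *
 *   void send_in_chunks(unsigned int residue)
 *   {
 *           unsigned int offset = 0;
 *           while (residue) {
 *                   unsigned int len = residue < CHUNK ? residue : CHUNK;
 *                   int last = (len == residue);   // 1 only on final chunk
 *                   // ... queue one mailbox write of 'len' bytes here ...
 *                   residue -= len;
 *                   offset += len;
 *                   (void)offset; (void)last;
 *           }
 *   }
 *
 * In the driver the loop is implicit: the next chunk is sent from the
 * WRITE_RSP handler in bfa_flash_intr() until residue reaches zero.
 */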
flash->residue : BFA_FLASH_DMA_BUF_SZ; msg->length = be32_to_cpu(len); bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ, bfa_ioc_portid(flash->ioc)); bfa_alen_set(&msg->alen, len, flash->dbuf_pa); bfa_ioc_mbox_queue(flash->ioc, &flash->mb); } /* * Send flash erase request. * * @param[in] cbarg - callback argument */ static void bfa_flash_erase_send(void *cbarg) { struct bfa_flash_s *flash = cbarg; struct bfi_flash_erase_req_s *msg = (struct bfi_flash_erase_req_s *) flash->mb.msg; msg->type = be32_to_cpu(flash->type); msg->instance = flash->instance; bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_ERASE_REQ, bfa_ioc_portid(flash->ioc)); bfa_ioc_mbox_queue(flash->ioc, &flash->mb); } /* * Process flash response messages upon receiving interrupts. * * @param[in] flasharg - flash structure * @param[in] msg - message structure */ static void bfa_flash_intr(void *flasharg, struct bfi_mbmsg_s *msg) { struct bfa_flash_s *flash = flasharg; u32 status; union { struct bfi_flash_query_rsp_s *query; struct bfi_flash_erase_rsp_s *erase; struct bfi_flash_write_rsp_s *write; struct bfi_flash_read_rsp_s *read; struct bfi_flash_event_s *event; struct bfi_mbmsg_s *msg; } m; m.msg = msg; bfa_trc(flash, msg->mh.msg_id); if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT) { /* receiving response after ioc failure */ bfa_trc(flash, 0x9999); return; } switch (msg->mh.msg_id) { case BFI_FLASH_I2H_QUERY_RSP: status = be32_to_cpu(m.query->status); bfa_trc(flash, status); if (status == BFA_STATUS_OK) { u32 i; struct bfa_flash_attr_s *attr, *f; attr = (struct bfa_flash_attr_s *) flash->ubuf; f = (struct bfa_flash_attr_s *) flash->dbuf_kva; attr->status = be32_to_cpu(f->status); attr->npart = be32_to_cpu(f->npart); bfa_trc(flash, attr->status); bfa_trc(flash, attr->npart); for (i = 0; i < attr->npart; i++) { attr->part[i].part_type = be32_to_cpu(f->part[i].part_type); attr->part[i].part_instance = be32_to_cpu(f->part[i].part_instance); attr->part[i].part_off = be32_to_cpu(f->part[i].part_off); attr->part[i].part_size = be32_to_cpu(f->part[i].part_size); attr->part[i].part_len = be32_to_cpu(f->part[i].part_len); attr->part[i].part_status = be32_to_cpu(f->part[i].part_status); } } flash->status = status; bfa_flash_cb(flash); break; case BFI_FLASH_I2H_ERASE_RSP: status = be32_to_cpu(m.erase->status); bfa_trc(flash, status); flash->status = status; bfa_flash_cb(flash); break; case BFI_FLASH_I2H_WRITE_RSP: status = be32_to_cpu(m.write->status); bfa_trc(flash, status); if (status != BFA_STATUS_OK || flash->residue == 0) { flash->status = status; bfa_flash_cb(flash); } else { bfa_trc(flash, flash->offset); bfa_flash_write_send(flash); } break; case BFI_FLASH_I2H_READ_RSP: status = be32_to_cpu(m.read->status); bfa_trc(flash, status); if (status != BFA_STATUS_OK) { flash->status = status; bfa_flash_cb(flash); } else { u32 len = be32_to_cpu(m.read->length); bfa_trc(flash, flash->offset); bfa_trc(flash, len); memcpy(flash->ubuf + flash->offset, flash->dbuf_kva, len); flash->residue -= len; flash->offset += len; if (flash->residue == 0) { flash->status = status; bfa_flash_cb(flash); } else bfa_flash_read_send(flash); } break; case BFI_FLASH_I2H_BOOT_VER_RSP: break; case BFI_FLASH_I2H_EVENT: status = be32_to_cpu(m.event->status); bfa_trc(flash, status); if (status == BFA_STATUS_BAD_FWCFG) bfa_ioc_aen_post(flash->ioc, BFA_IOC_AEN_FWCFG_ERROR); else if (status == BFA_STATUS_INVALID_VENDOR) { u32 param; param = be32_to_cpu(m.event->param); bfa_trc(flash, param); bfa_ioc_aen_post(flash->ioc, 
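/*
 * Editor's note -- illustrative sketch only, not part of the driver.
 * The QUERY_RSP branch above copies the firmware-written (big-endian)
 * bfa_flash_attr_s out of the DMA buffer field by field through
 * be32_to_cpu(). Where every field of a block is a 32-bit big-endian
 * word, the same conversion can be written generically (hypothetical
 * helper, not a driver API):
 *
 *   void be32_block_to_cpu(unsigned int *dst, const unsigned int *src,
 *                          unsigned int nwords)
 *   {
 *           unsigned int i;
 *           for (i = 0; i < nwords; i++)
 *                   dst[i] = be32_to_cpu(src[i]);
 *   }
 *
 * The driver spells the fields out explicitly, which also documents the
 * attribute and partition-table layout at the point of use.
 */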
BFA_IOC_AEN_INVALID_VENDOR); } break; default: WARN_ON(1); } } /* * Flash memory info API. * * @param[in] mincfg - minimal cfg variable */ u32 bfa_flash_meminfo(bfa_boolean_t mincfg) { /* min driver doesn't need flash */ if (mincfg) return 0; return BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); } /* * Flash attach API. * * @param[in] flash - flash structure * @param[in] ioc - ioc structure * @param[in] dev - device structure * @param[in] trcmod - trace module * @param[in] logmod - log module */ void bfa_flash_attach(struct bfa_flash_s *flash, struct bfa_ioc_s *ioc, void *dev, struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg) { flash->ioc = ioc; flash->trcmod = trcmod; flash->cbfn = NULL; flash->cbarg = NULL; flash->op_busy = 0; bfa_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash); bfa_q_qe_init(&flash->ioc_notify); bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash); list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q); /* min driver doesn't need flash */ if (mincfg) { flash->dbuf_kva = NULL; flash->dbuf_pa = 0; } } /* * Claim memory for flash * * @param[in] flash - flash structure * @param[in] dm_kva - pointer to virtual memory address * @param[in] dm_pa - physical memory address * @param[in] mincfg - minimal cfg variable */ void bfa_flash_memclaim(struct bfa_flash_s *flash, u8 *dm_kva, u64 dm_pa, bfa_boolean_t mincfg) { if (mincfg) return; flash->dbuf_kva = dm_kva; flash->dbuf_pa = dm_pa; memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ); dm_kva += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); dm_pa += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); } /* * Get flash attribute. * * @param[in] flash - flash structure * @param[in] attr - flash attribute structure * @param[in] cbfn - callback function * @param[in] cbarg - callback argument * * Return status. */ bfa_status_t bfa_flash_get_attr(struct bfa_flash_s *flash, struct bfa_flash_attr_s *attr, bfa_cb_flash_t cbfn, void *cbarg) { bfa_trc(flash, BFI_FLASH_H2I_QUERY_REQ); if (!bfa_ioc_is_operational(flash->ioc)) return BFA_STATUS_IOC_NON_OP; if (flash->op_busy) { bfa_trc(flash, flash->op_busy); return BFA_STATUS_DEVBUSY; } flash->op_busy = 1; flash->cbfn = cbfn; flash->cbarg = cbarg; flash->ubuf = (u8 *) attr; bfa_flash_query_send(flash); return BFA_STATUS_OK; } /* * Erase flash partition. * * @param[in] flash - flash structure * @param[in] type - flash partition type * @param[in] instance - flash partition instance * @param[in] cbfn - callback function * @param[in] cbarg - callback argument * * Return status. */ bfa_status_t bfa_flash_erase_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type, u8 instance, bfa_cb_flash_t cbfn, void *cbarg) { bfa_trc(flash, BFI_FLASH_H2I_ERASE_REQ); bfa_trc(flash, type); bfa_trc(flash, instance); if (!bfa_ioc_is_operational(flash->ioc)) return BFA_STATUS_IOC_NON_OP; if (flash->op_busy) { bfa_trc(flash, flash->op_busy); return BFA_STATUS_DEVBUSY; } flash->op_busy = 1; flash->cbfn = cbfn; flash->cbarg = cbarg; flash->type = type; flash->instance = instance; bfa_flash_erase_send(flash); bfa_flash_aen_audit_post(flash->ioc, BFA_AUDIT_AEN_FLASH_ERASE, instance, type); return BFA_STATUS_OK; } /* * Update flash partition. 
* * @param[in] flash - flash structure * @param[in] type - flash partition type * @param[in] instance - flash partition instance * @param[in] buf - update data buffer * @param[in] len - data buffer length * @param[in] offset - offset relative to the partition starting address * @param[in] cbfn - callback function * @param[in] cbarg - callback argument * * Return status. */ bfa_status_t bfa_flash_update_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type, u8 instance, void *buf, u32 len, u32 offset, bfa_cb_flash_t cbfn, void *cbarg) { bfa_trc(flash, BFI_FLASH_H2I_WRITE_REQ); bfa_trc(flash, type); bfa_trc(flash, instance); bfa_trc(flash, len); bfa_trc(flash, offset); if (!bfa_ioc_is_operational(flash->ioc)) return BFA_STATUS_IOC_NON_OP; /* * 'len' must be in word (4-byte) boundary * 'offset' must be in sector (16kb) boundary */ if (!len || (len & 0x03) || (offset & 0x00003FFF)) return BFA_STATUS_FLASH_BAD_LEN; if (type == BFA_FLASH_PART_MFG) return BFA_STATUS_EINVAL; if (flash->op_busy) { bfa_trc(flash, flash->op_busy); return BFA_STATUS_DEVBUSY; } flash->op_busy = 1; flash->cbfn = cbfn; flash->cbarg = cbarg; flash->type = type; flash->instance = instance; flash->residue = len; flash->offset = 0; flash->addr_off = offset; flash->ubuf = buf; bfa_flash_write_send(flash); return BFA_STATUS_OK; } /* * Read flash partition. * * @param[in] flash - flash structure * @param[in] type - flash partition type * @param[in] instance - flash partition instance * @param[in] buf - read data buffer * @param[in] len - data buffer length * @param[in] offset - offset relative to the partition starting address * @param[in] cbfn - callback function * @param[in] cbarg - callback argument * * Return status. */ bfa_status_t bfa_flash_read_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type, u8 instance, void *buf, u32 len, u32 offset, bfa_cb_flash_t cbfn, void *cbarg) { bfa_trc(flash, BFI_FLASH_H2I_READ_REQ); bfa_trc(flash, type); bfa_trc(flash, instance); bfa_trc(flash, len); bfa_trc(flash, offset); if (!bfa_ioc_is_operational(flash->ioc)) return BFA_STATUS_IOC_NON_OP; /* * 'len' must be in word (4-byte) boundary * 'offset' must be in sector (16kb) boundary */ if (!len || (len & 0x03) || (offset & 0x00003FFF)) return BFA_STATUS_FLASH_BAD_LEN; if (flash->op_busy) { bfa_trc(flash, flash->op_busy); return BFA_STATUS_DEVBUSY; } flash->op_busy = 1; flash->cbfn = cbfn; flash->cbarg = cbarg; flash->type = type; flash->instance = instance; flash->residue = len; flash->offset = 0; flash->addr_off = offset; flash->ubuf = buf; bfa_flash_read_send(flash); return BFA_STATUS_OK; } /* * DIAG module specific */ #define BFA_DIAG_MEMTEST_TOV 50000 /* memtest timeout in msec */ #define CT2_BFA_DIAG_MEMTEST_TOV (9*30*1000) /* 4.5 min */ /* IOC event handler */ static void bfa_diag_notify(void *diag_arg, enum bfa_ioc_event_e event) { struct bfa_diag_s *diag = diag_arg; bfa_trc(diag, event); bfa_trc(diag, diag->block); bfa_trc(diag, diag->fwping.lock); bfa_trc(diag, diag->tsensor.lock); switch (event) { case BFA_IOC_E_DISABLED: case BFA_IOC_E_FAILED: if (diag->fwping.lock) { diag->fwping.status = BFA_STATUS_IOC_FAILURE; diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status); diag->fwping.lock = 0; } if (diag->tsensor.lock) { diag->tsensor.status = BFA_STATUS_IOC_FAILURE; diag->tsensor.cbfn(diag->tsensor.cbarg, diag->tsensor.status); diag->tsensor.lock = 0; } if (diag->block) { if (diag->timer_active) { bfa_timer_stop(&diag->timer); diag->timer_active = 0; } diag->status = BFA_STATUS_IOC_FAILURE; 
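/*
 * Editor's note -- worked example only, not part of the driver.
 * bfa_flash_update_part() and bfa_flash_read_part() above reject requests
 * whose length is not a 4-byte multiple or whose offset is not on a 16 KB
 * sector boundary, via the mask test (len & 0x03) || (offset & 0x00003FFF):
 *
 *   len = 1024, offset = 0x8000  -> accepted (0x8000 is 2 x 16 KB)
 *   len = 1024, offset = 0x1000  -> rejected, 0x1000 & 0x3FFF != 0
 *   len = 1023, offset = 0       -> rejected, 1023 & 0x3 != 0
 */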
diag->cbfn(diag->cbarg, diag->status); diag->block = 0; } break; default: break; } } static void bfa_diag_memtest_done(void *cbarg) { struct bfa_diag_s *diag = cbarg; struct bfa_ioc_s *ioc = diag->ioc; struct bfa_diag_memtest_result *res = diag->result; u32 loff = BFI_BOOT_MEMTEST_RES_ADDR; u32 pgnum, i; pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff); writel(pgnum, ioc->ioc_regs.host_page_num_fn); for (i = 0; i < (sizeof(struct bfa_diag_memtest_result) / sizeof(u32)); i++) { /* read test result from smem */ *((u32 *) res + i) = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff); loff += sizeof(u32); } /* Reset IOC fwstates to BFI_IOC_UNINIT */ bfa_ioc_reset_fwstate(ioc); res->status = swab32(res->status); bfa_trc(diag, res->status); if (res->status == BFI_BOOT_MEMTEST_RES_SIG) diag->status = BFA_STATUS_OK; else { diag->status = BFA_STATUS_MEMTEST_FAILED; res->addr = swab32(res->addr); res->exp = swab32(res->exp); res->act = swab32(res->act); res->err_status = swab32(res->err_status); res->err_status1 = swab32(res->err_status1); res->err_addr = swab32(res->err_addr); bfa_trc(diag, res->addr); bfa_trc(diag, res->exp); bfa_trc(diag, res->act); bfa_trc(diag, res->err_status); bfa_trc(diag, res->err_status1); bfa_trc(diag, res->err_addr); } diag->timer_active = 0; diag->cbfn(diag->cbarg, diag->status); diag->block = 0; } /* * Firmware ping */ /* * Perform DMA test directly */ static void diag_fwping_send(struct bfa_diag_s *diag) { struct bfi_diag_fwping_req_s *fwping_req; u32 i; bfa_trc(diag, diag->fwping.dbuf_pa); /* fill DMA area with pattern */ for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++) *((u32 *)diag->fwping.dbuf_kva + i) = diag->fwping.data; /* Fill mbox msg */ fwping_req = (struct bfi_diag_fwping_req_s *)diag->fwping.mbcmd.msg; /* Setup SG list */ bfa_alen_set(&fwping_req->alen, BFI_DIAG_DMA_BUF_SZ, diag->fwping.dbuf_pa); /* Set up dma count */ fwping_req->count = cpu_to_be32(diag->fwping.count); /* Set up data pattern */ fwping_req->data = diag->fwping.data; /* build host command */ bfi_h2i_set(fwping_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_FWPING, bfa_ioc_portid(diag->ioc)); /* send mbox cmd */ bfa_ioc_mbox_queue(diag->ioc, &diag->fwping.mbcmd); } static void diag_fwping_comp(struct bfa_diag_s *diag, struct bfi_diag_fwping_rsp_s *diag_rsp) { u32 rsp_data = diag_rsp->data; u8 rsp_dma_status = diag_rsp->dma_status; bfa_trc(diag, rsp_data); bfa_trc(diag, rsp_dma_status); if (rsp_dma_status == BFA_STATUS_OK) { u32 i, pat; pat = (diag->fwping.count & 0x1) ? 
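/*
 * Editor's note -- illustrative sketch only, not part of the driver.
 * Pattern selection step of diag_fwping_comp(): diag_fwping_send() fills
 * the DMA buffer with fwping.data, and the check that follows compares
 * the returned buffer against the bit-wise complement whenever the
 * remaining count is odd, i.e. the pattern alternates between passes
 * (as implied by this test). For example, with data = 0xA5A5A5A5:
 *
 *   count even -> expected word pattern 0xA5A5A5A5
 *   count odd  -> expected word pattern 0x5A5A5A5A (the complement)
 *
 * Any mismatching word marks the run BFA_STATUS_DATACORRUPTED.
 */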
~(diag->fwping.data) : diag->fwping.data; /* Check mbox data */ if (diag->fwping.data != rsp_data) { bfa_trc(diag, rsp_data); diag->fwping.result->dmastatus = BFA_STATUS_DATACORRUPTED; diag->fwping.status = BFA_STATUS_DATACORRUPTED; diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status); diag->fwping.lock = 0; return; } /* Check dma pattern */ for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++) { if (*((u32 *)diag->fwping.dbuf_kva + i) != pat) { bfa_trc(diag, i); bfa_trc(diag, pat); bfa_trc(diag, *((u32 *)diag->fwping.dbuf_kva + i)); diag->fwping.result->dmastatus = BFA_STATUS_DATACORRUPTED; diag->fwping.status = BFA_STATUS_DATACORRUPTED; diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status); diag->fwping.lock = 0; return; } } diag->fwping.result->dmastatus = BFA_STATUS_OK; diag->fwping.status = BFA_STATUS_OK; diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status); diag->fwping.lock = 0; } else { diag->fwping.status = BFA_STATUS_HDMA_FAILED; diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status); diag->fwping.lock = 0; } } /* * Temperature Sensor */ static void diag_tempsensor_send(struct bfa_diag_s *diag) { struct bfi_diag_ts_req_s *msg; msg = (struct bfi_diag_ts_req_s *)diag->tsensor.mbcmd.msg; bfa_trc(diag, msg->temp); /* build host command */ bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_TEMPSENSOR, bfa_ioc_portid(diag->ioc)); /* send mbox cmd */ bfa_ioc_mbox_queue(diag->ioc, &diag->tsensor.mbcmd); } static void diag_tempsensor_comp(struct bfa_diag_s *diag, bfi_diag_ts_rsp_t *rsp) { if (!diag->tsensor.lock) { /* receiving response after ioc failure */ bfa_trc(diag, diag->tsensor.lock); return; } /* * ASIC junction tempsensor is a reg read operation * it will always return OK */ diag->tsensor.temp->temp = be16_to_cpu(rsp->temp); diag->tsensor.temp->ts_junc = rsp->ts_junc; diag->tsensor.temp->ts_brd = rsp->ts_brd; if (rsp->ts_brd) { /* tsensor.temp->status is brd_temp status */ diag->tsensor.temp->status = rsp->status; if (rsp->status == BFA_STATUS_OK) { diag->tsensor.temp->brd_temp = be16_to_cpu(rsp->brd_temp); } else diag->tsensor.temp->brd_temp = 0; } bfa_trc(diag, rsp->status); bfa_trc(diag, rsp->ts_junc); bfa_trc(diag, rsp->temp); bfa_trc(diag, rsp->ts_brd); bfa_trc(diag, rsp->brd_temp); /* tsensor status is always good bcos we always have junction temp */ diag->tsensor.status = BFA_STATUS_OK; diag->tsensor.cbfn(diag->tsensor.cbarg, diag->tsensor.status); diag->tsensor.lock = 0; } /* * LED Test command */ static void diag_ledtest_send(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest) { struct bfi_diag_ledtest_req_s *msg; msg = (struct bfi_diag_ledtest_req_s *)diag->ledtest.mbcmd.msg; /* build host command */ bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LEDTEST, bfa_ioc_portid(diag->ioc)); /* * convert the freq from N blinks per 10 sec to * crossbow ontime value. 
We do it here because division is need */ if (ledtest->freq) ledtest->freq = 500 / ledtest->freq; if (ledtest->freq == 0) ledtest->freq = 1; bfa_trc(diag, ledtest->freq); /* mcpy(&ledtest_req->req, ledtest, sizeof(bfa_diag_ledtest_t)); */ msg->cmd = (u8) ledtest->cmd; msg->color = (u8) ledtest->color; msg->portid = bfa_ioc_portid(diag->ioc); msg->led = ledtest->led; msg->freq = cpu_to_be16(ledtest->freq); /* send mbox cmd */ bfa_ioc_mbox_queue(diag->ioc, &diag->ledtest.mbcmd); } static void diag_ledtest_comp(struct bfa_diag_s *diag, struct bfi_diag_ledtest_rsp_s *msg) { bfa_trc(diag, diag->ledtest.lock); diag->ledtest.lock = BFA_FALSE; /* no bfa_cb_queue is needed because driver is not waiting */ } /* * Port beaconing */ static void diag_portbeacon_send(struct bfa_diag_s *diag, bfa_boolean_t beacon, u32 sec) { struct bfi_diag_portbeacon_req_s *msg; msg = (struct bfi_diag_portbeacon_req_s *)diag->beacon.mbcmd.msg; /* build host command */ bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_PORTBEACON, bfa_ioc_portid(diag->ioc)); msg->beacon = beacon; msg->period = cpu_to_be32(sec); /* send mbox cmd */ bfa_ioc_mbox_queue(diag->ioc, &diag->beacon.mbcmd); } static void diag_portbeacon_comp(struct bfa_diag_s *diag) { bfa_trc(diag, diag->beacon.state); diag->beacon.state = BFA_FALSE; if (diag->cbfn_beacon) diag->cbfn_beacon(diag->dev, BFA_FALSE, diag->beacon.link_e2e); } /* * Diag hmbox handler */ static void bfa_diag_intr(void *diagarg, struct bfi_mbmsg_s *msg) { struct bfa_diag_s *diag = diagarg; switch (msg->mh.msg_id) { case BFI_DIAG_I2H_PORTBEACON: diag_portbeacon_comp(diag); break; case BFI_DIAG_I2H_FWPING: diag_fwping_comp(diag, (struct bfi_diag_fwping_rsp_s *) msg); break; case BFI_DIAG_I2H_TEMPSENSOR: diag_tempsensor_comp(diag, (bfi_diag_ts_rsp_t *) msg); break; case BFI_DIAG_I2H_LEDTEST: diag_ledtest_comp(diag, (struct bfi_diag_ledtest_rsp_s *) msg); break; default: bfa_trc(diag, msg->mh.msg_id); WARN_ON(1); } } /* * Gen RAM Test * * @param[in] *diag - diag data struct * @param[in] *memtest - mem test params input from upper layer, * @param[in] pattern - mem test pattern * @param[in] *result - mem test result * @param[in] cbfn - mem test callback functioin * @param[in] cbarg - callback functioin arg * * @param[out] */ bfa_status_t bfa_diag_memtest(struct bfa_diag_s *diag, struct bfa_diag_memtest_s *memtest, u32 pattern, struct bfa_diag_memtest_result *result, bfa_cb_diag_t cbfn, void *cbarg) { u32 memtest_tov; bfa_trc(diag, pattern); if (!bfa_ioc_adapter_is_disabled(diag->ioc)) return BFA_STATUS_ADAPTER_ENABLED; /* check to see if there is another destructive diag cmd running */ if (diag->block) { bfa_trc(diag, diag->block); return BFA_STATUS_DEVBUSY; } else diag->block = 1; diag->result = result; diag->cbfn = cbfn; diag->cbarg = cbarg; /* download memtest code and take LPU0 out of reset */ bfa_ioc_boot(diag->ioc, BFI_FWBOOT_TYPE_MEMTEST, BFI_FWBOOT_ENV_OS); memtest_tov = (bfa_ioc_asic_gen(diag->ioc) == BFI_ASIC_GEN_CT2) ? 
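/*
 * Editor's note -- worked examples only, not part of the driver.
 * Two small conversions from this stretch of code:
 *
 * 1. diag_ledtest_send() turns a user frequency of N blinks per 10 s
 *    into the hardware on-time value 500 / N, clamped to at least 1:
 *    N = 10 -> 50, N = 50 -> 10, N = 0 or N > 500 -> 1.
 *
 * 2. The memtest timeout selected right here is ASIC dependent: CT2
 *    parts get CT2_BFA_DIAG_MEMTEST_TOV (9 * 30 * 1000 ms = 270 s,
 *    i.e. 4.5 minutes), everything else BFA_DIAG_MEMTEST_TOV (50 s).
 */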
CT2_BFA_DIAG_MEMTEST_TOV : BFA_DIAG_MEMTEST_TOV; bfa_timer_begin(diag->ioc->timer_mod, &diag->timer, bfa_diag_memtest_done, diag, memtest_tov); diag->timer_active = 1; return BFA_STATUS_OK; } /* * DIAG firmware ping command * * @param[in] *diag - diag data struct * @param[in] cnt - dma loop count for testing PCIE * @param[in] data - data pattern to pass in fw * @param[in] *result - pt to bfa_diag_fwping_result_t data struct * @param[in] cbfn - callback function * @param[in] *cbarg - callback functioin arg * * @param[out] */ bfa_status_t bfa_diag_fwping(struct bfa_diag_s *diag, u32 cnt, u32 data, struct bfa_diag_results_fwping *result, bfa_cb_diag_t cbfn, void *cbarg) { bfa_trc(diag, cnt); bfa_trc(diag, data); if (!bfa_ioc_is_operational(diag->ioc)) return BFA_STATUS_IOC_NON_OP; if (bfa_asic_id_ct2(bfa_ioc_devid((diag->ioc))) && ((diag->ioc)->clscode == BFI_PCIFN_CLASS_ETH)) return BFA_STATUS_CMD_NOTSUPP; /* check to see if there is another destructive diag cmd running */ if (diag->block || diag->fwping.lock) { bfa_trc(diag, diag->block); bfa_trc(diag, diag->fwping.lock); return BFA_STATUS_DEVBUSY; } /* Initialization */ diag->fwping.lock = 1; diag->fwping.cbfn = cbfn; diag->fwping.cbarg = cbarg; diag->fwping.result = result; diag->fwping.data = data; diag->fwping.count = cnt; /* Init test results */ diag->fwping.result->data = 0; diag->fwping.result->status = BFA_STATUS_OK; /* kick off the first ping */ diag_fwping_send(diag); return BFA_STATUS_OK; } /* * Read Temperature Sensor * * @param[in] *diag - diag data struct * @param[in] *result - pt to bfa_diag_temp_t data struct * @param[in] cbfn - callback function * @param[in] *cbarg - callback functioin arg * * @param[out] */ bfa_status_t bfa_diag_tsensor_query(struct bfa_diag_s *diag, struct bfa_diag_results_tempsensor_s *result, bfa_cb_diag_t cbfn, void *cbarg) { /* check to see if there is a destructive diag cmd running */ if (diag->block || diag->tsensor.lock) { bfa_trc(diag, diag->block); bfa_trc(diag, diag->tsensor.lock); return BFA_STATUS_DEVBUSY; } if (!bfa_ioc_is_operational(diag->ioc)) return BFA_STATUS_IOC_NON_OP; /* Init diag mod params */ diag->tsensor.lock = 1; diag->tsensor.temp = result; diag->tsensor.cbfn = cbfn; diag->tsensor.cbarg = cbarg; diag->tsensor.status = BFA_STATUS_OK; /* Send msg to fw */ diag_tempsensor_send(diag); return BFA_STATUS_OK; } /* * LED Test command * * @param[in] *diag - diag data struct * @param[in] *ledtest - pt to ledtest data structure * * @param[out] */ bfa_status_t bfa_diag_ledtest(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest) { bfa_trc(diag, ledtest->cmd); if (!bfa_ioc_is_operational(diag->ioc)) return BFA_STATUS_IOC_NON_OP; if (diag->beacon.state) return BFA_STATUS_BEACON_ON; if (diag->ledtest.lock) return BFA_STATUS_LEDTEST_OP; /* Send msg to fw */ diag->ledtest.lock = BFA_TRUE; diag_ledtest_send(diag, ledtest); return BFA_STATUS_OK; } /* * Port beaconing command * * @param[in] *diag - diag data struct * @param[in] beacon - port beaconing 1:ON 0:OFF * @param[in] link_e2e_beacon - link beaconing 1:ON 0:OFF * @param[in] sec - beaconing duration in seconds * * @param[out] */ bfa_status_t bfa_diag_beacon_port(struct bfa_diag_s *diag, bfa_boolean_t beacon, bfa_boolean_t link_e2e_beacon, uint32_t sec) { bfa_trc(diag, beacon); bfa_trc(diag, link_e2e_beacon); bfa_trc(diag, sec); if (!bfa_ioc_is_operational(diag->ioc)) return BFA_STATUS_IOC_NON_OP; if (diag->ledtest.lock) return BFA_STATUS_LEDTEST_OP; if (diag->beacon.state && beacon) /* beacon alread on */ return BFA_STATUS_BEACON_ON; 
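/*
 * Editor's note -- illustrative sketch only, not part of the driver.
 * bfa_diag_fwping(), bfa_diag_tsensor_query() and bfa_diag_ledtest()
 * above all follow the same asynchronous shape: refuse if the IOC is
 * not operational or another request holds the lock, record the
 * caller's callback, fire a mailbox command, and return BFA_STATUS_OK;
 * the stored cbfn runs later from the interrupt path (or from
 * bfa_diag_notify() on IOC failure). Skeleton of the convention, with
 * hypothetical names:
 *
 *   if (!ioc_is_operational(ioc))
 *           return BFA_STATUS_IOC_NON_OP;
 *   if (op->lock)
 *           return BFA_STATUS_DEVBUSY;
 *   op->lock = 1;                  // released only in the completion path
 *   op->cbfn = cbfn;
 *   op->cbarg = cbarg;
 *   send_mbox_request(op);         // completion arrives via the ISR
 *   return BFA_STATUS_OK;
 */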
diag->beacon.state = beacon; diag->beacon.link_e2e = link_e2e_beacon; if (diag->cbfn_beacon) diag->cbfn_beacon(diag->dev, beacon, link_e2e_beacon); /* Send msg to fw */ diag_portbeacon_send(diag, beacon, sec); return BFA_STATUS_OK; } /* * Return DMA memory needed by diag module. */ u32 bfa_diag_meminfo(void) { return BFA_ROUNDUP(BFI_DIAG_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); } /* * Attach virtual and physical memory for Diag. */ void bfa_diag_attach(struct bfa_diag_s *diag, struct bfa_ioc_s *ioc, void *dev, bfa_cb_diag_beacon_t cbfn_beacon, struct bfa_trc_mod_s *trcmod) { diag->dev = dev; diag->ioc = ioc; diag->trcmod = trcmod; diag->block = 0; diag->cbfn = NULL; diag->cbarg = NULL; diag->result = NULL; diag->cbfn_beacon = cbfn_beacon; bfa_ioc_mbox_regisr(diag->ioc, BFI_MC_DIAG, bfa_diag_intr, diag); bfa_q_qe_init(&diag->ioc_notify); bfa_ioc_notify_init(&diag->ioc_notify, bfa_diag_notify, diag); list_add_tail(&diag->ioc_notify.qe, &diag->ioc->notify_q); } void bfa_diag_memclaim(struct bfa_diag_s *diag, u8 *dm_kva, u64 dm_pa) { diag->fwping.dbuf_kva = dm_kva; diag->fwping.dbuf_pa = dm_pa; memset(diag->fwping.dbuf_kva, 0, BFI_DIAG_DMA_BUF_SZ); } /* * PHY module specific */ #define BFA_PHY_DMA_BUF_SZ 0x02000 /* 8k dma buffer */ #define BFA_PHY_LOCK_STATUS 0x018878 /* phy semaphore status reg */ static void bfa_phy_ntoh32(u32 *obuf, u32 *ibuf, int sz) { int i, m = sz >> 2; for (i = 0; i < m; i++) obuf[i] = be32_to_cpu(ibuf[i]); } static bfa_boolean_t bfa_phy_present(struct bfa_phy_s *phy) { return (phy->ioc->attr->card_type == BFA_MFG_TYPE_LIGHTNING); } static void bfa_phy_notify(void *cbarg, enum bfa_ioc_event_e event) { struct bfa_phy_s *phy = cbarg; bfa_trc(phy, event); switch (event) { case BFA_IOC_E_DISABLED: case BFA_IOC_E_FAILED: if (phy->op_busy) { phy->status = BFA_STATUS_IOC_FAILURE; phy->cbfn(phy->cbarg, phy->status); phy->op_busy = 0; } break; default: break; } } /* * Send phy attribute query request. * * @param[in] cbarg - callback argument */ static void bfa_phy_query_send(void *cbarg) { struct bfa_phy_s *phy = cbarg; struct bfi_phy_query_req_s *msg = (struct bfi_phy_query_req_s *) phy->mb.msg; msg->instance = phy->instance; bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_QUERY_REQ, bfa_ioc_portid(phy->ioc)); bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_attr_s), phy->dbuf_pa); bfa_ioc_mbox_queue(phy->ioc, &phy->mb); } /* * Send phy write request. * * @param[in] cbarg - callback argument */ static void bfa_phy_write_send(void *cbarg) { struct bfa_phy_s *phy = cbarg; struct bfi_phy_write_req_s *msg = (struct bfi_phy_write_req_s *) phy->mb.msg; u32 len; u16 *buf, *dbuf; int i, sz; msg->instance = phy->instance; msg->offset = cpu_to_be32(phy->addr_off + phy->offset); len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ? phy->residue : BFA_PHY_DMA_BUF_SZ; msg->length = cpu_to_be32(len); /* indicate if it's the last msg of the whole write operation */ msg->last = (len == phy->residue) ? 1 : 0; bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_WRITE_REQ, bfa_ioc_portid(phy->ioc)); bfa_alen_set(&msg->alen, len, phy->dbuf_pa); buf = (u16 *) (phy->ubuf + phy->offset); dbuf = (u16 *)phy->dbuf_kva; sz = len >> 1; for (i = 0; i < sz; i++) buf[i] = cpu_to_be16(dbuf[i]); bfa_ioc_mbox_queue(phy->ioc, &phy->mb); phy->residue -= len; phy->offset += len; } /* * Send phy read request. 
* * @param[in] cbarg - callback argument */ static void bfa_phy_read_send(void *cbarg) { struct bfa_phy_s *phy = cbarg; struct bfi_phy_read_req_s *msg = (struct bfi_phy_read_req_s *) phy->mb.msg; u32 len; msg->instance = phy->instance; msg->offset = cpu_to_be32(phy->addr_off + phy->offset); len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ? phy->residue : BFA_PHY_DMA_BUF_SZ; msg->length = cpu_to_be32(len); bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_READ_REQ, bfa_ioc_portid(phy->ioc)); bfa_alen_set(&msg->alen, len, phy->dbuf_pa); bfa_ioc_mbox_queue(phy->ioc, &phy->mb); } /* * Send phy stats request. * * @param[in] cbarg - callback argument */ static void bfa_phy_stats_send(void *cbarg) { struct bfa_phy_s *phy = cbarg; struct bfi_phy_stats_req_s *msg = (struct bfi_phy_stats_req_s *) phy->mb.msg; msg->instance = phy->instance; bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_STATS_REQ, bfa_ioc_portid(phy->ioc)); bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_stats_s), phy->dbuf_pa); bfa_ioc_mbox_queue(phy->ioc, &phy->mb); } /* * Flash memory info API. * * @param[in] mincfg - minimal cfg variable */ u32 bfa_phy_meminfo(bfa_boolean_t mincfg) { /* min driver doesn't need phy */ if (mincfg) return 0; return BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); } /* * Flash attach API. * * @param[in] phy - phy structure * @param[in] ioc - ioc structure * @param[in] dev - device structure * @param[in] trcmod - trace module * @param[in] logmod - log module */ void bfa_phy_attach(struct bfa_phy_s *phy, struct bfa_ioc_s *ioc, void *dev, struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg) { phy->ioc = ioc; phy->trcmod = trcmod; phy->cbfn = NULL; phy->cbarg = NULL; phy->op_busy = 0; bfa_ioc_mbox_regisr(phy->ioc, BFI_MC_PHY, bfa_phy_intr, phy); bfa_q_qe_init(&phy->ioc_notify); bfa_ioc_notify_init(&phy->ioc_notify, bfa_phy_notify, phy); list_add_tail(&phy->ioc_notify.qe, &phy->ioc->notify_q); /* min driver doesn't need phy */ if (mincfg) { phy->dbuf_kva = NULL; phy->dbuf_pa = 0; } } /* * Claim memory for phy * * @param[in] phy - phy structure * @param[in] dm_kva - pointer to virtual memory address * @param[in] dm_pa - physical memory address * @param[in] mincfg - minimal cfg variable */ void bfa_phy_memclaim(struct bfa_phy_s *phy, u8 *dm_kva, u64 dm_pa, bfa_boolean_t mincfg) { if (mincfg) return; phy->dbuf_kva = dm_kva; phy->dbuf_pa = dm_pa; memset(phy->dbuf_kva, 0, BFA_PHY_DMA_BUF_SZ); dm_kva += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); dm_pa += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); } bfa_boolean_t bfa_phy_busy(struct bfa_ioc_s *ioc) { void __iomem *rb; rb = bfa_ioc_bar0(ioc); return readl(rb + BFA_PHY_LOCK_STATUS); } /* * Get phy attribute. * * @param[in] phy - phy structure * @param[in] attr - phy attribute structure * @param[in] cbfn - callback function * @param[in] cbarg - callback argument * * Return status. */ bfa_status_t bfa_phy_get_attr(struct bfa_phy_s *phy, u8 instance, struct bfa_phy_attr_s *attr, bfa_cb_phy_t cbfn, void *cbarg) { bfa_trc(phy, BFI_PHY_H2I_QUERY_REQ); bfa_trc(phy, instance); if (!bfa_phy_present(phy)) return BFA_STATUS_PHY_NOT_PRESENT; if (!bfa_ioc_is_operational(phy->ioc)) return BFA_STATUS_IOC_NON_OP; if (phy->op_busy || bfa_phy_busy(phy->ioc)) { bfa_trc(phy, phy->op_busy); return BFA_STATUS_DEVBUSY; } phy->op_busy = 1; phy->cbfn = cbfn; phy->cbarg = cbarg; phy->instance = instance; phy->ubuf = (uint8_t *) attr; bfa_phy_query_send(phy); return BFA_STATUS_OK; } /* * Get phy stats. 
* * @param[in] phy - phy structure * @param[in] instance - phy image instance * @param[in] stats - pointer to phy stats * @param[in] cbfn - callback function * @param[in] cbarg - callback argument * * Return status. */ bfa_status_t bfa_phy_get_stats(struct bfa_phy_s *phy, u8 instance, struct bfa_phy_stats_s *stats, bfa_cb_phy_t cbfn, void *cbarg) { bfa_trc(phy, BFI_PHY_H2I_STATS_REQ); bfa_trc(phy, instance); if (!bfa_phy_present(phy)) return BFA_STATUS_PHY_NOT_PRESENT; if (!bfa_ioc_is_operational(phy->ioc)) return BFA_STATUS_IOC_NON_OP; if (phy->op_busy || bfa_phy_busy(phy->ioc)) { bfa_trc(phy, phy->op_busy); return BFA_STATUS_DEVBUSY; } phy->op_busy = 1; phy->cbfn = cbfn; phy->cbarg = cbarg; phy->instance = instance; phy->ubuf = (u8 *) stats; bfa_phy_stats_send(phy); return BFA_STATUS_OK; } /* * Update phy image. * * @param[in] phy - phy structure * @param[in] instance - phy image instance * @param[in] buf - update data buffer * @param[in] len - data buffer length * @param[in] offset - offset relative to starting address * @param[in] cbfn - callback function * @param[in] cbarg - callback argument * * Return status. */ bfa_status_t bfa_phy_update(struct bfa_phy_s *phy, u8 instance, void *buf, u32 len, u32 offset, bfa_cb_phy_t cbfn, void *cbarg) { bfa_trc(phy, BFI_PHY_H2I_WRITE_REQ); bfa_trc(phy, instance); bfa_trc(phy, len); bfa_trc(phy, offset); if (!bfa_phy_present(phy)) return BFA_STATUS_PHY_NOT_PRESENT; if (!bfa_ioc_is_operational(phy->ioc)) return BFA_STATUS_IOC_NON_OP; /* 'len' must be in word (4-byte) boundary */ if (!len || (len & 0x03)) return BFA_STATUS_FAILED; if (phy->op_busy || bfa_phy_busy(phy->ioc)) { bfa_trc(phy, phy->op_busy); return BFA_STATUS_DEVBUSY; } phy->op_busy = 1; phy->cbfn = cbfn; phy->cbarg = cbarg; phy->instance = instance; phy->residue = len; phy->offset = 0; phy->addr_off = offset; phy->ubuf = buf; bfa_phy_write_send(phy); return BFA_STATUS_OK; } /* * Read phy image. * * @param[in] phy - phy structure * @param[in] instance - phy image instance * @param[in] buf - read data buffer * @param[in] len - data buffer length * @param[in] offset - offset relative to starting address * @param[in] cbfn - callback function * @param[in] cbarg - callback argument * * Return status. */ bfa_status_t bfa_phy_read(struct bfa_phy_s *phy, u8 instance, void *buf, u32 len, u32 offset, bfa_cb_phy_t cbfn, void *cbarg) { bfa_trc(phy, BFI_PHY_H2I_READ_REQ); bfa_trc(phy, instance); bfa_trc(phy, len); bfa_trc(phy, offset); if (!bfa_phy_present(phy)) return BFA_STATUS_PHY_NOT_PRESENT; if (!bfa_ioc_is_operational(phy->ioc)) return BFA_STATUS_IOC_NON_OP; /* 'len' must be in word (4-byte) boundary */ if (!len || (len & 0x03)) return BFA_STATUS_FAILED; if (phy->op_busy || bfa_phy_busy(phy->ioc)) { bfa_trc(phy, phy->op_busy); return BFA_STATUS_DEVBUSY; } phy->op_busy = 1; phy->cbfn = cbfn; phy->cbarg = cbarg; phy->instance = instance; phy->residue = len; phy->offset = 0; phy->addr_off = offset; phy->ubuf = buf; bfa_phy_read_send(phy); return BFA_STATUS_OK; } /* * Process phy response messages upon receiving interrupts. 
* * @param[in] phyarg - phy structure * @param[in] msg - message structure */ void bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg) { struct bfa_phy_s *phy = phyarg; u32 status; union { struct bfi_phy_query_rsp_s *query; struct bfi_phy_stats_rsp_s *stats; struct bfi_phy_write_rsp_s *write; struct bfi_phy_read_rsp_s *read; struct bfi_mbmsg_s *msg; } m; m.msg = msg; bfa_trc(phy, msg->mh.msg_id); if (!phy->op_busy) { /* receiving response after ioc failure */ bfa_trc(phy, 0x9999); return; } switch (msg->mh.msg_id) { case BFI_PHY_I2H_QUERY_RSP: status = be32_to_cpu(m.query->status); bfa_trc(phy, status); if (status == BFA_STATUS_OK) { struct bfa_phy_attr_s *attr = (struct bfa_phy_attr_s *) phy->ubuf; bfa_phy_ntoh32((u32 *)attr, (u32 *)phy->dbuf_kva, sizeof(struct bfa_phy_attr_s)); bfa_trc(phy, attr->status); bfa_trc(phy, attr->length); } phy->status = status; phy->op_busy = 0; if (phy->cbfn) phy->cbfn(phy->cbarg, phy->status); break; case BFI_PHY_I2H_STATS_RSP: status = be32_to_cpu(m.stats->status); bfa_trc(phy, status); if (status == BFA_STATUS_OK) { struct bfa_phy_stats_s *stats = (struct bfa_phy_stats_s *) phy->ubuf; bfa_phy_ntoh32((u32 *)stats, (u32 *)phy->dbuf_kva, sizeof(struct bfa_phy_stats_s)); bfa_trc(phy, stats->status); } phy->status = status; phy->op_busy = 0; if (phy->cbfn) phy->cbfn(phy->cbarg, phy->status); break; case BFI_PHY_I2H_WRITE_RSP: status = be32_to_cpu(m.write->status); bfa_trc(phy, status); if (status != BFA_STATUS_OK || phy->residue == 0) { phy->status = status; phy->op_busy = 0; if (phy->cbfn) phy->cbfn(phy->cbarg, phy->status); } else { bfa_trc(phy, phy->offset); bfa_phy_write_send(phy); } break; case BFI_PHY_I2H_READ_RSP: status = be32_to_cpu(m.read->status); bfa_trc(phy, status); if (status != BFA_STATUS_OK) { phy->status = status; phy->op_busy = 0; if (phy->cbfn) phy->cbfn(phy->cbarg, phy->status); } else { u32 len = be32_to_cpu(m.read->length); u16 *buf = (u16 *)(phy->ubuf + phy->offset); u16 *dbuf = (u16 *)phy->dbuf_kva; int i, sz = len >> 1; bfa_trc(phy, phy->offset); bfa_trc(phy, len); for (i = 0; i < sz; i++) buf[i] = be16_to_cpu(dbuf[i]); phy->residue -= len; phy->offset += len; if (phy->residue == 0) { phy->status = status; phy->op_busy = 0; if (phy->cbfn) phy->cbfn(phy->cbarg, phy->status); } else bfa_phy_read_send(phy); } break; default: WARN_ON(1); } } /* * DCONF state machine events */ enum bfa_dconf_event { BFA_DCONF_SM_INIT = 1, /* dconf Init */ BFA_DCONF_SM_FLASH_COMP = 2, /* read/write to flash */ BFA_DCONF_SM_WR = 3, /* binding change, map */ BFA_DCONF_SM_TIMEOUT = 4, /* Start timer */ BFA_DCONF_SM_EXIT = 5, /* exit dconf module */ BFA_DCONF_SM_IOCDISABLE = 6, /* IOC disable event */ }; /* forward declaration of DCONF state machine */ static void bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event); static void bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event); static void bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event); static void bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event); static void bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event); static void bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event); static void bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event); static void bfa_dconf_cbfn(void *dconf, bfa_status_t status); static void bfa_dconf_timer(void *cbarg); static bfa_status_t bfa_dconf_flash_write(struct bfa_dconf_mod_s 
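/*
 * Editor's note -- illustrative sketch only, not part of the driver.
 * Unlike the flash path, the PHY image is moved as 16-bit words:
 * bfa_phy_write_send() byte-swaps the user buffer into the DMA area
 * with cpu_to_be16(), and the READ_RSP branch of bfa_phy_intr() above
 * undoes it with be16_to_cpu(), len >> 1 words at a time. Stand-alone
 * form of that copy (hypothetical helper name):
 *
 *   void copy_be16_words(unsigned short *dst, const unsigned short *src,
 *                        unsigned int nbytes)
 *   {
 *           unsigned int i, nwords = nbytes >> 1;
 *           for (i = 0; i < nwords; i++)
 *                   dst[i] = be16_to_cpu(src[i]);
 *   }
 */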
*dconf); static void bfa_dconf_init_cb(void *arg, bfa_status_t status); /* * Beginning state of dconf module. Waiting for an event to start. */ static void bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event) { bfa_status_t bfa_status; bfa_trc(dconf->bfa, event); switch (event) { case BFA_DCONF_SM_INIT: if (dconf->min_cfg) { bfa_trc(dconf->bfa, dconf->min_cfg); bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE); return; } bfa_sm_set_state(dconf, bfa_dconf_sm_flash_read); bfa_timer_start(dconf->bfa, &dconf->timer, bfa_dconf_timer, dconf, 2 * BFA_DCONF_UPDATE_TOV); bfa_status = bfa_flash_read_part(BFA_FLASH(dconf->bfa), BFA_FLASH_PART_DRV, dconf->instance, dconf->dconf, sizeof(struct bfa_dconf_s), 0, bfa_dconf_init_cb, dconf->bfa); if (bfa_status != BFA_STATUS_OK) { bfa_timer_stop(&dconf->timer); bfa_dconf_init_cb(dconf->bfa, BFA_STATUS_FAILED); bfa_sm_set_state(dconf, bfa_dconf_sm_uninit); return; } break; case BFA_DCONF_SM_EXIT: bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE); break; case BFA_DCONF_SM_IOCDISABLE: case BFA_DCONF_SM_WR: case BFA_DCONF_SM_FLASH_COMP: break; default: bfa_sm_fault(dconf->bfa, event); } } /* * Read flash for dconf entries and make a call back to the driver once done. */ static void bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event) { bfa_trc(dconf->bfa, event); switch (event) { case BFA_DCONF_SM_FLASH_COMP: bfa_timer_stop(&dconf->timer); bfa_sm_set_state(dconf, bfa_dconf_sm_ready); break; case BFA_DCONF_SM_TIMEOUT: bfa_sm_set_state(dconf, bfa_dconf_sm_ready); bfa_ioc_suspend(&dconf->bfa->ioc); break; case BFA_DCONF_SM_EXIT: bfa_timer_stop(&dconf->timer); bfa_sm_set_state(dconf, bfa_dconf_sm_uninit); bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE); break; case BFA_DCONF_SM_IOCDISABLE: bfa_timer_stop(&dconf->timer); bfa_sm_set_state(dconf, bfa_dconf_sm_uninit); break; default: bfa_sm_fault(dconf->bfa, event); } } /* * DCONF Module is in ready state. Has completed the initialization. */ static void bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event) { bfa_trc(dconf->bfa, event); switch (event) { case BFA_DCONF_SM_WR: bfa_timer_start(dconf->bfa, &dconf->timer, bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV); bfa_sm_set_state(dconf, bfa_dconf_sm_dirty); break; case BFA_DCONF_SM_EXIT: bfa_sm_set_state(dconf, bfa_dconf_sm_uninit); bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE); break; case BFA_DCONF_SM_INIT: case BFA_DCONF_SM_IOCDISABLE: break; default: bfa_sm_fault(dconf->bfa, event); } } /* * entries are dirty, write back to the flash. */ static void bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event) { bfa_trc(dconf->bfa, event); switch (event) { case BFA_DCONF_SM_TIMEOUT: bfa_sm_set_state(dconf, bfa_dconf_sm_sync); bfa_dconf_flash_write(dconf); break; case BFA_DCONF_SM_WR: bfa_timer_stop(&dconf->timer); bfa_timer_start(dconf->bfa, &dconf->timer, bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV); break; case BFA_DCONF_SM_EXIT: bfa_timer_stop(&dconf->timer); bfa_timer_start(dconf->bfa, &dconf->timer, bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV); bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync); bfa_dconf_flash_write(dconf); break; case BFA_DCONF_SM_FLASH_COMP: break; case BFA_DCONF_SM_IOCDISABLE: bfa_timer_stop(&dconf->timer); bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty); break; default: bfa_sm_fault(dconf->bfa, event); } } /* * Sync the dconf entries to the flash. 
*/ static void bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event) { bfa_trc(dconf->bfa, event); switch (event) { case BFA_DCONF_SM_IOCDISABLE: case BFA_DCONF_SM_FLASH_COMP: bfa_timer_stop(&dconf->timer); fallthrough; case BFA_DCONF_SM_TIMEOUT: bfa_sm_set_state(dconf, bfa_dconf_sm_uninit); bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE); break; default: bfa_sm_fault(dconf->bfa, event); } } static void bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event) { bfa_trc(dconf->bfa, event); switch (event) { case BFA_DCONF_SM_FLASH_COMP: bfa_sm_set_state(dconf, bfa_dconf_sm_ready); break; case BFA_DCONF_SM_WR: bfa_timer_start(dconf->bfa, &dconf->timer, bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV); bfa_sm_set_state(dconf, bfa_dconf_sm_dirty); break; case BFA_DCONF_SM_EXIT: bfa_timer_start(dconf->bfa, &dconf->timer, bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV); bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync); break; case BFA_DCONF_SM_IOCDISABLE: bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty); break; default: bfa_sm_fault(dconf->bfa, event); } } static void bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event) { bfa_trc(dconf->bfa, event); switch (event) { case BFA_DCONF_SM_INIT: bfa_timer_start(dconf->bfa, &dconf->timer, bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV); bfa_sm_set_state(dconf, bfa_dconf_sm_dirty); break; case BFA_DCONF_SM_EXIT: bfa_sm_set_state(dconf, bfa_dconf_sm_uninit); bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE); break; case BFA_DCONF_SM_IOCDISABLE: break; default: bfa_sm_fault(dconf->bfa, event); } } /* * Compute and return memory needed by DRV_CFG module. */ void bfa_dconf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo, struct bfa_s *bfa) { struct bfa_mem_kva_s *dconf_kva = BFA_MEM_DCONF_KVA(bfa); if (cfg->drvcfg.min_cfg) bfa_mem_kva_setup(meminfo, dconf_kva, sizeof(struct bfa_dconf_hdr_s)); else bfa_mem_kva_setup(meminfo, dconf_kva, sizeof(struct bfa_dconf_s)); } void bfa_dconf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg) { struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa); dconf->bfad = bfad; dconf->bfa = bfa; dconf->instance = bfa->ioc.port_id; bfa_trc(bfa, dconf->instance); dconf->dconf = (struct bfa_dconf_s *) bfa_mem_kva_curp(dconf); if (cfg->drvcfg.min_cfg) { bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_hdr_s); dconf->min_cfg = BFA_TRUE; } else { dconf->min_cfg = BFA_FALSE; bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_s); } bfa_dconf_read_data_valid(bfa) = BFA_FALSE; bfa_sm_set_state(dconf, bfa_dconf_sm_uninit); } static void bfa_dconf_init_cb(void *arg, bfa_status_t status) { struct bfa_s *bfa = arg; struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa); if (status == BFA_STATUS_OK) { bfa_dconf_read_data_valid(bfa) = BFA_TRUE; if (dconf->dconf->hdr.signature != BFI_DCONF_SIGNATURE) dconf->dconf->hdr.signature = BFI_DCONF_SIGNATURE; if (dconf->dconf->hdr.version != BFI_DCONF_VERSION) dconf->dconf->hdr.version = BFI_DCONF_VERSION; } bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP); bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DCONF_DONE); } void bfa_dconf_modinit(struct bfa_s *bfa) { struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa); bfa_sm_send_event(dconf, BFA_DCONF_SM_INIT); } static void bfa_dconf_timer(void *cbarg) { struct bfa_dconf_mod_s *dconf = cbarg; bfa_sm_send_event(dconf, BFA_DCONF_SM_TIMEOUT); } void bfa_dconf_iocdisable(struct bfa_s *bfa) { struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa); 
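/*
 * Editor's note -- summary only, not part of the driver. The handlers
 * above implement this dconf state machine (transitions read off the
 * bfa_dconf_sm_* functions in this file, events abbreviated):
 *
 *   uninit        --INIT-->         flash_read   (kick off flash read)
 *   flash_read    --FLASH_COMP-->   ready
 *   ready         --WR-->           dirty        (start update timer)
 *   dirty         --TIMEOUT-->      sync         (write back to flash)
 *   dirty/sync    --EXIT-->         final_sync   (flush, then uninit)
 *   sync          --FLASH_COMP-->   ready
 *   dirty/sync    --IOCDISABLE-->   iocdown_dirty
 *   iocdown_dirty --INIT-->         dirty        (re-arm timer)
 *
 * Each transition is made with bfa_sm_set_state() and driven by
 * bfa_sm_send_event(), as with the IOCDISABLE event queued just below.
 */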
bfa_sm_send_event(dconf, BFA_DCONF_SM_IOCDISABLE); } static bfa_status_t bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf) { bfa_status_t bfa_status; bfa_trc(dconf->bfa, 0); bfa_status = bfa_flash_update_part(BFA_FLASH(dconf->bfa), BFA_FLASH_PART_DRV, dconf->instance, dconf->dconf, sizeof(struct bfa_dconf_s), 0, bfa_dconf_cbfn, dconf); if (bfa_status != BFA_STATUS_OK) WARN_ON(bfa_status); bfa_trc(dconf->bfa, bfa_status); return bfa_status; } bfa_status_t bfa_dconf_update(struct bfa_s *bfa) { struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa); bfa_trc(dconf->bfa, 0); if (bfa_sm_cmp_state(dconf, bfa_dconf_sm_iocdown_dirty)) return BFA_STATUS_FAILED; if (dconf->min_cfg) { bfa_trc(dconf->bfa, dconf->min_cfg); return BFA_STATUS_FAILED; } bfa_sm_send_event(dconf, BFA_DCONF_SM_WR); return BFA_STATUS_OK; } static void bfa_dconf_cbfn(void *arg, bfa_status_t status) { struct bfa_dconf_mod_s *dconf = arg; WARN_ON(status); bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP); } void bfa_dconf_modexit(struct bfa_s *bfa) { struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa); bfa_sm_send_event(dconf, BFA_DCONF_SM_EXIT); } /* * FRU specific functions */ #define BFA_FRU_DMA_BUF_SZ 0x02000 /* 8k dma buffer */ #define BFA_FRU_CHINOOK_MAX_SIZE 0x10000 #define BFA_FRU_LIGHTNING_MAX_SIZE 0x200 static void bfa_fru_notify(void *cbarg, enum bfa_ioc_event_e event) { struct bfa_fru_s *fru = cbarg; bfa_trc(fru, event); switch (event) { case BFA_IOC_E_DISABLED: case BFA_IOC_E_FAILED: if (fru->op_busy) { fru->status = BFA_STATUS_IOC_FAILURE; fru->cbfn(fru->cbarg, fru->status); fru->op_busy = 0; } break; default: break; } } /* * Send fru write request. * * @param[in] cbarg - callback argument */ static void bfa_fru_write_send(void *cbarg, enum bfi_fru_h2i_msgs msg_type) { struct bfa_fru_s *fru = cbarg; struct bfi_fru_write_req_s *msg = (struct bfi_fru_write_req_s *) fru->mb.msg; u32 len; msg->offset = cpu_to_be32(fru->addr_off + fru->offset); len = (fru->residue < BFA_FRU_DMA_BUF_SZ) ? fru->residue : BFA_FRU_DMA_BUF_SZ; msg->length = cpu_to_be32(len); /* * indicate if it's the last msg of the whole write operation */ msg->last = (len == fru->residue) ? 1 : 0; msg->trfr_cmpl = (len == fru->residue) ? fru->trfr_cmpl : 0; bfi_h2i_set(msg->mh, BFI_MC_FRU, msg_type, bfa_ioc_portid(fru->ioc)); bfa_alen_set(&msg->alen, len, fru->dbuf_pa); memcpy(fru->dbuf_kva, fru->ubuf + fru->offset, len); bfa_ioc_mbox_queue(fru->ioc, &fru->mb); fru->residue -= len; fru->offset += len; } /* * Send fru read request. * * @param[in] cbarg - callback argument */ static void bfa_fru_read_send(void *cbarg, enum bfi_fru_h2i_msgs msg_type) { struct bfa_fru_s *fru = cbarg; struct bfi_fru_read_req_s *msg = (struct bfi_fru_read_req_s *) fru->mb.msg; u32 len; msg->offset = cpu_to_be32(fru->addr_off + fru->offset); len = (fru->residue < BFA_FRU_DMA_BUF_SZ) ? fru->residue : BFA_FRU_DMA_BUF_SZ; msg->length = cpu_to_be32(len); bfi_h2i_set(msg->mh, BFI_MC_FRU, msg_type, bfa_ioc_portid(fru->ioc)); bfa_alen_set(&msg->alen, len, fru->dbuf_pa); bfa_ioc_mbox_queue(fru->ioc, &fru->mb); } /* * Flash memory info API. * * @param[in] mincfg - minimal cfg variable */ u32 bfa_fru_meminfo(bfa_boolean_t mincfg) { /* min driver doesn't need fru */ if (mincfg) return 0; return BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); } /* * Flash attach API. 
* * @param[in] fru - fru structure * @param[in] ioc - ioc structure * @param[in] dev - device structure * @param[in] trcmod - trace module * @param[in] logmod - log module */ void bfa_fru_attach(struct bfa_fru_s *fru, struct bfa_ioc_s *ioc, void *dev, struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg) { fru->ioc = ioc; fru->trcmod = trcmod; fru->cbfn = NULL; fru->cbarg = NULL; fru->op_busy = 0; bfa_ioc_mbox_regisr(fru->ioc, BFI_MC_FRU, bfa_fru_intr, fru); bfa_q_qe_init(&fru->ioc_notify); bfa_ioc_notify_init(&fru->ioc_notify, bfa_fru_notify, fru); list_add_tail(&fru->ioc_notify.qe, &fru->ioc->notify_q); /* min driver doesn't need fru */ if (mincfg) { fru->dbuf_kva = NULL; fru->dbuf_pa = 0; } } /* * Claim memory for fru * * @param[in] fru - fru structure * @param[in] dm_kva - pointer to virtual memory address * @param[in] dm_pa - frusical memory address * @param[in] mincfg - minimal cfg variable */ void bfa_fru_memclaim(struct bfa_fru_s *fru, u8 *dm_kva, u64 dm_pa, bfa_boolean_t mincfg) { if (mincfg) return; fru->dbuf_kva = dm_kva; fru->dbuf_pa = dm_pa; memset(fru->dbuf_kva, 0, BFA_FRU_DMA_BUF_SZ); dm_kva += BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); dm_pa += BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); } /* * Update fru vpd image. * * @param[in] fru - fru structure * @param[in] buf - update data buffer * @param[in] len - data buffer length * @param[in] offset - offset relative to starting address * @param[in] cbfn - callback function * @param[in] cbarg - callback argument * * Return status. */ bfa_status_t bfa_fruvpd_update(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset, bfa_cb_fru_t cbfn, void *cbarg, u8 trfr_cmpl) { bfa_trc(fru, BFI_FRUVPD_H2I_WRITE_REQ); bfa_trc(fru, len); bfa_trc(fru, offset); if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2 && fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK2) return BFA_STATUS_FRU_NOT_PRESENT; if (fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK) return BFA_STATUS_CMD_NOTSUPP; if (!bfa_ioc_is_operational(fru->ioc)) return BFA_STATUS_IOC_NON_OP; if (fru->op_busy) { bfa_trc(fru, fru->op_busy); return BFA_STATUS_DEVBUSY; } fru->op_busy = 1; fru->cbfn = cbfn; fru->cbarg = cbarg; fru->residue = len; fru->offset = 0; fru->addr_off = offset; fru->ubuf = buf; fru->trfr_cmpl = trfr_cmpl; bfa_fru_write_send(fru, BFI_FRUVPD_H2I_WRITE_REQ); return BFA_STATUS_OK; } /* * Read fru vpd image. * * @param[in] fru - fru structure * @param[in] buf - read data buffer * @param[in] len - data buffer length * @param[in] offset - offset relative to starting address * @param[in] cbfn - callback function * @param[in] cbarg - callback argument * * Return status. */ bfa_status_t bfa_fruvpd_read(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset, bfa_cb_fru_t cbfn, void *cbarg) { bfa_trc(fru, BFI_FRUVPD_H2I_READ_REQ); bfa_trc(fru, len); bfa_trc(fru, offset); if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2) return BFA_STATUS_FRU_NOT_PRESENT; if (fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK && fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK2) return BFA_STATUS_CMD_NOTSUPP; if (!bfa_ioc_is_operational(fru->ioc)) return BFA_STATUS_IOC_NON_OP; if (fru->op_busy) { bfa_trc(fru, fru->op_busy); return BFA_STATUS_DEVBUSY; } fru->op_busy = 1; fru->cbfn = cbfn; fru->cbarg = cbarg; fru->residue = len; fru->offset = 0; fru->addr_off = offset; fru->ubuf = buf; bfa_fru_read_send(fru, BFI_FRUVPD_H2I_READ_REQ); return BFA_STATUS_OK; } /* * Get maximum size fru vpd image. * * @param[in] fru - fru structure * @param[out] size - maximum size of fru vpd data * * Return status. 
*/ bfa_status_t bfa_fruvpd_get_max_size(struct bfa_fru_s *fru, u32 *max_size) { if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2) return BFA_STATUS_FRU_NOT_PRESENT; if (!bfa_ioc_is_operational(fru->ioc)) return BFA_STATUS_IOC_NON_OP; if (fru->ioc->attr->card_type == BFA_MFG_TYPE_CHINOOK || fru->ioc->attr->card_type == BFA_MFG_TYPE_CHINOOK2) *max_size = BFA_FRU_CHINOOK_MAX_SIZE; else return BFA_STATUS_CMD_NOTSUPP; return BFA_STATUS_OK; } /* * tfru write. * * @param[in] fru - fru structure * @param[in] buf - update data buffer * @param[in] len - data buffer length * @param[in] offset - offset relative to starting address * @param[in] cbfn - callback function * @param[in] cbarg - callback argument * * Return status. */ bfa_status_t bfa_tfru_write(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset, bfa_cb_fru_t cbfn, void *cbarg) { bfa_trc(fru, BFI_TFRU_H2I_WRITE_REQ); bfa_trc(fru, len); bfa_trc(fru, offset); bfa_trc(fru, *((u8 *) buf)); if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2) return BFA_STATUS_FRU_NOT_PRESENT; if (!bfa_ioc_is_operational(fru->ioc)) return BFA_STATUS_IOC_NON_OP; if (fru->op_busy) { bfa_trc(fru, fru->op_busy); return BFA_STATUS_DEVBUSY; } fru->op_busy = 1; fru->cbfn = cbfn; fru->cbarg = cbarg; fru->residue = len; fru->offset = 0; fru->addr_off = offset; fru->ubuf = buf; bfa_fru_write_send(fru, BFI_TFRU_H2I_WRITE_REQ); return BFA_STATUS_OK; } /* * tfru read. * * @param[in] fru - fru structure * @param[in] buf - read data buffer * @param[in] len - data buffer length * @param[in] offset - offset relative to starting address * @param[in] cbfn - callback function * @param[in] cbarg - callback argument * * Return status. */ bfa_status_t bfa_tfru_read(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset, bfa_cb_fru_t cbfn, void *cbarg) { bfa_trc(fru, BFI_TFRU_H2I_READ_REQ); bfa_trc(fru, len); bfa_trc(fru, offset); if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2) return BFA_STATUS_FRU_NOT_PRESENT; if (!bfa_ioc_is_operational(fru->ioc)) return BFA_STATUS_IOC_NON_OP; if (fru->op_busy) { bfa_trc(fru, fru->op_busy); return BFA_STATUS_DEVBUSY; } fru->op_busy = 1; fru->cbfn = cbfn; fru->cbarg = cbarg; fru->residue = len; fru->offset = 0; fru->addr_off = offset; fru->ubuf = buf; bfa_fru_read_send(fru, BFI_TFRU_H2I_READ_REQ); return BFA_STATUS_OK; } /* * Process fru response messages upon receiving interrupts. 
* * @param[in] fruarg - fru structure * @param[in] msg - message structure */ void bfa_fru_intr(void *fruarg, struct bfi_mbmsg_s *msg) { struct bfa_fru_s *fru = fruarg; struct bfi_fru_rsp_s *rsp = (struct bfi_fru_rsp_s *)msg; u32 status; bfa_trc(fru, msg->mh.msg_id); if (!fru->op_busy) { /* * receiving response after ioc failure */ bfa_trc(fru, 0x9999); return; } switch (msg->mh.msg_id) { case BFI_FRUVPD_I2H_WRITE_RSP: case BFI_TFRU_I2H_WRITE_RSP: status = be32_to_cpu(rsp->status); bfa_trc(fru, status); if (status != BFA_STATUS_OK || fru->residue == 0) { fru->status = status; fru->op_busy = 0; if (fru->cbfn) fru->cbfn(fru->cbarg, fru->status); } else { bfa_trc(fru, fru->offset); if (msg->mh.msg_id == BFI_FRUVPD_I2H_WRITE_RSP) bfa_fru_write_send(fru, BFI_FRUVPD_H2I_WRITE_REQ); else bfa_fru_write_send(fru, BFI_TFRU_H2I_WRITE_REQ); } break; case BFI_FRUVPD_I2H_READ_RSP: case BFI_TFRU_I2H_READ_RSP: status = be32_to_cpu(rsp->status); bfa_trc(fru, status); if (status != BFA_STATUS_OK) { fru->status = status; fru->op_busy = 0; if (fru->cbfn) fru->cbfn(fru->cbarg, fru->status); } else { u32 len = be32_to_cpu(rsp->length); bfa_trc(fru, fru->offset); bfa_trc(fru, len); memcpy(fru->ubuf + fru->offset, fru->dbuf_kva, len); fru->residue -= len; fru->offset += len; if (fru->residue == 0) { fru->status = status; fru->op_busy = 0; if (fru->cbfn) fru->cbfn(fru->cbarg, fru->status); } else { if (msg->mh.msg_id == BFI_FRUVPD_I2H_READ_RSP) bfa_fru_read_send(fru, BFI_FRUVPD_H2I_READ_REQ); else bfa_fru_read_send(fru, BFI_TFRU_H2I_READ_REQ); } } break; default: WARN_ON(1); } } /* * register definitions */ #define FLI_CMD_REG 0x0001d000 #define FLI_RDDATA_REG 0x0001d010 #define FLI_ADDR_REG 0x0001d004 #define FLI_DEV_STATUS_REG 0x0001d014 #define BFA_FLASH_FIFO_SIZE 128 /* fifo size */ #define BFA_FLASH_CHECK_MAX 10000 /* max # of status check */ #define BFA_FLASH_BLOCKING_OP_MAX 1000000 /* max # of blocking op check */ #define BFA_FLASH_WIP_MASK 0x01 /* write in progress bit mask */ enum bfa_flash_cmd { BFA_FLASH_FAST_READ = 0x0b, /* fast read */ BFA_FLASH_READ_STATUS = 0x05, /* read status */ }; /* * Hardware error definition */ enum bfa_flash_err { BFA_FLASH_NOT_PRESENT = -1, /*!< flash not present */ BFA_FLASH_UNINIT = -2, /*!< flash not initialized */ BFA_FLASH_BAD = -3, /*!< flash bad */ BFA_FLASH_BUSY = -4, /*!< flash busy */ BFA_FLASH_ERR_CMD_ACT = -5, /*!< command active never cleared */ BFA_FLASH_ERR_FIFO_CNT = -6, /*!< fifo count never cleared */ BFA_FLASH_ERR_WIP = -7, /*!< write-in-progress never cleared */ BFA_FLASH_ERR_TIMEOUT = -8, /*!< fli timeout */ BFA_FLASH_ERR_LEN = -9, /*!< invalid length */ }; /* * Flash command register data structure */ union bfa_flash_cmd_reg_u { struct { #ifdef __BIG_ENDIAN u32 act:1; u32 rsv:1; u32 write_cnt:9; u32 read_cnt:9; u32 addr_cnt:4; u32 cmd:8; #else u32 cmd:8; u32 addr_cnt:4; u32 read_cnt:9; u32 write_cnt:9; u32 rsv:1; u32 act:1; #endif } r; u32 i; }; /* * Flash device status register data structure */ union bfa_flash_dev_status_reg_u { struct { #ifdef __BIG_ENDIAN u32 rsv:21; u32 fifo_cnt:6; u32 busy:1; u32 init_status:1; u32 present:1; u32 bad:1; u32 good:1; #else u32 good:1; u32 bad:1; u32 present:1; u32 init_status:1; u32 busy:1; u32 fifo_cnt:6; u32 rsv:21; #endif } r; u32 i; }; /* * Flash address register data structure */ union bfa_flash_addr_reg_u { struct { #ifdef __BIG_ENDIAN u32 addr:24; u32 dummy:8; #else u32 dummy:8; u32 addr:24; #endif } r; u32 i; }; /* * dg flash_raw_private Flash raw private functions */ static void bfa_flash_set_cmd(void 
__iomem *pci_bar, u8 wr_cnt, u8 rd_cnt, u8 ad_cnt, u8 op) { union bfa_flash_cmd_reg_u cmd; cmd.i = 0; cmd.r.act = 1; cmd.r.write_cnt = wr_cnt; cmd.r.read_cnt = rd_cnt; cmd.r.addr_cnt = ad_cnt; cmd.r.cmd = op; writel(cmd.i, (pci_bar + FLI_CMD_REG)); } static void bfa_flash_set_addr(void __iomem *pci_bar, u32 address) { union bfa_flash_addr_reg_u addr; addr.r.addr = address & 0x00ffffff; addr.r.dummy = 0; writel(addr.i, (pci_bar + FLI_ADDR_REG)); } static int bfa_flash_cmd_act_check(void __iomem *pci_bar) { union bfa_flash_cmd_reg_u cmd; cmd.i = readl(pci_bar + FLI_CMD_REG); if (cmd.r.act) return BFA_FLASH_ERR_CMD_ACT; return 0; } /* * @brief * Flush FLI data fifo. * * @param[in] pci_bar - pci bar address * @param[in] dev_status - device status * * Return 0 on success, negative error number on error. */ static u32 bfa_flash_fifo_flush(void __iomem *pci_bar) { u32 i; union bfa_flash_dev_status_reg_u dev_status; dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG); if (!dev_status.r.fifo_cnt) return 0; /* fifo counter in terms of words */ for (i = 0; i < dev_status.r.fifo_cnt; i++) readl(pci_bar + FLI_RDDATA_REG); /* * Check the device status. It may take some time. */ for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) { dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG); if (!dev_status.r.fifo_cnt) break; } if (dev_status.r.fifo_cnt) return BFA_FLASH_ERR_FIFO_CNT; return 0; } /* * @brief * Read flash status. * * @param[in] pci_bar - pci bar address * * Return 0 on success, negative error number on error. */ static u32 bfa_flash_status_read(void __iomem *pci_bar) { union bfa_flash_dev_status_reg_u dev_status; int status; u32 ret_status; int i; status = bfa_flash_fifo_flush(pci_bar); if (status < 0) return status; bfa_flash_set_cmd(pci_bar, 0, 4, 0, BFA_FLASH_READ_STATUS); for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) { status = bfa_flash_cmd_act_check(pci_bar); if (!status) break; } if (status) return status; dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG); if (!dev_status.r.fifo_cnt) return BFA_FLASH_BUSY; ret_status = readl(pci_bar + FLI_RDDATA_REG); ret_status >>= 24; status = bfa_flash_fifo_flush(pci_bar); if (status < 0) return status; return ret_status; } /* * @brief * Start flash read operation. * * @param[in] pci_bar - pci bar address * @param[in] offset - flash address offset * @param[in] len - read data length * @param[in] buf - read data buffer * * Return 0 on success, negative error number on error. */ static u32 bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len, char *buf) { int status; /* * len must be mutiple of 4 and not exceeding fifo size */ if (len == 0 || len > BFA_FLASH_FIFO_SIZE || (len & 0x03) != 0) return BFA_FLASH_ERR_LEN; /* * check status */ status = bfa_flash_status_read(pci_bar); if (status == BFA_FLASH_BUSY) status = bfa_flash_status_read(pci_bar); if (status < 0) return status; /* * check if write-in-progress bit is cleared */ if (status & BFA_FLASH_WIP_MASK) return BFA_FLASH_ERR_WIP; bfa_flash_set_addr(pci_bar, offset); bfa_flash_set_cmd(pci_bar, 0, (u8)len, 4, BFA_FLASH_FAST_READ); return 0; } /* * @brief * Check flash read operation. * * @param[in] pci_bar - pci bar address * * Return flash device status, 1 if busy, 0 if not. */ static u32 bfa_flash_read_check(void __iomem *pci_bar) { if (bfa_flash_cmd_act_check(pci_bar)) return 1; return 0; } /* * @brief * End flash read operation. 
* * @param[in] pci_bar - pci bar address * @param[in] len - read data length * @param[in] buf - read data buffer * */ static void bfa_flash_read_end(void __iomem *pci_bar, u32 len, char *buf) { u32 i; /* * read data fifo up to 32 words */ for (i = 0; i < len; i += 4) { u32 w = readl(pci_bar + FLI_RDDATA_REG); *((u32 *) (buf + i)) = swab32(w); } bfa_flash_fifo_flush(pci_bar); } /* * @brief * Perform flash raw read. * * @param[in] pci_bar - pci bar address * @param[in] offset - flash partition address offset * @param[in] buf - read data buffer * @param[in] len - read data length * * Return status. */ #define FLASH_BLOCKING_OP_MAX 500 #define FLASH_SEM_LOCK_REG 0x18820 static int bfa_raw_sem_get(void __iomem *bar) { int locked; locked = readl((bar + FLASH_SEM_LOCK_REG)); return !locked; } static bfa_status_t bfa_flash_sem_get(void __iomem *bar) { u32 n = FLASH_BLOCKING_OP_MAX; while (!bfa_raw_sem_get(bar)) { if (--n <= 0) return BFA_STATUS_BADFLASH; mdelay(10); } return BFA_STATUS_OK; } static void bfa_flash_sem_put(void __iomem *bar) { writel(0, (bar + FLASH_SEM_LOCK_REG)); } bfa_status_t bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf, u32 len) { u32 n; int status; u32 off, l, s, residue, fifo_sz; residue = len; off = 0; fifo_sz = BFA_FLASH_FIFO_SIZE; status = bfa_flash_sem_get(pci_bar); if (status != BFA_STATUS_OK) return status; while (residue) { s = offset + off; n = s / fifo_sz; l = (n + 1) * fifo_sz - s; if (l > residue) l = residue; status = bfa_flash_read_start(pci_bar, offset + off, l, &buf[off]); if (status < 0) { bfa_flash_sem_put(pci_bar); return BFA_STATUS_FAILED; } n = BFA_FLASH_BLOCKING_OP_MAX; while (bfa_flash_read_check(pci_bar)) { if (--n <= 0) { bfa_flash_sem_put(pci_bar); return BFA_STATUS_FAILED; } } bfa_flash_read_end(pci_bar, l, &buf[off]); residue -= l; off += l; } bfa_flash_sem_put(pci_bar); return BFA_STATUS_OK; }
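/*
 * Illustrative sketch (not part of the original driver): one way a caller
 * might use bfa_flash_raw_read() above to dump a region of the adapter
 * flash.  The helper name and the way the PCI BAR and buffer are obtained
 * are assumptions for illustration only.  bfa_flash_raw_read() already
 * takes and releases the FLASH_SEM_LOCK_REG semaphore and splits the
 * request at BFA_FLASH_FIFO_SIZE (128-byte) boundaries, so the caller only
 * has to keep the offset and length 32-bit aligned, which is what
 * bfa_flash_read_start() requires for each chunk.
 */
#if 0	/* example only, not built */
static bfa_status_t
example_flash_dump(void __iomem *pci_bar, u32 part_off, u32 part_len,
		   char *buf)
{
	/* reject requests the word-based FIFO read path cannot handle */
	if (part_len == 0 || (part_off & 0x03) || (part_len & 0x03))
		return BFA_STATUS_FAILED;

	/* blocking call: polls the FLI registers until the read completes */
	return bfa_flash_raw_read(pci_bar, part_off, buf, part_len);
}
#endif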
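/*
 * Illustrative sketch (not part of the original driver): the FRU VPD
 * accessors above are asynchronous.  bfa_fruvpd_read()/bfa_fruvpd_update()
 * only post the first mailbox request; bfa_fru_intr() then advances
 * fru->offset/fru->residue on every response and finally invokes the
 * caller's bfa_cb_fru_t callback with the overall status.  The callback
 * signature below is inferred from how bfa_fru_intr() calls fru->cbfn(),
 * and the process-context caller, the completion usage, and the way the
 * struct bfa_fru_s pointer is obtained are assumptions for illustration.
 */
#if 0	/* example only, not built; would need <linux/completion.h> */
static void
example_fruvpd_read_done(void *cbarg, bfa_status_t status)
{
	/* called from the mailbox handler with fru->status */
	struct completion *comp = cbarg;

	complete(comp);
}

static bfa_status_t
example_fruvpd_read(struct bfa_fru_s *fru, void *buf, u32 len, u32 off)
{
	DECLARE_COMPLETION_ONSTACK(comp);
	bfa_status_t rc;

	rc = bfa_fruvpd_read(fru, buf, len, off,
			     example_fruvpd_read_done, &comp);
	if (rc != BFA_STATUS_OK)
		return rc;	/* e.g. BFA_STATUS_DEVBUSY or IOC_NON_OP */

	/* final per-transfer status is only visible in the callback here */
	wait_for_completion(&comp);
	return BFA_STATUS_OK;
}
#endif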
linux-master
drivers/scsi/bfa/bfa_ioc.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. * Copyright (c) 2014- QLogic Corporation. * All rights reserved * www.qlogic.com * * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. */ #include "bfad_drv.h" #include "bfad_im.h" #include "bfa_fcs.h" #include "bfa_fcbuild.h" #include "bfa_fc.h" BFA_TRC_FILE(FCS, PORT); /* * ALPA to LIXA bitmap mapping * * ALPA 0x00 (Word 0, Bit 30) is invalid for N_Ports. Also Word 0 Bit 31 * is for L_bit (login required) and is filled as ALPA 0x00 here. */ static const u8 loop_alpa_map[] = { 0x00, 0x00, 0x01, 0x02, 0x04, 0x08, 0x0F, 0x10, /* Word 0 Bits 31..24 */ 0x17, 0x18, 0x1B, 0x1D, 0x1E, 0x1F, 0x23, 0x25, /* Word 0 Bits 23..16 */ 0x26, 0x27, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, /* Word 0 Bits 15..08 */ 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x39, 0x3A, /* Word 0 Bits 07..00 */ 0x3C, 0x43, 0x45, 0x46, 0x47, 0x49, 0x4A, 0x4B, /* Word 1 Bits 31..24 */ 0x4C, 0x4D, 0x4E, 0x51, 0x52, 0x53, 0x54, 0x55, /* Word 1 Bits 23..16 */ 0x56, 0x59, 0x5A, 0x5C, 0x63, 0x65, 0x66, 0x67, /* Word 1 Bits 15..08 */ 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x71, 0x72, /* Word 1 Bits 07..00 */ 0x73, 0x74, 0x75, 0x76, 0x79, 0x7A, 0x7C, 0x80, /* Word 2 Bits 31..24 */ 0x81, 0x82, 0x84, 0x88, 0x8F, 0x90, 0x97, 0x98, /* Word 2 Bits 23..16 */ 0x9B, 0x9D, 0x9E, 0x9F, 0xA3, 0xA5, 0xA6, 0xA7, /* Word 2 Bits 15..08 */ 0xA9, 0xAA, 0xAB, 0xAC, 0xAD, 0xAE, 0xB1, 0xB2, /* Word 2 Bits 07..00 */ 0xB3, 0xB4, 0xB5, 0xB6, 0xB9, 0xBA, 0xBC, 0xC3, /* Word 3 Bits 31..24 */ 0xC5, 0xC6, 0xC7, 0xC9, 0xCA, 0xCB, 0xCC, 0xCD, /* Word 3 Bits 23..16 */ 0xCE, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD9, /* Word 3 Bits 15..08 */ 0xDA, 0xDC, 0xE0, 0xE1, 0xE2, 0xE4, 0xE8, 0xEF, /* Word 3 Bits 07..00 */ }; static void bfa_fcs_lport_send_ls_rjt(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs, u8 reason_code, u8 reason_code_expl); static void bfa_fcs_lport_plogi(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs, struct fc_logi_s *plogi); static void bfa_fcs_lport_online_actions(struct bfa_fcs_lport_s *port); static void bfa_fcs_lport_offline_actions(struct bfa_fcs_lport_s *port); static void bfa_fcs_lport_unknown_init(struct bfa_fcs_lport_s *port); static void bfa_fcs_lport_unknown_online(struct bfa_fcs_lport_s *port); static void bfa_fcs_lport_unknown_offline(struct bfa_fcs_lport_s *port); static void bfa_fcs_lport_deleted(struct bfa_fcs_lport_s *port); static void bfa_fcs_lport_echo(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs, struct fc_echo_s *echo, u16 len); static void bfa_fcs_lport_rnid(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs, struct fc_rnid_cmd_s *rnid, u16 len); static void bfa_fs_port_get_gen_topo_data(struct bfa_fcs_lport_s *port, struct fc_rnid_general_topology_data_s *gen_topo_data); static void bfa_fcs_lport_fab_init(struct bfa_fcs_lport_s *port); static void bfa_fcs_lport_fab_online(struct bfa_fcs_lport_s *port); static void bfa_fcs_lport_fab_offline(struct bfa_fcs_lport_s *port); static void bfa_fcs_lport_n2n_init(struct bfa_fcs_lport_s *port); static void bfa_fcs_lport_n2n_online(struct bfa_fcs_lport_s *port); static void bfa_fcs_lport_n2n_offline(struct bfa_fcs_lport_s *port); static void bfa_fcs_lport_loop_init(struct bfa_fcs_lport_s *port); static void bfa_fcs_lport_loop_online(struct bfa_fcs_lport_s *port); static void bfa_fcs_lport_loop_offline(struct bfa_fcs_lport_s *port); static struct { void (*init) (struct bfa_fcs_lport_s *port); void (*online) (struct bfa_fcs_lport_s *port); void (*offline) 
(struct bfa_fcs_lport_s *port); } __port_action[] = { [BFA_FCS_FABRIC_UNKNOWN] = { .init = bfa_fcs_lport_unknown_init, .online = bfa_fcs_lport_unknown_online, .offline = bfa_fcs_lport_unknown_offline }, [BFA_FCS_FABRIC_SWITCHED] = { .init = bfa_fcs_lport_fab_init, .online = bfa_fcs_lport_fab_online, .offline = bfa_fcs_lport_fab_offline }, [BFA_FCS_FABRIC_N2N] = { .init = bfa_fcs_lport_n2n_init, .online = bfa_fcs_lport_n2n_online, .offline = bfa_fcs_lport_n2n_offline }, [BFA_FCS_FABRIC_LOOP] = { .init = bfa_fcs_lport_loop_init, .online = bfa_fcs_lport_loop_online, .offline = bfa_fcs_lport_loop_offline }, }; /* * fcs_port_sm FCS logical port state machine */ enum bfa_fcs_lport_event { BFA_FCS_PORT_SM_CREATE = 1, BFA_FCS_PORT_SM_ONLINE = 2, BFA_FCS_PORT_SM_OFFLINE = 3, BFA_FCS_PORT_SM_DELETE = 4, BFA_FCS_PORT_SM_DELRPORT = 5, BFA_FCS_PORT_SM_STOP = 6, }; static void bfa_fcs_lport_sm_uninit(struct bfa_fcs_lport_s *port, enum bfa_fcs_lport_event event); static void bfa_fcs_lport_sm_init(struct bfa_fcs_lport_s *port, enum bfa_fcs_lport_event event); static void bfa_fcs_lport_sm_online(struct bfa_fcs_lport_s *port, enum bfa_fcs_lport_event event); static void bfa_fcs_lport_sm_offline(struct bfa_fcs_lport_s *port, enum bfa_fcs_lport_event event); static void bfa_fcs_lport_sm_deleting(struct bfa_fcs_lport_s *port, enum bfa_fcs_lport_event event); static void bfa_fcs_lport_sm_stopping(struct bfa_fcs_lport_s *port, enum bfa_fcs_lport_event event); static void bfa_fcs_lport_sm_uninit( struct bfa_fcs_lport_s *port, enum bfa_fcs_lport_event event) { bfa_trc(port->fcs, port->port_cfg.pwwn); bfa_trc(port->fcs, event); switch (event) { case BFA_FCS_PORT_SM_CREATE: bfa_sm_set_state(port, bfa_fcs_lport_sm_init); break; default: bfa_sm_fault(port->fcs, event); } } static void bfa_fcs_lport_sm_init(struct bfa_fcs_lport_s *port, enum bfa_fcs_lport_event event) { bfa_trc(port->fcs, port->port_cfg.pwwn); bfa_trc(port->fcs, event); switch (event) { case BFA_FCS_PORT_SM_ONLINE: bfa_sm_set_state(port, bfa_fcs_lport_sm_online); bfa_fcs_lport_online_actions(port); break; case BFA_FCS_PORT_SM_DELETE: bfa_sm_set_state(port, bfa_fcs_lport_sm_uninit); bfa_fcs_lport_deleted(port); break; case BFA_FCS_PORT_SM_STOP: /* If vport - send completion call back */ if (port->vport) bfa_fcs_vport_stop_comp(port->vport); else bfa_wc_down(&(port->fabric->stop_wc)); break; case BFA_FCS_PORT_SM_OFFLINE: break; default: bfa_sm_fault(port->fcs, event); } } static void bfa_fcs_lport_sm_online( struct bfa_fcs_lport_s *port, enum bfa_fcs_lport_event event) { struct bfa_fcs_rport_s *rport; struct list_head *qe, *qen; bfa_trc(port->fcs, port->port_cfg.pwwn); bfa_trc(port->fcs, event); switch (event) { case BFA_FCS_PORT_SM_OFFLINE: bfa_sm_set_state(port, bfa_fcs_lport_sm_offline); bfa_fcs_lport_offline_actions(port); break; case BFA_FCS_PORT_SM_STOP: __port_action[port->fabric->fab_type].offline(port); if (port->num_rports == 0) { bfa_sm_set_state(port, bfa_fcs_lport_sm_init); /* If vport - send completion call back */ if (port->vport) bfa_fcs_vport_stop_comp(port->vport); else bfa_wc_down(&(port->fabric->stop_wc)); } else { bfa_sm_set_state(port, bfa_fcs_lport_sm_stopping); list_for_each_safe(qe, qen, &port->rport_q) { rport = (struct bfa_fcs_rport_s *) qe; bfa_sm_send_event(rport, RPSM_EVENT_DELETE); } } break; case BFA_FCS_PORT_SM_DELETE: __port_action[port->fabric->fab_type].offline(port); if (port->num_rports == 0) { bfa_sm_set_state(port, bfa_fcs_lport_sm_uninit); bfa_fcs_lport_deleted(port); } else { bfa_sm_set_state(port, 
bfa_fcs_lport_sm_deleting); list_for_each_safe(qe, qen, &port->rport_q) { rport = (struct bfa_fcs_rport_s *) qe; bfa_sm_send_event(rport, RPSM_EVENT_DELETE); } } break; case BFA_FCS_PORT_SM_DELRPORT: break; default: bfa_sm_fault(port->fcs, event); } } static void bfa_fcs_lport_sm_offline( struct bfa_fcs_lport_s *port, enum bfa_fcs_lport_event event) { struct bfa_fcs_rport_s *rport; struct list_head *qe, *qen; bfa_trc(port->fcs, port->port_cfg.pwwn); bfa_trc(port->fcs, event); switch (event) { case BFA_FCS_PORT_SM_ONLINE: bfa_sm_set_state(port, bfa_fcs_lport_sm_online); bfa_fcs_lport_online_actions(port); break; case BFA_FCS_PORT_SM_STOP: if (port->num_rports == 0) { bfa_sm_set_state(port, bfa_fcs_lport_sm_init); /* If vport - send completion call back */ if (port->vport) bfa_fcs_vport_stop_comp(port->vport); else bfa_wc_down(&(port->fabric->stop_wc)); } else { bfa_sm_set_state(port, bfa_fcs_lport_sm_stopping); list_for_each_safe(qe, qen, &port->rport_q) { rport = (struct bfa_fcs_rport_s *) qe; bfa_sm_send_event(rport, RPSM_EVENT_DELETE); } } break; case BFA_FCS_PORT_SM_DELETE: if (port->num_rports == 0) { bfa_sm_set_state(port, bfa_fcs_lport_sm_uninit); bfa_fcs_lport_deleted(port); } else { bfa_sm_set_state(port, bfa_fcs_lport_sm_deleting); list_for_each_safe(qe, qen, &port->rport_q) { rport = (struct bfa_fcs_rport_s *) qe; bfa_sm_send_event(rport, RPSM_EVENT_DELETE); } } break; case BFA_FCS_PORT_SM_DELRPORT: case BFA_FCS_PORT_SM_OFFLINE: break; default: bfa_sm_fault(port->fcs, event); } } static void bfa_fcs_lport_sm_stopping(struct bfa_fcs_lport_s *port, enum bfa_fcs_lport_event event) { bfa_trc(port->fcs, port->port_cfg.pwwn); bfa_trc(port->fcs, event); switch (event) { case BFA_FCS_PORT_SM_DELRPORT: if (port->num_rports == 0) { bfa_sm_set_state(port, bfa_fcs_lport_sm_init); /* If vport - send completion call back */ if (port->vport) bfa_fcs_vport_stop_comp(port->vport); else bfa_wc_down(&(port->fabric->stop_wc)); } break; default: bfa_sm_fault(port->fcs, event); } } static void bfa_fcs_lport_sm_deleting( struct bfa_fcs_lport_s *port, enum bfa_fcs_lport_event event) { bfa_trc(port->fcs, port->port_cfg.pwwn); bfa_trc(port->fcs, event); switch (event) { case BFA_FCS_PORT_SM_DELRPORT: if (port->num_rports == 0) { bfa_sm_set_state(port, bfa_fcs_lport_sm_uninit); bfa_fcs_lport_deleted(port); } break; default: bfa_sm_fault(port->fcs, event); } } /* * fcs_port_pvt */ /* * Send AEN notification */ static void bfa_fcs_lport_aen_post(struct bfa_fcs_lport_s *port, enum bfa_lport_aen_event event) { struct bfad_s *bfad = (struct bfad_s *)port->fabric->fcs->bfad; struct bfa_aen_entry_s *aen_entry; bfad_get_aen_entry(bfad, aen_entry); if (!aen_entry) return; aen_entry->aen_data.lport.vf_id = port->fabric->vf_id; aen_entry->aen_data.lport.roles = port->port_cfg.roles; aen_entry->aen_data.lport.ppwwn = bfa_fcs_lport_get_pwwn( bfa_fcs_get_base_port(port->fcs)); aen_entry->aen_data.lport.lpwwn = bfa_fcs_lport_get_pwwn(port); /* Send the AEN notification */ bfad_im_post_vendor_event(aen_entry, bfad, ++port->fcs->fcs_aen_seq, BFA_AEN_CAT_LPORT, event); } /* * Send a LS reject */ static void bfa_fcs_lport_send_ls_rjt(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs, u8 reason_code, u8 reason_code_expl) { struct fchs_s fchs; struct bfa_fcxp_s *fcxp; struct bfa_rport_s *bfa_rport = NULL; int len; bfa_trc(port->fcs, rx_fchs->d_id); bfa_trc(port->fcs, rx_fchs->s_id); fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE); if (!fcxp) return; len = fc_ls_rjt_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rx_fchs->s_id, 
bfa_fcs_lport_get_fcid(port), rx_fchs->ox_id, reason_code, reason_code_expl); bfa_fcxp_send(fcxp, bfa_rport, port->fabric->vf_id, port->lp_tag, BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0); } /* * Send a FCCT Reject */ static void bfa_fcs_lport_send_fcgs_rjt(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs, u8 reason_code, u8 reason_code_expl) { struct fchs_s fchs; struct bfa_fcxp_s *fcxp; struct bfa_rport_s *bfa_rport = NULL; int len; struct ct_hdr_s *rx_cthdr = (struct ct_hdr_s *)(rx_fchs + 1); struct ct_hdr_s *ct_hdr; bfa_trc(port->fcs, rx_fchs->d_id); bfa_trc(port->fcs, rx_fchs->s_id); fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE); if (!fcxp) return; ct_hdr = bfa_fcxp_get_reqbuf(fcxp); ct_hdr->gs_type = rx_cthdr->gs_type; ct_hdr->gs_sub_type = rx_cthdr->gs_sub_type; len = fc_gs_rjt_build(&fchs, ct_hdr, rx_fchs->s_id, bfa_fcs_lport_get_fcid(port), rx_fchs->ox_id, reason_code, reason_code_expl); bfa_fcxp_send(fcxp, bfa_rport, port->fabric->vf_id, port->lp_tag, BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0); } /* * Process incoming plogi from a remote port. */ static void bfa_fcs_lport_plogi(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs, struct fc_logi_s *plogi) { struct bfa_fcs_rport_s *rport; bfa_trc(port->fcs, rx_fchs->d_id); bfa_trc(port->fcs, rx_fchs->s_id); /* * If min cfg mode is enabled, drop any incoming PLOGIs */ if (__fcs_min_cfg(port->fcs)) { bfa_trc(port->fcs, rx_fchs->s_id); return; } if (fc_plogi_parse(rx_fchs) != FC_PARSE_OK) { bfa_trc(port->fcs, rx_fchs->s_id); /* * send a LS reject */ bfa_fcs_lport_send_ls_rjt(port, rx_fchs, FC_LS_RJT_RSN_PROTOCOL_ERROR, FC_LS_RJT_EXP_SPARMS_ERR_OPTIONS); return; } /* * Direct Attach P2P mode : verify address assigned by the r-port. */ if ((!bfa_fcs_fabric_is_switched(port->fabric)) && (memcmp((void *)&bfa_fcs_lport_get_pwwn(port), (void *)&plogi->port_name, sizeof(wwn_t)) < 0)) { if (BFA_FCS_PID_IS_WKA(rx_fchs->d_id)) { /* Address assigned to us cannot be a WKA */ bfa_fcs_lport_send_ls_rjt(port, rx_fchs, FC_LS_RJT_RSN_PROTOCOL_ERROR, FC_LS_RJT_EXP_INVALID_NPORT_ID); return; } port->pid = rx_fchs->d_id; bfa_lps_set_n2n_pid(port->fabric->lps, rx_fchs->d_id); } /* * First, check if we know the device by pwwn. */ rport = bfa_fcs_lport_get_rport_by_pwwn(port, plogi->port_name); if (rport) { /* * Direct Attach P2P mode : handle address assigned by r-port. */ if ((!bfa_fcs_fabric_is_switched(port->fabric)) && (memcmp((void *)&bfa_fcs_lport_get_pwwn(port), (void *)&plogi->port_name, sizeof(wwn_t)) < 0)) { port->pid = rx_fchs->d_id; bfa_lps_set_n2n_pid(port->fabric->lps, rx_fchs->d_id); rport->pid = rx_fchs->s_id; } bfa_fcs_rport_plogi(rport, rx_fchs, plogi); return; } /* * Next, lookup rport by PID. */ rport = bfa_fcs_lport_get_rport_by_pid(port, rx_fchs->s_id); if (!rport) { /* * Inbound PLOGI from a new device. */ bfa_fcs_rport_plogi_create(port, rx_fchs, plogi); return; } /* * Rport is known only by PID. */ if (rport->pwwn) { /* * This is a different device with the same pid. Old device * disappeared. Send implicit LOGO to old device. */ WARN_ON(rport->pwwn == plogi->port_name); bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP); /* * Inbound PLOGI from a new device (with old PID). */ bfa_fcs_rport_plogi_create(port, rx_fchs, plogi); return; } /* * PLOGI crossing each other. */ WARN_ON(rport->pwwn != WWN_NULL); bfa_fcs_rport_plogi(rport, rx_fchs, plogi); } /* * Process incoming ECHO. * Since it does not require a login, it is processed here. 
*/ static void bfa_fcs_lport_echo(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs, struct fc_echo_s *echo, u16 rx_len) { struct fchs_s fchs; struct bfa_fcxp_s *fcxp; struct bfa_rport_s *bfa_rport = NULL; int len, pyld_len; bfa_trc(port->fcs, rx_fchs->s_id); bfa_trc(port->fcs, rx_fchs->d_id); fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE); if (!fcxp) return; len = fc_ls_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rx_fchs->s_id, bfa_fcs_lport_get_fcid(port), rx_fchs->ox_id); /* * Copy the payload (if any) from the echo frame */ pyld_len = rx_len - sizeof(struct fchs_s); bfa_trc(port->fcs, rx_len); bfa_trc(port->fcs, pyld_len); if (pyld_len > len) memcpy(((u8 *) bfa_fcxp_get_reqbuf(fcxp)) + sizeof(struct fc_echo_s), (echo + 1), (pyld_len - sizeof(struct fc_echo_s))); bfa_fcxp_send(fcxp, bfa_rport, port->fabric->vf_id, port->lp_tag, BFA_FALSE, FC_CLASS_3, pyld_len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0); } /* * Process incoming RNID. * Since it does not require a login, it is processed here. */ static void bfa_fcs_lport_rnid(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs, struct fc_rnid_cmd_s *rnid, u16 rx_len) { struct fc_rnid_common_id_data_s common_id_data; struct fc_rnid_general_topology_data_s gen_topo_data; struct fchs_s fchs; struct bfa_fcxp_s *fcxp; struct bfa_rport_s *bfa_rport = NULL; u16 len; u32 data_format; bfa_trc(port->fcs, rx_fchs->s_id); bfa_trc(port->fcs, rx_fchs->d_id); bfa_trc(port->fcs, rx_len); fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE); if (!fcxp) return; /* * Check Node Indentification Data Format * We only support General Topology Discovery Format. * For any other requested Data Formats, we return Common Node Id Data * only, as per FC-LS. */ bfa_trc(port->fcs, rnid->node_id_data_format); if (rnid->node_id_data_format == RNID_NODEID_DATA_FORMAT_DISCOVERY) { data_format = RNID_NODEID_DATA_FORMAT_DISCOVERY; /* * Get General topology data for this port */ bfa_fs_port_get_gen_topo_data(port, &gen_topo_data); } else { data_format = RNID_NODEID_DATA_FORMAT_COMMON; } /* * Copy the Node Id Info */ common_id_data.port_name = bfa_fcs_lport_get_pwwn(port); common_id_data.node_name = bfa_fcs_lport_get_nwwn(port); len = fc_rnid_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rx_fchs->s_id, bfa_fcs_lport_get_fcid(port), rx_fchs->ox_id, data_format, &common_id_data, &gen_topo_data); bfa_fcxp_send(fcxp, bfa_rport, port->fabric->vf_id, port->lp_tag, BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0); } /* * Fill out General Topolpgy Discovery Data for RNID ELS. 
*/ static void bfa_fs_port_get_gen_topo_data(struct bfa_fcs_lport_s *port, struct fc_rnid_general_topology_data_s *gen_topo_data) { memset(gen_topo_data, 0, sizeof(struct fc_rnid_general_topology_data_s)); gen_topo_data->asso_type = cpu_to_be32(RNID_ASSOCIATED_TYPE_HOST); gen_topo_data->phy_port_num = 0; /* @todo */ gen_topo_data->num_attached_nodes = cpu_to_be32(1); } static void bfa_fcs_lport_online_actions(struct bfa_fcs_lport_s *port) { struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad; char lpwwn_buf[BFA_STRING_32]; bfa_trc(port->fcs, port->fabric->oper_type); __port_action[port->fabric->fab_type].init(port); __port_action[port->fabric->fab_type].online(port); wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port)); BFA_LOG(KERN_WARNING, bfad, bfa_log_level, "Logical port online: WWN = %s Role = %s\n", lpwwn_buf, "Initiator"); bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_ONLINE); bfad->bfad_flags |= BFAD_PORT_ONLINE; } static void bfa_fcs_lport_offline_actions(struct bfa_fcs_lport_s *port) { struct list_head *qe, *qen; struct bfa_fcs_rport_s *rport; struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad; char lpwwn_buf[BFA_STRING_32]; bfa_trc(port->fcs, port->fabric->oper_type); __port_action[port->fabric->fab_type].offline(port); wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port)); if (bfa_sm_cmp_state(port->fabric, bfa_fcs_fabric_sm_online) == BFA_TRUE) { BFA_LOG(KERN_WARNING, bfad, bfa_log_level, "Logical port lost fabric connectivity: WWN = %s Role = %s\n", lpwwn_buf, "Initiator"); bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_DISCONNECT); } else { BFA_LOG(KERN_WARNING, bfad, bfa_log_level, "Logical port taken offline: WWN = %s Role = %s\n", lpwwn_buf, "Initiator"); bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_OFFLINE); } list_for_each_safe(qe, qen, &port->rport_q) { rport = (struct bfa_fcs_rport_s *) qe; bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP); } } static void bfa_fcs_lport_unknown_init(struct bfa_fcs_lport_s *port) { WARN_ON(1); } static void bfa_fcs_lport_unknown_online(struct bfa_fcs_lport_s *port) { WARN_ON(1); } static void bfa_fcs_lport_unknown_offline(struct bfa_fcs_lport_s *port) { WARN_ON(1); } static void bfa_fcs_lport_abts_acc(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs) { struct fchs_s fchs; struct bfa_fcxp_s *fcxp; int len; bfa_trc(port->fcs, rx_fchs->d_id); bfa_trc(port->fcs, rx_fchs->s_id); fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE); if (!fcxp) return; len = fc_ba_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rx_fchs->s_id, bfa_fcs_lport_get_fcid(port), rx_fchs->ox_id, 0); bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0); } static void bfa_fcs_lport_deleted(struct bfa_fcs_lport_s *port) { struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad; char lpwwn_buf[BFA_STRING_32]; wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port)); BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Logical port deleted: WWN = %s Role = %s\n", lpwwn_buf, "Initiator"); bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_DELETE); /* Base port will be deleted by the OS driver */ if (port->vport) bfa_fcs_vport_delete_comp(port->vport); else bfa_wc_down(&port->fabric->wc); } /* * Unsolicited frame receive handling. 
*/ void bfa_fcs_lport_uf_recv(struct bfa_fcs_lport_s *lport, struct fchs_s *fchs, u16 len) { u32 pid = fchs->s_id; struct bfa_fcs_rport_s *rport = NULL; struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1); bfa_stats(lport, uf_recvs); bfa_trc(lport->fcs, fchs->type); if (!bfa_fcs_lport_is_online(lport)) { /* * In direct attach topology, it is possible to get a PLOGI * before the lport is online due to port feature * (QoS/Trunk/FEC/CR), so send a rjt */ if ((fchs->type == FC_TYPE_ELS) && (els_cmd->els_code == FC_ELS_PLOGI)) { bfa_fcs_lport_send_ls_rjt(lport, fchs, FC_LS_RJT_RSN_UNABLE_TO_PERF_CMD, FC_LS_RJT_EXP_NO_ADDL_INFO); bfa_stats(lport, plogi_rcvd); } else bfa_stats(lport, uf_recv_drops); return; } /* * First, handle ELSs that donot require a login. */ /* * Handle PLOGI first */ if ((fchs->type == FC_TYPE_ELS) && (els_cmd->els_code == FC_ELS_PLOGI)) { bfa_fcs_lport_plogi(lport, fchs, (struct fc_logi_s *) els_cmd); return; } /* * Handle ECHO separately. */ if ((fchs->type == FC_TYPE_ELS) && (els_cmd->els_code == FC_ELS_ECHO)) { bfa_fcs_lport_echo(lport, fchs, (struct fc_echo_s *)els_cmd, len); return; } /* * Handle RNID separately. */ if ((fchs->type == FC_TYPE_ELS) && (els_cmd->els_code == FC_ELS_RNID)) { bfa_fcs_lport_rnid(lport, fchs, (struct fc_rnid_cmd_s *) els_cmd, len); return; } if (fchs->type == FC_TYPE_BLS) { if ((fchs->routing == FC_RTG_BASIC_LINK) && (fchs->cat_info == FC_CAT_ABTS)) bfa_fcs_lport_abts_acc(lport, fchs); return; } if (fchs->type == FC_TYPE_SERVICES) { /* * Unhandled FC-GS frames. Send a FC-CT Reject */ bfa_fcs_lport_send_fcgs_rjt(lport, fchs, CT_RSN_NOT_SUPP, CT_NS_EXP_NOADDITIONAL); return; } /* * look for a matching remote port ID */ rport = bfa_fcs_lport_get_rport_by_pid(lport, pid); if (rport) { bfa_trc(rport->fcs, fchs->s_id); bfa_trc(rport->fcs, fchs->d_id); bfa_trc(rport->fcs, fchs->type); bfa_fcs_rport_uf_recv(rport, fchs, len); return; } /* * Only handles ELS frames for now. */ if (fchs->type != FC_TYPE_ELS) { bfa_trc(lport->fcs, fchs->s_id); bfa_trc(lport->fcs, fchs->d_id); /* ignore type FC_TYPE_FC_FSS */ if (fchs->type != FC_TYPE_FC_FSS) bfa_sm_fault(lport->fcs, fchs->type); return; } bfa_trc(lport->fcs, els_cmd->els_code); if (els_cmd->els_code == FC_ELS_RSCN) { bfa_fcs_lport_scn_process_rscn(lport, fchs, len); return; } if (els_cmd->els_code == FC_ELS_LOGO) { /* * @todo Handle LOGO frames received. */ return; } if (els_cmd->els_code == FC_ELS_PRLI) { /* * @todo Handle PRLI frames received. */ return; } /* * Unhandled ELS frames. Send a LS_RJT. 
*/ bfa_fcs_lport_send_ls_rjt(lport, fchs, FC_LS_RJT_RSN_CMD_NOT_SUPP, FC_LS_RJT_EXP_NO_ADDL_INFO); } /* * PID based Lookup for a R-Port in the Port R-Port Queue */ struct bfa_fcs_rport_s * bfa_fcs_lport_get_rport_by_pid(struct bfa_fcs_lport_s *port, u32 pid) { struct bfa_fcs_rport_s *rport; struct list_head *qe; list_for_each(qe, &port->rport_q) { rport = (struct bfa_fcs_rport_s *) qe; if (rport->pid == pid) return rport; } bfa_trc(port->fcs, pid); return NULL; } /* * OLD_PID based Lookup for a R-Port in the Port R-Port Queue */ struct bfa_fcs_rport_s * bfa_fcs_lport_get_rport_by_old_pid(struct bfa_fcs_lport_s *port, u32 pid) { struct bfa_fcs_rport_s *rport; struct list_head *qe; list_for_each(qe, &port->rport_q) { rport = (struct bfa_fcs_rport_s *) qe; if (rport->old_pid == pid) return rport; } bfa_trc(port->fcs, pid); return NULL; } /* * PWWN based Lookup for a R-Port in the Port R-Port Queue */ struct bfa_fcs_rport_s * bfa_fcs_lport_get_rport_by_pwwn(struct bfa_fcs_lport_s *port, wwn_t pwwn) { struct bfa_fcs_rport_s *rport; struct list_head *qe; list_for_each(qe, &port->rport_q) { rport = (struct bfa_fcs_rport_s *) qe; if (wwn_is_equal(rport->pwwn, pwwn)) return rport; } bfa_trc(port->fcs, pwwn); return NULL; } /* * NWWN based Lookup for a R-Port in the Port R-Port Queue */ struct bfa_fcs_rport_s * bfa_fcs_lport_get_rport_by_nwwn(struct bfa_fcs_lport_s *port, wwn_t nwwn) { struct bfa_fcs_rport_s *rport; struct list_head *qe; list_for_each(qe, &port->rport_q) { rport = (struct bfa_fcs_rport_s *) qe; if (wwn_is_equal(rport->nwwn, nwwn)) return rport; } bfa_trc(port->fcs, nwwn); return NULL; } /* * PWWN & PID based Lookup for a R-Port in the Port R-Port Queue */ struct bfa_fcs_rport_s * bfa_fcs_lport_get_rport_by_qualifier(struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 pid) { struct bfa_fcs_rport_s *rport; struct list_head *qe; list_for_each(qe, &port->rport_q) { rport = (struct bfa_fcs_rport_s *) qe; if (wwn_is_equal(rport->pwwn, pwwn) && rport->pid == pid) return rport; } bfa_trc(port->fcs, pwwn); return NULL; } /* * Called by rport module when new rports are discovered. */ void bfa_fcs_lport_add_rport( struct bfa_fcs_lport_s *port, struct bfa_fcs_rport_s *rport) { list_add_tail(&rport->qe, &port->rport_q); port->num_rports++; } /* * Called by rport module to when rports are deleted. */ void bfa_fcs_lport_del_rport( struct bfa_fcs_lport_s *port, struct bfa_fcs_rport_s *rport) { WARN_ON(!bfa_q_is_on_q(&port->rport_q, rport)); list_del(&rport->qe); port->num_rports--; bfa_sm_send_event(port, BFA_FCS_PORT_SM_DELRPORT); } /* * Called by fabric for base port when fabric login is complete. * Called by vport for virtual ports when FDISC is complete. */ void bfa_fcs_lport_online(struct bfa_fcs_lport_s *port) { bfa_sm_send_event(port, BFA_FCS_PORT_SM_ONLINE); } /* * Called by fabric for base port when fabric goes offline. * Called by vport for virtual ports when virtual port becomes offline. */ void bfa_fcs_lport_offline(struct bfa_fcs_lport_s *port) { bfa_sm_send_event(port, BFA_FCS_PORT_SM_OFFLINE); } /* * Called by fabric for base port and by vport for virtual ports * when target mode driver is unloaded. */ void bfa_fcs_lport_stop(struct bfa_fcs_lport_s *port) { bfa_sm_send_event(port, BFA_FCS_PORT_SM_STOP); } /* * Called by fabric to delete base lport and associated resources. * * Called by vport to delete lport and associated resources. Should call * bfa_fcs_vport_delete_comp() for vports on completion. 
*/ void bfa_fcs_lport_delete(struct bfa_fcs_lport_s *port) { bfa_sm_send_event(port, BFA_FCS_PORT_SM_DELETE); } /* * Return TRUE if port is online, else return FALSE */ bfa_boolean_t bfa_fcs_lport_is_online(struct bfa_fcs_lport_s *port) { return bfa_sm_cmp_state(port, bfa_fcs_lport_sm_online); } /* * Attach time initialization of logical ports. */ void bfa_fcs_lport_attach(struct bfa_fcs_lport_s *lport, struct bfa_fcs_s *fcs, u16 vf_id, struct bfa_fcs_vport_s *vport) { lport->fcs = fcs; lport->fabric = bfa_fcs_vf_lookup(fcs, vf_id); lport->vport = vport; lport->lp_tag = (vport) ? vport->lps->bfa_tag : lport->fabric->lps->bfa_tag; INIT_LIST_HEAD(&lport->rport_q); lport->num_rports = 0; } /* * Logical port initialization of base or virtual port. * Called by fabric for base port or by vport for virtual ports. */ void bfa_fcs_lport_init(struct bfa_fcs_lport_s *lport, struct bfa_lport_cfg_s *port_cfg) { struct bfa_fcs_vport_s *vport = lport->vport; struct bfad_s *bfad = (struct bfad_s *)lport->fcs->bfad; char lpwwn_buf[BFA_STRING_32]; lport->port_cfg = *port_cfg; lport->bfad_port = bfa_fcb_lport_new(lport->fcs->bfad, lport, lport->port_cfg.roles, lport->fabric->vf_drv, vport ? vport->vport_drv : NULL); wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(lport)); BFA_LOG(KERN_INFO, bfad, bfa_log_level, "New logical port created: WWN = %s Role = %s\n", lpwwn_buf, "Initiator"); bfa_fcs_lport_aen_post(lport, BFA_LPORT_AEN_NEW); bfa_sm_set_state(lport, bfa_fcs_lport_sm_uninit); bfa_sm_send_event(lport, BFA_FCS_PORT_SM_CREATE); } void bfa_fcs_lport_set_symname(struct bfa_fcs_lport_s *port, char *symname) { strcpy(port->port_cfg.sym_name.symname, symname); if (bfa_sm_cmp_state(port, bfa_fcs_lport_sm_online)) bfa_fcs_lport_ns_util_send_rspn_id( BFA_FCS_GET_NS_FROM_PORT(port), NULL); } /* * fcs_lport_api */ void bfa_fcs_lport_get_attr( struct bfa_fcs_lport_s *port, struct bfa_lport_attr_s *port_attr) { if (bfa_sm_cmp_state(port, bfa_fcs_lport_sm_online)) port_attr->pid = port->pid; else port_attr->pid = 0; port_attr->port_cfg = port->port_cfg; if (port->fabric) { port_attr->port_type = port->fabric->oper_type; port_attr->loopback = bfa_sm_cmp_state(port->fabric, bfa_fcs_fabric_sm_loopback); port_attr->authfail = bfa_sm_cmp_state(port->fabric, bfa_fcs_fabric_sm_auth_failed); port_attr->fabric_name = bfa_fcs_lport_get_fabric_name(port); memcpy(port_attr->fabric_ip_addr, bfa_fcs_lport_get_fabric_ipaddr(port), BFA_FCS_FABRIC_IPADDR_SZ); if (port->vport != NULL) { port_attr->port_type = BFA_PORT_TYPE_VPORT; port_attr->fpma_mac = port->vport->lps->lp_mac; } else { port_attr->fpma_mac = port->fabric->lps->lp_mac; } } else { port_attr->port_type = BFA_PORT_TYPE_UNKNOWN; port_attr->state = BFA_LPORT_UNINIT; } } /* * bfa_fcs_lport_fab port fab functions */ /* * Called by port to initialize fabric services of the base port. */ static void bfa_fcs_lport_fab_init(struct bfa_fcs_lport_s *port) { bfa_fcs_lport_ns_init(port); bfa_fcs_lport_scn_init(port); bfa_fcs_lport_ms_init(port); } /* * Called by port to notify transition to online state. */ static void bfa_fcs_lport_fab_online(struct bfa_fcs_lport_s *port) { bfa_fcs_lport_ns_online(port); bfa_fcs_lport_fab_scn_online(port); } /* * Called by port to notify transition to offline state. */ static void bfa_fcs_lport_fab_offline(struct bfa_fcs_lport_s *port) { bfa_fcs_lport_ns_offline(port); bfa_fcs_lport_scn_offline(port); bfa_fcs_lport_ms_offline(port); } /* * bfa_fcs_lport_n2n functions */ /* * Called by fcs/port to initialize N2N topology. 
*/ static void bfa_fcs_lport_n2n_init(struct bfa_fcs_lport_s *port) { } /* * Called by fcs/port to notify transition to online state. */ static void bfa_fcs_lport_n2n_online(struct bfa_fcs_lport_s *port) { struct bfa_fcs_lport_n2n_s *n2n_port = &port->port_topo.pn2n; struct bfa_lport_cfg_s *pcfg = &port->port_cfg; struct bfa_fcs_rport_s *rport; bfa_trc(port->fcs, pcfg->pwwn); /* * If our PWWN is > than that of the r-port, we have to initiate PLOGI * and assign an Address. if not, we need to wait for its PLOGI. * * If our PWWN is < than that of the remote port, it will send a PLOGI * with the PIDs assigned. The rport state machine take care of this * incoming PLOGI. */ if (memcmp ((void *)&pcfg->pwwn, (void *)&n2n_port->rem_port_wwn, sizeof(wwn_t)) > 0) { port->pid = N2N_LOCAL_PID; bfa_lps_set_n2n_pid(port->fabric->lps, N2N_LOCAL_PID); /* * First, check if we know the device by pwwn. */ rport = bfa_fcs_lport_get_rport_by_pwwn(port, n2n_port->rem_port_wwn); if (rport) { bfa_trc(port->fcs, rport->pid); bfa_trc(port->fcs, rport->pwwn); rport->pid = N2N_REMOTE_PID; bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_SEND); return; } /* * In n2n there can be only one rport. Delete the old one * whose pid should be zero, because it is offline. */ if (port->num_rports > 0) { rport = bfa_fcs_lport_get_rport_by_pid(port, 0); WARN_ON(rport == NULL); if (rport) { bfa_trc(port->fcs, rport->pwwn); bfa_sm_send_event(rport, RPSM_EVENT_DELETE); } } bfa_fcs_rport_create(port, N2N_REMOTE_PID); } } /* * Called by fcs/port to notify transition to offline state. */ static void bfa_fcs_lport_n2n_offline(struct bfa_fcs_lport_s *port) { struct bfa_fcs_lport_n2n_s *n2n_port = &port->port_topo.pn2n; bfa_trc(port->fcs, port->pid); port->pid = 0; n2n_port->rem_port_wwn = 0; n2n_port->reply_oxid = 0; } static void bfa_fcport_get_loop_attr(struct bfa_fcs_lport_s *port) { int i = 0, j = 0, bit = 0, alpa_bit = 0; u8 k = 0; struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(port->fcs->bfa); port->port_topo.ploop.alpabm_valid = fcport->alpabm_valid; port->pid = fcport->myalpa; port->pid = bfa_hton3b(port->pid); for (i = 0; i < (FC_ALPA_MAX / 8); i++) { for (j = 0, alpa_bit = 0; j < 8; j++, alpa_bit++) { bfa_trc(port->fcs->bfa, fcport->alpabm.alpa_bm[i]); bit = (fcport->alpabm.alpa_bm[i] & (1 << (7 - j))); if (bit) { port->port_topo.ploop.alpa_pos_map[k] = loop_alpa_map[(i * 8) + alpa_bit]; k++; bfa_trc(port->fcs->bfa, k); bfa_trc(port->fcs->bfa, port->port_topo.ploop.alpa_pos_map[k]); } } } port->port_topo.ploop.num_alpa = k; } /* * Called by fcs/port to initialize Loop topology. */ static void bfa_fcs_lport_loop_init(struct bfa_fcs_lport_s *port) { } /* * Called by fcs/port to notify transition to online state. 
*/ static void bfa_fcs_lport_loop_online(struct bfa_fcs_lport_s *port) { u8 num_alpa = 0, alpabm_valid = 0; struct bfa_fcs_rport_s *rport; u8 *alpa_map = NULL; int i = 0; u32 pid; bfa_fcport_get_loop_attr(port); num_alpa = port->port_topo.ploop.num_alpa; alpabm_valid = port->port_topo.ploop.alpabm_valid; alpa_map = port->port_topo.ploop.alpa_pos_map; bfa_trc(port->fcs->bfa, port->pid); bfa_trc(port->fcs->bfa, num_alpa); if (alpabm_valid == 1) { for (i = 0; i < num_alpa; i++) { bfa_trc(port->fcs->bfa, alpa_map[i]); if (alpa_map[i] != bfa_hton3b(port->pid)) { pid = alpa_map[i]; bfa_trc(port->fcs->bfa, pid); rport = bfa_fcs_lport_get_rport_by_pid(port, bfa_hton3b(pid)); if (!rport) rport = bfa_fcs_rport_create(port, bfa_hton3b(pid)); } } } else { for (i = 0; i < MAX_ALPA_COUNT; i++) { if (alpa_map[i] != port->pid) { pid = loop_alpa_map[i]; bfa_trc(port->fcs->bfa, pid); rport = bfa_fcs_lport_get_rport_by_pid(port, bfa_hton3b(pid)); if (!rport) rport = bfa_fcs_rport_create(port, bfa_hton3b(pid)); } } } } /* * Called by fcs/port to notify transition to offline state. */ static void bfa_fcs_lport_loop_offline(struct bfa_fcs_lport_s *port) { } #define BFA_FCS_FDMI_CMD_MAX_RETRIES 2 /* * forward declarations */ static void bfa_fcs_lport_fdmi_send_rhba(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced); static void bfa_fcs_lport_fdmi_send_rprt(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced); static void bfa_fcs_lport_fdmi_send_rpa(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced); static void bfa_fcs_lport_fdmi_rhba_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, bfa_status_t req_status, u32 rsp_len, u32 resid_len, struct fchs_s *rsp_fchs); static void bfa_fcs_lport_fdmi_rprt_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, bfa_status_t req_status, u32 rsp_len, u32 resid_len, struct fchs_s *rsp_fchs); static void bfa_fcs_lport_fdmi_rpa_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, bfa_status_t req_status, u32 rsp_len, u32 resid_len, struct fchs_s *rsp_fchs); static void bfa_fcs_lport_fdmi_timeout(void *arg); static int bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld); static u16 bfa_fcs_lport_fdmi_build_rprt_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld); static u16 bfa_fcs_lport_fdmi_build_rpa_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld); static u16 bfa_fcs_lport_fdmi_build_portattr_block(struct bfa_fcs_lport_fdmi_s * fdmi, u8 *pyld); static void bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi, struct bfa_fcs_fdmi_hba_attr_s *hba_attr); static void bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi, struct bfa_fcs_fdmi_port_attr_s *port_attr); u32 bfa_fcs_fdmi_convert_speed(enum bfa_port_speed pport_speed); /* * fcs_fdmi_sm FCS FDMI state machine */ /* * FDMI State Machine events */ enum port_fdmi_event { FDMISM_EVENT_PORT_ONLINE = 1, FDMISM_EVENT_PORT_OFFLINE = 2, FDMISM_EVENT_RSP_OK = 4, FDMISM_EVENT_RSP_ERROR = 5, FDMISM_EVENT_TIMEOUT = 6, FDMISM_EVENT_RHBA_SENT = 7, FDMISM_EVENT_RPRT_SENT = 8, FDMISM_EVENT_RPA_SENT = 9, }; static void bfa_fcs_lport_fdmi_sm_offline(struct bfa_fcs_lport_fdmi_s *fdmi, enum port_fdmi_event event); static void bfa_fcs_lport_fdmi_sm_sending_rhba( struct bfa_fcs_lport_fdmi_s *fdmi, enum port_fdmi_event event); static void bfa_fcs_lport_fdmi_sm_rhba(struct bfa_fcs_lport_fdmi_s *fdmi, enum port_fdmi_event event); static void bfa_fcs_lport_fdmi_sm_rhba_retry( struct bfa_fcs_lport_fdmi_s *fdmi, enum port_fdmi_event event); static void bfa_fcs_lport_fdmi_sm_sending_rprt( 
struct bfa_fcs_lport_fdmi_s *fdmi, enum port_fdmi_event event); static void bfa_fcs_lport_fdmi_sm_rprt(struct bfa_fcs_lport_fdmi_s *fdmi, enum port_fdmi_event event); static void bfa_fcs_lport_fdmi_sm_rprt_retry( struct bfa_fcs_lport_fdmi_s *fdmi, enum port_fdmi_event event); static void bfa_fcs_lport_fdmi_sm_sending_rpa( struct bfa_fcs_lport_fdmi_s *fdmi, enum port_fdmi_event event); static void bfa_fcs_lport_fdmi_sm_rpa(struct bfa_fcs_lport_fdmi_s *fdmi, enum port_fdmi_event event); static void bfa_fcs_lport_fdmi_sm_rpa_retry( struct bfa_fcs_lport_fdmi_s *fdmi, enum port_fdmi_event event); static void bfa_fcs_lport_fdmi_sm_online(struct bfa_fcs_lport_fdmi_s *fdmi, enum port_fdmi_event event); static void bfa_fcs_lport_fdmi_sm_disabled( struct bfa_fcs_lport_fdmi_s *fdmi, enum port_fdmi_event event); /* * Start in offline state - awaiting MS to send start. */ static void bfa_fcs_lport_fdmi_sm_offline(struct bfa_fcs_lport_fdmi_s *fdmi, enum port_fdmi_event event) { struct bfa_fcs_lport_s *port = fdmi->ms->port; bfa_trc(port->fcs, port->port_cfg.pwwn); bfa_trc(port->fcs, event); fdmi->retry_cnt = 0; switch (event) { case FDMISM_EVENT_PORT_ONLINE: if (port->vport) { /* * For Vports, register a new port. */ bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_sending_rprt); bfa_fcs_lport_fdmi_send_rprt(fdmi, NULL); } else { /* * For a base port, we should first register the HBA * attribute. The HBA attribute also contains the base * port registration. */ bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_sending_rhba); bfa_fcs_lport_fdmi_send_rhba(fdmi, NULL); } break; case FDMISM_EVENT_PORT_OFFLINE: break; default: bfa_sm_fault(port->fcs, event); } } static void bfa_fcs_lport_fdmi_sm_sending_rhba(struct bfa_fcs_lport_fdmi_s *fdmi, enum port_fdmi_event event) { struct bfa_fcs_lport_s *port = fdmi->ms->port; bfa_trc(port->fcs, port->port_cfg.pwwn); bfa_trc(port->fcs, event); switch (event) { case FDMISM_EVENT_RHBA_SENT: bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_rhba); break; case FDMISM_EVENT_PORT_OFFLINE: bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline); bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(port), &fdmi->fcxp_wqe); break; default: bfa_sm_fault(port->fcs, event); } } static void bfa_fcs_lport_fdmi_sm_rhba(struct bfa_fcs_lport_fdmi_s *fdmi, enum port_fdmi_event event) { struct bfa_fcs_lport_s *port = fdmi->ms->port; bfa_trc(port->fcs, port->port_cfg.pwwn); bfa_trc(port->fcs, event); switch (event) { case FDMISM_EVENT_RSP_ERROR: /* * if max retries have not been reached, start timer for a * delayed retry */ if (fdmi->retry_cnt++ < BFA_FCS_FDMI_CMD_MAX_RETRIES) { bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_rhba_retry); bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(port), &fdmi->timer, bfa_fcs_lport_fdmi_timeout, fdmi, BFA_FCS_RETRY_TIMEOUT); } else { /* * set state to offline */ bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline); } break; case FDMISM_EVENT_RSP_OK: /* * Initiate Register Port Attributes */ bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_sending_rpa); fdmi->retry_cnt = 0; bfa_fcs_lport_fdmi_send_rpa(fdmi, NULL); break; case FDMISM_EVENT_PORT_OFFLINE: bfa_fcxp_discard(fdmi->fcxp); bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline); break; default: bfa_sm_fault(port->fcs, event); } } static void bfa_fcs_lport_fdmi_sm_rhba_retry(struct bfa_fcs_lport_fdmi_s *fdmi, enum port_fdmi_event event) { struct bfa_fcs_lport_s *port = fdmi->ms->port; bfa_trc(port->fcs, port->port_cfg.pwwn); bfa_trc(port->fcs, event); switch (event) { case FDMISM_EVENT_TIMEOUT: /* * Retry Timer Expired. 
Re-send */ bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_sending_rhba); bfa_fcs_lport_fdmi_send_rhba(fdmi, NULL); break; case FDMISM_EVENT_PORT_OFFLINE: bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline); bfa_timer_stop(&fdmi->timer); break; default: bfa_sm_fault(port->fcs, event); } } /* * RPRT : Register Port */ static void bfa_fcs_lport_fdmi_sm_sending_rprt(struct bfa_fcs_lport_fdmi_s *fdmi, enum port_fdmi_event event) { struct bfa_fcs_lport_s *port = fdmi->ms->port; bfa_trc(port->fcs, port->port_cfg.pwwn); bfa_trc(port->fcs, event); switch (event) { case FDMISM_EVENT_RPRT_SENT: bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_rprt); break; case FDMISM_EVENT_PORT_OFFLINE: bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline); bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(port), &fdmi->fcxp_wqe); break; default: bfa_sm_fault(port->fcs, event); } } static void bfa_fcs_lport_fdmi_sm_rprt(struct bfa_fcs_lport_fdmi_s *fdmi, enum port_fdmi_event event) { struct bfa_fcs_lport_s *port = fdmi->ms->port; bfa_trc(port->fcs, port->port_cfg.pwwn); bfa_trc(port->fcs, event); switch (event) { case FDMISM_EVENT_RSP_ERROR: /* * if max retries have not been reached, start timer for a * delayed retry */ if (fdmi->retry_cnt++ < BFA_FCS_FDMI_CMD_MAX_RETRIES) { bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_rprt_retry); bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(port), &fdmi->timer, bfa_fcs_lport_fdmi_timeout, fdmi, BFA_FCS_RETRY_TIMEOUT); } else { /* * set state to offline */ bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline); fdmi->retry_cnt = 0; } break; case FDMISM_EVENT_RSP_OK: fdmi->retry_cnt = 0; bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_online); break; case FDMISM_EVENT_PORT_OFFLINE: bfa_fcxp_discard(fdmi->fcxp); bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline); break; default: bfa_sm_fault(port->fcs, event); } } static void bfa_fcs_lport_fdmi_sm_rprt_retry(struct bfa_fcs_lport_fdmi_s *fdmi, enum port_fdmi_event event) { struct bfa_fcs_lport_s *port = fdmi->ms->port; bfa_trc(port->fcs, port->port_cfg.pwwn); bfa_trc(port->fcs, event); switch (event) { case FDMISM_EVENT_TIMEOUT: /* * Retry Timer Expired. 
Re-send */ bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_sending_rprt); bfa_fcs_lport_fdmi_send_rprt(fdmi, NULL); break; case FDMISM_EVENT_PORT_OFFLINE: bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline); bfa_timer_stop(&fdmi->timer); break; default: bfa_sm_fault(port->fcs, event); } } /* * Register Port Attributes */ static void bfa_fcs_lport_fdmi_sm_sending_rpa(struct bfa_fcs_lport_fdmi_s *fdmi, enum port_fdmi_event event) { struct bfa_fcs_lport_s *port = fdmi->ms->port; bfa_trc(port->fcs, port->port_cfg.pwwn); bfa_trc(port->fcs, event); switch (event) { case FDMISM_EVENT_RPA_SENT: bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_rpa); break; case FDMISM_EVENT_PORT_OFFLINE: bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline); bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(port), &fdmi->fcxp_wqe); break; default: bfa_sm_fault(port->fcs, event); } } static void bfa_fcs_lport_fdmi_sm_rpa(struct bfa_fcs_lport_fdmi_s *fdmi, enum port_fdmi_event event) { struct bfa_fcs_lport_s *port = fdmi->ms->port; bfa_trc(port->fcs, port->port_cfg.pwwn); bfa_trc(port->fcs, event); switch (event) { case FDMISM_EVENT_RSP_ERROR: /* * if max retries have not been reached, start timer for a * delayed retry */ if (fdmi->retry_cnt++ < BFA_FCS_FDMI_CMD_MAX_RETRIES) { bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_rpa_retry); bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(port), &fdmi->timer, bfa_fcs_lport_fdmi_timeout, fdmi, BFA_FCS_RETRY_TIMEOUT); } else { /* * set state to offline */ bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline); fdmi->retry_cnt = 0; } break; case FDMISM_EVENT_RSP_OK: bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_online); fdmi->retry_cnt = 0; break; case FDMISM_EVENT_PORT_OFFLINE: bfa_fcxp_discard(fdmi->fcxp); bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline); break; default: bfa_sm_fault(port->fcs, event); } } static void bfa_fcs_lport_fdmi_sm_rpa_retry(struct bfa_fcs_lport_fdmi_s *fdmi, enum port_fdmi_event event) { struct bfa_fcs_lport_s *port = fdmi->ms->port; bfa_trc(port->fcs, port->port_cfg.pwwn); bfa_trc(port->fcs, event); switch (event) { case FDMISM_EVENT_TIMEOUT: /* * Retry Timer Expired. Re-send */ bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_sending_rpa); bfa_fcs_lport_fdmi_send_rpa(fdmi, NULL); break; case FDMISM_EVENT_PORT_OFFLINE: bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline); bfa_timer_stop(&fdmi->timer); break; default: bfa_sm_fault(port->fcs, event); } } static void bfa_fcs_lport_fdmi_sm_online(struct bfa_fcs_lport_fdmi_s *fdmi, enum port_fdmi_event event) { struct bfa_fcs_lport_s *port = fdmi->ms->port; bfa_trc(port->fcs, port->port_cfg.pwwn); bfa_trc(port->fcs, event); switch (event) { case FDMISM_EVENT_PORT_OFFLINE: bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline); break; default: bfa_sm_fault(port->fcs, event); } } /* * FDMI is disabled state. */ static void bfa_fcs_lport_fdmi_sm_disabled(struct bfa_fcs_lport_fdmi_s *fdmi, enum port_fdmi_event event) { struct bfa_fcs_lport_s *port = fdmi->ms->port; bfa_trc(port->fcs, port->port_cfg.pwwn); bfa_trc(port->fcs, event); /* No op State. It can only be enabled at Driver Init. */ } /* * RHBA : Register HBA Attributes. */ static void bfa_fcs_lport_fdmi_send_rhba(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced) { struct bfa_fcs_lport_fdmi_s *fdmi = fdmi_cbarg; struct bfa_fcs_lport_s *port = fdmi->ms->port; struct fchs_s fchs; int len, attr_len; struct bfa_fcxp_s *fcxp; u8 *pyld; bfa_trc(port->fcs, port->port_cfg.pwwn); fcxp = fcxp_alloced ? 
fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE); if (!fcxp) { bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &fdmi->fcxp_wqe, bfa_fcs_lport_fdmi_send_rhba, fdmi, BFA_TRUE); return; } fdmi->fcxp = fcxp; pyld = bfa_fcxp_get_reqbuf(fcxp); memset(pyld, 0, FC_MAX_PDUSZ); len = fc_fdmi_reqhdr_build(&fchs, pyld, bfa_fcs_lport_get_fcid(port), FDMI_RHBA); attr_len = bfa_fcs_lport_fdmi_build_rhba_pyld(fdmi, (u8 *) ((struct ct_hdr_s *) pyld + 1)); if (attr_len < 0) return; bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, FC_CLASS_3, (len + attr_len), &fchs, bfa_fcs_lport_fdmi_rhba_response, (void *)fdmi, FC_MAX_PDUSZ, FC_FCCT_TOV); bfa_sm_send_event(fdmi, FDMISM_EVENT_RHBA_SENT); } static int bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld) { struct bfa_fcs_lport_s *port = fdmi->ms->port; struct bfa_fcs_fdmi_hba_attr_s *fcs_hba_attr; struct fdmi_rhba_s *rhba = (struct fdmi_rhba_s *) pyld; struct fdmi_attr_s *attr; int len; u8 *curr_ptr; u16 templen, count; fcs_hba_attr = kzalloc(sizeof(*fcs_hba_attr), GFP_KERNEL); if (!fcs_hba_attr) return -ENOMEM; /* * get hba attributes */ bfa_fcs_fdmi_get_hbaattr(fdmi, fcs_hba_attr); rhba->hba_id = bfa_fcs_lport_get_pwwn(port); rhba->port_list.num_ports = cpu_to_be32(1); rhba->port_list.port_entry = bfa_fcs_lport_get_pwwn(port); len = sizeof(rhba->hba_id) + sizeof(rhba->port_list); count = 0; len += sizeof(rhba->hba_attr_blk.attr_count); /* * fill out the invididual entries of the HBA attrib Block */ curr_ptr = (u8 *) &rhba->hba_attr_blk.hba_attr; /* * Node Name */ attr = (struct fdmi_attr_s *) curr_ptr; attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_NODENAME); templen = sizeof(wwn_t); memcpy(attr->value, &bfa_fcs_lport_get_nwwn(port), templen); curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; len += templen; count++; attr->len = cpu_to_be16(templen + sizeof(attr->type) + sizeof(templen)); /* * Manufacturer */ attr = (struct fdmi_attr_s *) curr_ptr; attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MANUFACTURER); templen = (u16) strlen(fcs_hba_attr->manufacturer); memcpy(attr->value, fcs_hba_attr->manufacturer, templen); templen = fc_roundup(templen, sizeof(u32)); curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; len += templen; count++; attr->len = cpu_to_be16(templen + sizeof(attr->type) + sizeof(templen)); /* * Serial Number */ attr = (struct fdmi_attr_s *) curr_ptr; attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_SERIALNUM); templen = (u16) strlen(fcs_hba_attr->serial_num); memcpy(attr->value, fcs_hba_attr->serial_num, templen); templen = fc_roundup(templen, sizeof(u32)); curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; len += templen; count++; attr->len = cpu_to_be16(templen + sizeof(attr->type) + sizeof(templen)); /* * Model */ attr = (struct fdmi_attr_s *) curr_ptr; attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MODEL); templen = (u16) strlen(fcs_hba_attr->model); memcpy(attr->value, fcs_hba_attr->model, templen); templen = fc_roundup(templen, sizeof(u32)); curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; len += templen; count++; attr->len = cpu_to_be16(templen + sizeof(attr->type) + sizeof(templen)); /* * Model Desc */ attr = (struct fdmi_attr_s *) curr_ptr; attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MODEL_DESC); templen = (u16) strlen(fcs_hba_attr->model_desc); memcpy(attr->value, fcs_hba_attr->model_desc, templen); templen = fc_roundup(templen, sizeof(u32)); curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; len += templen; count++; attr->len = cpu_to_be16(templen + 
sizeof(attr->type) + sizeof(templen)); /* * H/W Version */ if (fcs_hba_attr->hw_version[0] != '\0') { attr = (struct fdmi_attr_s *) curr_ptr; attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_HW_VERSION); templen = (u16) strlen(fcs_hba_attr->hw_version); memcpy(attr->value, fcs_hba_attr->hw_version, templen); templen = fc_roundup(templen, sizeof(u32)); curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; len += templen; count++; attr->len = cpu_to_be16(templen + sizeof(attr->type) + sizeof(templen)); } /* * Driver Version */ attr = (struct fdmi_attr_s *) curr_ptr; attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_DRIVER_VERSION); templen = (u16) strlen(fcs_hba_attr->driver_version); memcpy(attr->value, fcs_hba_attr->driver_version, templen); templen = fc_roundup(templen, sizeof(u32)); curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; len += templen; count++; attr->len = cpu_to_be16(templen + sizeof(attr->type) + sizeof(templen)); /* * Option Rom Version */ if (fcs_hba_attr->option_rom_ver[0] != '\0') { attr = (struct fdmi_attr_s *) curr_ptr; attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_ROM_VERSION); templen = (u16) strlen(fcs_hba_attr->option_rom_ver); memcpy(attr->value, fcs_hba_attr->option_rom_ver, templen); templen = fc_roundup(templen, sizeof(u32)); curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; len += templen; count++; attr->len = cpu_to_be16(templen + sizeof(attr->type) + sizeof(templen)); } attr = (struct fdmi_attr_s *) curr_ptr; attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_FW_VERSION); templen = (u16) strlen(fcs_hba_attr->fw_version); memcpy(attr->value, fcs_hba_attr->fw_version, templen); templen = fc_roundup(templen, sizeof(u32)); curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; len += templen; count++; attr->len = cpu_to_be16(templen + sizeof(attr->type) + sizeof(templen)); /* * OS Name */ if (fcs_hba_attr->os_name[0] != '\0') { attr = (struct fdmi_attr_s *) curr_ptr; attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_OS_NAME); templen = (u16) strlen(fcs_hba_attr->os_name); memcpy(attr->value, fcs_hba_attr->os_name, templen); templen = fc_roundup(templen, sizeof(u32)); curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; len += templen; count++; attr->len = cpu_to_be16(templen + sizeof(attr->type) + sizeof(templen)); } /* * MAX_CT_PAYLOAD */ attr = (struct fdmi_attr_s *) curr_ptr; attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MAX_CT); templen = sizeof(fcs_hba_attr->max_ct_pyld); memcpy(attr->value, &fcs_hba_attr->max_ct_pyld, templen); templen = fc_roundup(templen, sizeof(u32)); curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; len += templen; count++; attr->len = cpu_to_be16(templen + sizeof(attr->type) + sizeof(templen)); /* * Send extended attributes ( FOS 7.1 support ) */ if (fdmi->retry_cnt == 0) { attr = (struct fdmi_attr_s *) curr_ptr; attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_NODE_SYM_NAME); templen = sizeof(fcs_hba_attr->node_sym_name); memcpy(attr->value, &fcs_hba_attr->node_sym_name, templen); templen = fc_roundup(templen, sizeof(u32)); curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; len += templen; count++; attr->len = cpu_to_be16(templen + sizeof(attr->type) + sizeof(templen)); attr = (struct fdmi_attr_s *) curr_ptr; attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_VENDOR_ID); templen = sizeof(fcs_hba_attr->vendor_info); memcpy(attr->value, &fcs_hba_attr->vendor_info, templen); templen = fc_roundup(templen, sizeof(u32)); curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; len += templen; count++; attr->len = cpu_to_be16(templen + 
sizeof(attr->type) + sizeof(templen)); attr = (struct fdmi_attr_s *) curr_ptr; attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_NUM_PORTS); templen = sizeof(fcs_hba_attr->num_ports); memcpy(attr->value, &fcs_hba_attr->num_ports, templen); templen = fc_roundup(templen, sizeof(u32)); curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; len += templen; count++; attr->len = cpu_to_be16(templen + sizeof(attr->type) + sizeof(templen)); attr = (struct fdmi_attr_s *) curr_ptr; attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_FABRIC_NAME); templen = sizeof(fcs_hba_attr->fabric_name); memcpy(attr->value, &fcs_hba_attr->fabric_name, templen); templen = fc_roundup(templen, sizeof(u32)); curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; len += templen; count++; attr->len = cpu_to_be16(templen + sizeof(attr->type) + sizeof(templen)); attr = (struct fdmi_attr_s *) curr_ptr; attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_BIOS_VER); templen = sizeof(fcs_hba_attr->bios_ver); memcpy(attr->value, &fcs_hba_attr->bios_ver, templen); templen = fc_roundup(templen, sizeof(u32)); curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; len += templen; count++; attr->len = cpu_to_be16(templen + sizeof(attr->type) + sizeof(templen)); } /* * Update size of payload */ len += ((sizeof(attr->type) + sizeof(attr->len)) * count); rhba->hba_attr_blk.attr_count = cpu_to_be32(count); kfree(fcs_hba_attr); return len; } static void bfa_fcs_lport_fdmi_rhba_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, bfa_status_t req_status, u32 rsp_len, u32 resid_len, struct fchs_s *rsp_fchs) { struct bfa_fcs_lport_fdmi_s *fdmi = (struct bfa_fcs_lport_fdmi_s *) cbarg; struct bfa_fcs_lport_s *port = fdmi->ms->port; struct ct_hdr_s *cthdr = NULL; bfa_trc(port->fcs, port->port_cfg.pwwn); /* * Sanity Checks */ if (req_status != BFA_STATUS_OK) { bfa_trc(port->fcs, req_status); bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR); return; } cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code); if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_OK); return; } bfa_trc(port->fcs, cthdr->reason_code); bfa_trc(port->fcs, cthdr->exp_code); bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR); } /* * RPRT : Register Port */ static void bfa_fcs_lport_fdmi_send_rprt(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced) { struct bfa_fcs_lport_fdmi_s *fdmi = fdmi_cbarg; struct bfa_fcs_lport_s *port = fdmi->ms->port; struct fchs_s fchs; u16 len, attr_len; struct bfa_fcxp_s *fcxp; u8 *pyld; bfa_trc(port->fcs, port->port_cfg.pwwn); fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE); if (!fcxp) { bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &fdmi->fcxp_wqe, bfa_fcs_lport_fdmi_send_rprt, fdmi, BFA_TRUE); return; } fdmi->fcxp = fcxp; pyld = bfa_fcxp_get_reqbuf(fcxp); memset(pyld, 0, FC_MAX_PDUSZ); len = fc_fdmi_reqhdr_build(&fchs, pyld, bfa_fcs_lport_get_fcid(port), FDMI_RPRT); attr_len = bfa_fcs_lport_fdmi_build_rprt_pyld(fdmi, (u8 *) ((struct ct_hdr_s *) pyld + 1)); bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, FC_CLASS_3, len + attr_len, &fchs, bfa_fcs_lport_fdmi_rprt_response, (void *)fdmi, FC_MAX_PDUSZ, FC_FCCT_TOV); bfa_sm_send_event(fdmi, FDMISM_EVENT_RPRT_SENT); } /* * This routine builds the Port Attribute Block that is used in RPA, RPRT commands.
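* Each attribute is encoded as a 2-byte type, a 2-byte length and a value padded to a 4-byte boundary; the attribute count is written into the block header once all entries are filled.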
*/ static u16 bfa_fcs_lport_fdmi_build_portattr_block(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld) { struct bfa_fcs_fdmi_port_attr_s fcs_port_attr; struct fdmi_port_attr_s *port_attrib = (struct fdmi_port_attr_s *) pyld; struct fdmi_attr_s *attr; u8 *curr_ptr; u16 len; u8 count = 0; u16 templen; /* * get port attributes */ bfa_fcs_fdmi_get_portattr(fdmi, &fcs_port_attr); len = sizeof(port_attrib->attr_count); /* * fill out the individual entries */ curr_ptr = (u8 *) &port_attrib->port_attr; /* * FC4 Types */ attr = (struct fdmi_attr_s *) curr_ptr; attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_FC4_TYPES); templen = sizeof(fcs_port_attr.supp_fc4_types); memcpy(attr->value, fcs_port_attr.supp_fc4_types, templen); curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; len += templen; ++count; attr->len = cpu_to_be16(templen + sizeof(attr->type) + sizeof(templen)); /* * Supported Speed */ attr = (struct fdmi_attr_s *) curr_ptr; attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_SUPP_SPEED); templen = sizeof(fcs_port_attr.supp_speed); memcpy(attr->value, &fcs_port_attr.supp_speed, templen); curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; len += templen; ++count; attr->len = cpu_to_be16(templen + sizeof(attr->type) + sizeof(templen)); /* * current Port Speed */ attr = (struct fdmi_attr_s *) curr_ptr; attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_PORT_SPEED); templen = sizeof(fcs_port_attr.curr_speed); memcpy(attr->value, &fcs_port_attr.curr_speed, templen); curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; len += templen; ++count; attr->len = cpu_to_be16(templen + sizeof(attr->type) + sizeof(templen)); /* * max frame size */ attr = (struct fdmi_attr_s *) curr_ptr; attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_FRAME_SIZE); templen = sizeof(fcs_port_attr.max_frm_size); memcpy(attr->value, &fcs_port_attr.max_frm_size, templen); curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; len += templen; ++count; attr->len = cpu_to_be16(templen + sizeof(attr->type) + sizeof(templen)); /* * OS Device Name */ if (fcs_port_attr.os_device_name[0] != '\0') { attr = (struct fdmi_attr_s *) curr_ptr; attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_DEV_NAME); templen = (u16) strlen(fcs_port_attr.os_device_name); memcpy(attr->value, fcs_port_attr.os_device_name, templen); templen = fc_roundup(templen, sizeof(u32)); curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; len += templen; ++count; attr->len = cpu_to_be16(templen + sizeof(attr->type) + sizeof(templen)); } /* * Host Name */ if (fcs_port_attr.host_name[0] != '\0') { attr = (struct fdmi_attr_s *) curr_ptr; attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_HOST_NAME); templen = (u16) strlen(fcs_port_attr.host_name); memcpy(attr->value, fcs_port_attr.host_name, templen); templen = fc_roundup(templen, sizeof(u32)); curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; len += templen; ++count; attr->len = cpu_to_be16(templen + sizeof(attr->type) + sizeof(templen)); } if (fdmi->retry_cnt == 0) { attr = (struct fdmi_attr_s *) curr_ptr; attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_NODE_NAME); templen = sizeof(fcs_port_attr.node_name); memcpy(attr->value, &fcs_port_attr.node_name, templen); templen = fc_roundup(templen, sizeof(u32)); curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; len += templen; ++count; attr->len = cpu_to_be16(templen + sizeof(attr->type) + sizeof(templen)); attr = (struct fdmi_attr_s *) curr_ptr; attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_PORT_NAME); templen = sizeof(fcs_port_attr.port_name); memcpy(attr->value,
&fcs_port_attr.port_name, templen); templen = fc_roundup(templen, sizeof(u32)); curr_ptr += sizeof(attr->type) + sizeof(attr->len) + templen; len += templen; ++count; attr->len = cpu_to_be16(templen + sizeof(attr->type) + sizeof(templen)); if (fcs_port_attr.port_sym_name.symname[0] != '\0') { attr = (struct fdmi_attr_s *) curr_ptr; attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_PORT_SYM_NAME); templen = sizeof(fcs_port_attr.port_sym_name); memcpy(attr->value, &fcs_port_attr.port_sym_name, templen); templen = fc_roundup(templen, sizeof(u32)); curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; len += templen; ++count; attr->len = cpu_to_be16(templen + sizeof(attr->type) + sizeof(templen)); } attr = (struct fdmi_attr_s *) curr_ptr; attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_PORT_TYPE); templen = sizeof(fcs_port_attr.port_type); memcpy(attr->value, &fcs_port_attr.port_type, templen); templen = fc_roundup(templen, sizeof(u32)); curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; len += templen; ++count; attr->len = cpu_to_be16(templen + sizeof(attr->type) + sizeof(templen)); attr = (struct fdmi_attr_s *) curr_ptr; attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_SUPP_COS); templen = sizeof(fcs_port_attr.scos); memcpy(attr->value, &fcs_port_attr.scos, templen); templen = fc_roundup(templen, sizeof(u32)); curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; len += templen; ++count; attr->len = cpu_to_be16(templen + sizeof(attr->type) + sizeof(templen)); attr = (struct fdmi_attr_s *) curr_ptr; attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_PORT_FAB_NAME); templen = sizeof(fcs_port_attr.port_fabric_name); memcpy(attr->value, &fcs_port_attr.port_fabric_name, templen); templen = fc_roundup(templen, sizeof(u32)); curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; len += templen; ++count; attr->len = cpu_to_be16(templen + sizeof(attr->type) + sizeof(templen)); attr = (struct fdmi_attr_s *) curr_ptr; attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_PORT_FC4_TYPE); templen = sizeof(fcs_port_attr.port_act_fc4_type); memcpy(attr->value, fcs_port_attr.port_act_fc4_type, templen); templen = fc_roundup(templen, sizeof(u32)); curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; len += templen; ++count; attr->len = cpu_to_be16(templen + sizeof(attr->type) + sizeof(templen)); attr = (struct fdmi_attr_s *) curr_ptr; attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_PORT_STATE); templen = sizeof(fcs_port_attr.port_state); memcpy(attr->value, &fcs_port_attr.port_state, templen); templen = fc_roundup(templen, sizeof(u32)); curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; len += templen; ++count; attr->len = cpu_to_be16(templen + sizeof(attr->type) + sizeof(templen)); attr = (struct fdmi_attr_s *) curr_ptr; attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_PORT_NUM_RPRT); templen = sizeof(fcs_port_attr.num_ports); memcpy(attr->value, &fcs_port_attr.num_ports, templen); templen = fc_roundup(templen, sizeof(u32)); curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; len += templen; ++count; attr->len = cpu_to_be16(templen + sizeof(attr->type) + sizeof(templen)); } /* * Update size of payload */ port_attrib->attr_count = cpu_to_be32(count); len += ((sizeof(attr->type) + sizeof(attr->len)) * count); return len; } static u16 bfa_fcs_lport_fdmi_build_rprt_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld) { struct bfa_fcs_lport_s *port = fdmi->ms->port; struct fdmi_rprt_s *rprt = (struct fdmi_rprt_s *) pyld; u16 len; rprt->hba_id = bfa_fcs_lport_get_pwwn(bfa_fcs_get_base_port(port->fcs)); rprt->port_name = 
bfa_fcs_lport_get_pwwn(port); len = bfa_fcs_lport_fdmi_build_portattr_block(fdmi, (u8 *) &rprt->port_attr_blk); len += sizeof(rprt->hba_id) + sizeof(rprt->port_name); return len; } static void bfa_fcs_lport_fdmi_rprt_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, bfa_status_t req_status, u32 rsp_len, u32 resid_len, struct fchs_s *rsp_fchs) { struct bfa_fcs_lport_fdmi_s *fdmi = (struct bfa_fcs_lport_fdmi_s *) cbarg; struct bfa_fcs_lport_s *port = fdmi->ms->port; struct ct_hdr_s *cthdr = NULL; bfa_trc(port->fcs, port->port_cfg.pwwn); /* * Sanity Checks */ if (req_status != BFA_STATUS_OK) { bfa_trc(port->fcs, req_status); bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR); return; } cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code); if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_OK); return; } bfa_trc(port->fcs, cthdr->reason_code); bfa_trc(port->fcs, cthdr->exp_code); bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR); } /* * RPA : Register Port Attributes. */ static void bfa_fcs_lport_fdmi_send_rpa(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced) { struct bfa_fcs_lport_fdmi_s *fdmi = fdmi_cbarg; struct bfa_fcs_lport_s *port = fdmi->ms->port; struct fchs_s fchs; u16 len, attr_len; struct bfa_fcxp_s *fcxp; u8 *pyld; bfa_trc(port->fcs, port->port_cfg.pwwn); fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE); if (!fcxp) { bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &fdmi->fcxp_wqe, bfa_fcs_lport_fdmi_send_rpa, fdmi, BFA_TRUE); return; } fdmi->fcxp = fcxp; pyld = bfa_fcxp_get_reqbuf(fcxp); memset(pyld, 0, FC_MAX_PDUSZ); len = fc_fdmi_reqhdr_build(&fchs, pyld, bfa_fcs_lport_get_fcid(port), FDMI_RPA); attr_len = bfa_fcs_lport_fdmi_build_rpa_pyld(fdmi, (u8 *) ((struct ct_hdr_s *) pyld + 1)); bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, FC_CLASS_3, len + attr_len, &fchs, bfa_fcs_lport_fdmi_rpa_response, (void *)fdmi, FC_MAX_PDUSZ, FC_FCCT_TOV); bfa_sm_send_event(fdmi, FDMISM_EVENT_RPA_SENT); } static u16 bfa_fcs_lport_fdmi_build_rpa_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld) { struct bfa_fcs_lport_s *port = fdmi->ms->port; struct fdmi_rpa_s *rpa = (struct fdmi_rpa_s *) pyld; u16 len; rpa->port_name = bfa_fcs_lport_get_pwwn(port); len = bfa_fcs_lport_fdmi_build_portattr_block(fdmi, (u8 *) &rpa->port_attr_blk); len += sizeof(rpa->port_name); return len; } static void bfa_fcs_lport_fdmi_rpa_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, bfa_status_t req_status, u32 rsp_len, u32 resid_len, struct fchs_s *rsp_fchs) { struct bfa_fcs_lport_fdmi_s *fdmi = (struct bfa_fcs_lport_fdmi_s *) cbarg; struct bfa_fcs_lport_s *port = fdmi->ms->port; struct ct_hdr_s *cthdr = NULL; bfa_trc(port->fcs, port->port_cfg.pwwn); /* * Sanity Checks */ if (req_status != BFA_STATUS_OK) { bfa_trc(port->fcs, req_status); bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR); return; } cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code); if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_OK); return; } bfa_trc(port->fcs, cthdr->reason_code); bfa_trc(port->fcs, cthdr->exp_code); bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR); } static void bfa_fcs_lport_fdmi_timeout(void *arg) { struct bfa_fcs_lport_fdmi_s *fdmi = (struct bfa_fcs_lport_fdmi_s *) arg; bfa_sm_send_event(fdmi, FDMISM_EVENT_TIMEOUT); } static void bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi, struct 
bfa_fcs_fdmi_hba_attr_s *hba_attr) { struct bfa_fcs_lport_s *port = fdmi->ms->port; struct bfa_fcs_driver_info_s *driver_info = &port->fcs->driver_info; struct bfa_fcs_fdmi_port_attr_s fcs_port_attr; memset(hba_attr, 0, sizeof(struct bfa_fcs_fdmi_hba_attr_s)); bfa_ioc_get_adapter_manufacturer(&port->fcs->bfa->ioc, hba_attr->manufacturer); bfa_ioc_get_adapter_serial_num(&port->fcs->bfa->ioc, hba_attr->serial_num); bfa_ioc_get_adapter_model(&port->fcs->bfa->ioc, hba_attr->model); bfa_ioc_get_adapter_model(&port->fcs->bfa->ioc, hba_attr->model_desc); bfa_ioc_get_pci_chip_rev(&port->fcs->bfa->ioc, hba_attr->hw_version); bfa_ioc_get_adapter_optrom_ver(&port->fcs->bfa->ioc, hba_attr->option_rom_ver); bfa_ioc_get_adapter_fw_ver(&port->fcs->bfa->ioc, hba_attr->fw_version); strscpy(hba_attr->driver_version, (char *)driver_info->version, sizeof(hba_attr->driver_version)); strscpy(hba_attr->os_name, driver_info->host_os_name, sizeof(hba_attr->os_name)); /* * If there is a patch level, append it * to the os name along with a separator */ if (driver_info->host_os_patch[0] != '\0') { strlcat(hba_attr->os_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR, sizeof(hba_attr->os_name)); strlcat(hba_attr->os_name, driver_info->host_os_patch, sizeof(hba_attr->os_name)); } /* Retrieve the max frame size from the port attr */ bfa_fcs_fdmi_get_portattr(fdmi, &fcs_port_attr); hba_attr->max_ct_pyld = fcs_port_attr.max_frm_size; strscpy(hba_attr->node_sym_name.symname, port->port_cfg.node_sym_name.symname, BFA_SYMNAME_MAXLEN); strcpy(hba_attr->vendor_info, "QLogic"); hba_attr->num_ports = cpu_to_be32(bfa_ioc_get_nports(&port->fcs->bfa->ioc)); hba_attr->fabric_name = port->fabric->lps->pr_nwwn; strscpy(hba_attr->bios_ver, hba_attr->option_rom_ver, BFA_VERSION_LEN); } static void bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi, struct bfa_fcs_fdmi_port_attr_s *port_attr) { struct bfa_fcs_lport_s *port = fdmi->ms->port; struct bfa_fcs_driver_info_s *driver_info = &port->fcs->driver_info; struct bfa_port_attr_s pport_attr; struct bfa_lport_attr_s lport_attr; memset(port_attr, 0, sizeof(struct bfa_fcs_fdmi_port_attr_s)); /* * get pport attributes from hal */ bfa_fcport_get_attr(port->fcs->bfa, &pport_attr); /* * get FC4 type Bitmask */ fc_get_fc4type_bitmask(FC_TYPE_FCP, port_attr->supp_fc4_types); /* * Supported Speeds */ switch (pport_attr.speed_supported) { case BFA_PORT_SPEED_16GBPS: port_attr->supp_speed = cpu_to_be32(BFA_FCS_FDMI_SUPP_SPEEDS_16G); break; case BFA_PORT_SPEED_10GBPS: port_attr->supp_speed = cpu_to_be32(BFA_FCS_FDMI_SUPP_SPEEDS_10G); break; case BFA_PORT_SPEED_8GBPS: port_attr->supp_speed = cpu_to_be32(BFA_FCS_FDMI_SUPP_SPEEDS_8G); break; case BFA_PORT_SPEED_4GBPS: port_attr->supp_speed = cpu_to_be32(BFA_FCS_FDMI_SUPP_SPEEDS_4G); break; default: bfa_sm_fault(port->fcs, pport_attr.speed_supported); } /* * Current Speed */ port_attr->curr_speed = cpu_to_be32( bfa_fcs_fdmi_convert_speed(pport_attr.speed)); /* * Max PDU Size. 
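* Reported from the physical port's configured maximum frame size.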
*/ port_attr->max_frm_size = cpu_to_be32(pport_attr.pport_cfg.maxfrsize); /* * OS device Name */ strscpy(port_attr->os_device_name, driver_info->os_device_name, sizeof(port_attr->os_device_name)); /* * Host name */ strscpy(port_attr->host_name, driver_info->host_machine_name, sizeof(port_attr->host_name)); port_attr->node_name = bfa_fcs_lport_get_nwwn(port); port_attr->port_name = bfa_fcs_lport_get_pwwn(port); strscpy(port_attr->port_sym_name.symname, bfa_fcs_lport_get_psym_name(port).symname, BFA_SYMNAME_MAXLEN); bfa_fcs_lport_get_attr(port, &lport_attr); port_attr->port_type = cpu_to_be32(lport_attr.port_type); port_attr->scos = pport_attr.cos_supported; port_attr->port_fabric_name = port->fabric->lps->pr_nwwn; fc_get_fc4type_bitmask(FC_TYPE_FCP, port_attr->port_act_fc4_type); port_attr->port_state = cpu_to_be32(pport_attr.port_state); port_attr->num_ports = cpu_to_be32(port->num_rports); } /* * Convert BFA speed to FDMI format. */ u32 bfa_fcs_fdmi_convert_speed(bfa_port_speed_t pport_speed) { u32 ret; switch (pport_speed) { case BFA_PORT_SPEED_1GBPS: case BFA_PORT_SPEED_2GBPS: ret = pport_speed; break; case BFA_PORT_SPEED_4GBPS: ret = FDMI_TRANS_SPEED_4G; break; case BFA_PORT_SPEED_8GBPS: ret = FDMI_TRANS_SPEED_8G; break; case BFA_PORT_SPEED_10GBPS: ret = FDMI_TRANS_SPEED_10G; break; case BFA_PORT_SPEED_16GBPS: ret = FDMI_TRANS_SPEED_16G; break; default: ret = FDMI_TRANS_SPEED_UNKNOWN; } return ret; } void bfa_fcs_lport_fdmi_init(struct bfa_fcs_lport_ms_s *ms) { struct bfa_fcs_lport_fdmi_s *fdmi = &ms->fdmi; fdmi->ms = ms; if (ms->port->fcs->fdmi_enabled) bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline); else bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_disabled); } void bfa_fcs_lport_fdmi_offline(struct bfa_fcs_lport_ms_s *ms) { struct bfa_fcs_lport_fdmi_s *fdmi = &ms->fdmi; fdmi->ms = ms; bfa_sm_send_event(fdmi, FDMISM_EVENT_PORT_OFFLINE); } void bfa_fcs_lport_fdmi_online(struct bfa_fcs_lport_ms_s *ms) { struct bfa_fcs_lport_fdmi_s *fdmi = &ms->fdmi; fdmi->ms = ms; bfa_sm_send_event(fdmi, FDMISM_EVENT_PORT_ONLINE); } #define BFA_FCS_MS_CMD_MAX_RETRIES 2 /* * forward declarations */ static void bfa_fcs_lport_ms_send_plogi(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced); static void bfa_fcs_lport_ms_timeout(void *arg); static void bfa_fcs_lport_ms_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, bfa_status_t req_status, u32 rsp_len, u32 resid_len, struct fchs_s *rsp_fchs); static void bfa_fcs_lport_ms_send_gmal(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced); static void bfa_fcs_lport_ms_gmal_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, bfa_status_t req_status, u32 rsp_len, u32 resid_len, struct fchs_s *rsp_fchs); static void bfa_fcs_lport_ms_send_gfn(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced); static void bfa_fcs_lport_ms_gfn_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, bfa_status_t req_status, u32 rsp_len, u32 resid_len, struct fchs_s *rsp_fchs); /* * fcs_ms_sm FCS MS state machine */ /* * MS State Machine events */ enum port_ms_event { MSSM_EVENT_PORT_ONLINE = 1, MSSM_EVENT_PORT_OFFLINE = 2, MSSM_EVENT_RSP_OK = 3, MSSM_EVENT_RSP_ERROR = 4, MSSM_EVENT_TIMEOUT = 5, MSSM_EVENT_FCXP_SENT = 6, MSSM_EVENT_PORT_FABRIC_RSCN = 7 }; static void bfa_fcs_lport_ms_sm_offline(struct bfa_fcs_lport_ms_s *ms, enum port_ms_event event); static void bfa_fcs_lport_ms_sm_plogi_sending(struct bfa_fcs_lport_ms_s *ms, enum port_ms_event event); static void bfa_fcs_lport_ms_sm_plogi(struct bfa_fcs_lport_ms_s *ms, enum port_ms_event event); static 
void bfa_fcs_lport_ms_sm_plogi_retry(struct bfa_fcs_lport_ms_s *ms, enum port_ms_event event); static void bfa_fcs_lport_ms_sm_gmal_sending(struct bfa_fcs_lport_ms_s *ms, enum port_ms_event event); static void bfa_fcs_lport_ms_sm_gmal(struct bfa_fcs_lport_ms_s *ms, enum port_ms_event event); static void bfa_fcs_lport_ms_sm_gmal_retry(struct bfa_fcs_lport_ms_s *ms, enum port_ms_event event); static void bfa_fcs_lport_ms_sm_gfn_sending(struct bfa_fcs_lport_ms_s *ms, enum port_ms_event event); static void bfa_fcs_lport_ms_sm_gfn(struct bfa_fcs_lport_ms_s *ms, enum port_ms_event event); static void bfa_fcs_lport_ms_sm_gfn_retry(struct bfa_fcs_lport_ms_s *ms, enum port_ms_event event); static void bfa_fcs_lport_ms_sm_online(struct bfa_fcs_lport_ms_s *ms, enum port_ms_event event); /* * Start in offline state - awaiting NS to send start. */ static void bfa_fcs_lport_ms_sm_offline(struct bfa_fcs_lport_ms_s *ms, enum port_ms_event event) { bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn); bfa_trc(ms->port->fcs, event); switch (event) { case MSSM_EVENT_PORT_ONLINE: bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_plogi_sending); bfa_fcs_lport_ms_send_plogi(ms, NULL); break; case MSSM_EVENT_PORT_OFFLINE: break; default: bfa_sm_fault(ms->port->fcs, event); } } static void bfa_fcs_lport_ms_sm_plogi_sending(struct bfa_fcs_lport_ms_s *ms, enum port_ms_event event) { bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn); bfa_trc(ms->port->fcs, event); switch (event) { case MSSM_EVENT_FCXP_SENT: bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_plogi); break; case MSSM_EVENT_PORT_OFFLINE: bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline); bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ms->port), &ms->fcxp_wqe); break; default: bfa_sm_fault(ms->port->fcs, event); } } static void bfa_fcs_lport_ms_sm_plogi(struct bfa_fcs_lport_ms_s *ms, enum port_ms_event event) { bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn); bfa_trc(ms->port->fcs, event); switch (event) { case MSSM_EVENT_RSP_ERROR: /* * Start timer for a delayed retry */ bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_plogi_retry); ms->port->stats.ms_retries++; bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ms->port), &ms->timer, bfa_fcs_lport_ms_timeout, ms, BFA_FCS_RETRY_TIMEOUT); break; case MSSM_EVENT_RSP_OK: /* * since plogi is done, now invoke MS related sub-modules */ bfa_fcs_lport_fdmi_online(ms); /* * if this is a Vport, go to online state. */ if (ms->port->vport) { bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_online); break; } /* * For a base port we need to get the * switch's IP address. */ bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gmal_sending); bfa_fcs_lport_ms_send_gmal(ms, NULL); break; case MSSM_EVENT_PORT_OFFLINE: bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline); bfa_fcxp_discard(ms->fcxp); break; default: bfa_sm_fault(ms->port->fcs, event); } } static void bfa_fcs_lport_ms_sm_plogi_retry(struct bfa_fcs_lport_ms_s *ms, enum port_ms_event event) { bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn); bfa_trc(ms->port->fcs, event); switch (event) { case MSSM_EVENT_TIMEOUT: /* * Retry Timer Expired. 
Re-send */ bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_plogi_sending); bfa_fcs_lport_ms_send_plogi(ms, NULL); break; case MSSM_EVENT_PORT_OFFLINE: bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline); bfa_timer_stop(&ms->timer); break; default: bfa_sm_fault(ms->port->fcs, event); } } static void bfa_fcs_lport_ms_sm_online(struct bfa_fcs_lport_ms_s *ms, enum port_ms_event event) { bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn); bfa_trc(ms->port->fcs, event); switch (event) { case MSSM_EVENT_PORT_OFFLINE: bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline); break; case MSSM_EVENT_PORT_FABRIC_RSCN: bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gfn_sending); ms->retry_cnt = 0; bfa_fcs_lport_ms_send_gfn(ms, NULL); break; default: bfa_sm_fault(ms->port->fcs, event); } } static void bfa_fcs_lport_ms_sm_gmal_sending(struct bfa_fcs_lport_ms_s *ms, enum port_ms_event event) { bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn); bfa_trc(ms->port->fcs, event); switch (event) { case MSSM_EVENT_FCXP_SENT: bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gmal); break; case MSSM_EVENT_PORT_OFFLINE: bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline); bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ms->port), &ms->fcxp_wqe); break; default: bfa_sm_fault(ms->port->fcs, event); } } static void bfa_fcs_lport_ms_sm_gmal(struct bfa_fcs_lport_ms_s *ms, enum port_ms_event event) { bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn); bfa_trc(ms->port->fcs, event); switch (event) { case MSSM_EVENT_RSP_ERROR: /* * Start timer for a delayed retry */ if (ms->retry_cnt++ < BFA_FCS_MS_CMD_MAX_RETRIES) { bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gmal_retry); ms->port->stats.ms_retries++; bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ms->port), &ms->timer, bfa_fcs_lport_ms_timeout, ms, BFA_FCS_RETRY_TIMEOUT); } else { bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gfn_sending); bfa_fcs_lport_ms_send_gfn(ms, NULL); ms->retry_cnt = 0; } break; case MSSM_EVENT_RSP_OK: bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gfn_sending); bfa_fcs_lport_ms_send_gfn(ms, NULL); break; case MSSM_EVENT_PORT_OFFLINE: bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline); bfa_fcxp_discard(ms->fcxp); break; default: bfa_sm_fault(ms->port->fcs, event); } } static void bfa_fcs_lport_ms_sm_gmal_retry(struct bfa_fcs_lport_ms_s *ms, enum port_ms_event event) { bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn); bfa_trc(ms->port->fcs, event); switch (event) { case MSSM_EVENT_TIMEOUT: /* * Retry Timer Expired. Re-send */ bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gmal_sending); bfa_fcs_lport_ms_send_gmal(ms, NULL); break; case MSSM_EVENT_PORT_OFFLINE: bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline); bfa_timer_stop(&ms->timer); break; default: bfa_sm_fault(ms->port->fcs, event); } } /* * ms_pvt MS local functions */ static void bfa_fcs_lport_ms_send_gmal(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced) { struct bfa_fcs_lport_ms_s *ms = ms_cbarg; bfa_fcs_lport_t *port = ms->port; struct fchs_s fchs; int len; struct bfa_fcxp_s *fcxp; bfa_trc(port->fcs, port->pid); fcxp = fcxp_alloced ? 
fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE); if (!fcxp) { bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ms->fcxp_wqe, bfa_fcs_lport_ms_send_gmal, ms, BFA_TRUE); return; } ms->fcxp = fcxp; len = fc_gmal_req_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), bfa_fcs_lport_get_fcid(port), port->fabric->lps->pr_nwwn); bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, FC_CLASS_3, len, &fchs, bfa_fcs_lport_ms_gmal_response, (void *)ms, FC_MAX_PDUSZ, FC_FCCT_TOV); bfa_sm_send_event(ms, MSSM_EVENT_FCXP_SENT); } static void bfa_fcs_lport_ms_gmal_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, bfa_status_t req_status, u32 rsp_len, u32 resid_len, struct fchs_s *rsp_fchs) { struct bfa_fcs_lport_ms_s *ms = (struct bfa_fcs_lport_ms_s *) cbarg; bfa_fcs_lport_t *port = ms->port; struct ct_hdr_s *cthdr = NULL; struct fcgs_gmal_resp_s *gmal_resp; struct fcgs_gmal_entry_s *gmal_entry; u32 num_entries; u8 *rsp_str; bfa_trc(port->fcs, req_status); bfa_trc(port->fcs, port->port_cfg.pwwn); /* * Sanity Checks */ if (req_status != BFA_STATUS_OK) { bfa_trc(port->fcs, req_status); bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR); return; } cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code); if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { gmal_resp = (struct fcgs_gmal_resp_s *)(cthdr + 1); num_entries = be32_to_cpu(gmal_resp->ms_len); if (num_entries == 0) { bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR); return; } /* * The response could contain multiple Entries. * Entries for SNMP interface, etc. * We look for the entry with a telnet prefix. * First "http://" entry refers to IP addr */ gmal_entry = (struct fcgs_gmal_entry_s *)gmal_resp->ms_ma; while (num_entries > 0) { if (strncmp(gmal_entry->prefix, CT_GMAL_RESP_PREFIX_HTTP, sizeof(gmal_entry->prefix)) == 0) { /* * if the IP address is terminating with a '/', * remove it. * Byte 0 consists of the length of the string. 
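* The trimmed address is then copied into the lport's fabric IP address field.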
*/ rsp_str = &(gmal_entry->prefix[0]); if (rsp_str[gmal_entry->len-1] == '/') rsp_str[gmal_entry->len-1] = 0; /* copy IP Address to fabric */ strscpy(bfa_fcs_lport_get_fabric_ipaddr(port), gmal_entry->ip_addr, BFA_FCS_FABRIC_IPADDR_SZ); break; } else { --num_entries; ++gmal_entry; } } bfa_sm_send_event(ms, MSSM_EVENT_RSP_OK); return; } bfa_trc(port->fcs, cthdr->reason_code); bfa_trc(port->fcs, cthdr->exp_code); bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR); } static void bfa_fcs_lport_ms_sm_gfn_sending(struct bfa_fcs_lport_ms_s *ms, enum port_ms_event event) { bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn); bfa_trc(ms->port->fcs, event); switch (event) { case MSSM_EVENT_FCXP_SENT: bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gfn); break; case MSSM_EVENT_PORT_OFFLINE: bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline); bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ms->port), &ms->fcxp_wqe); break; default: bfa_sm_fault(ms->port->fcs, event); } } static void bfa_fcs_lport_ms_sm_gfn(struct bfa_fcs_lport_ms_s *ms, enum port_ms_event event) { bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn); bfa_trc(ms->port->fcs, event); switch (event) { case MSSM_EVENT_RSP_ERROR: /* * Start timer for a delayed retry */ if (ms->retry_cnt++ < BFA_FCS_MS_CMD_MAX_RETRIES) { bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gfn_retry); ms->port->stats.ms_retries++; bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ms->port), &ms->timer, bfa_fcs_lport_ms_timeout, ms, BFA_FCS_RETRY_TIMEOUT); } else { bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_online); ms->retry_cnt = 0; } break; case MSSM_EVENT_RSP_OK: bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_online); break; case MSSM_EVENT_PORT_OFFLINE: bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline); bfa_fcxp_discard(ms->fcxp); break; default: bfa_sm_fault(ms->port->fcs, event); } } static void bfa_fcs_lport_ms_sm_gfn_retry(struct bfa_fcs_lport_ms_s *ms, enum port_ms_event event) { bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn); bfa_trc(ms->port->fcs, event); switch (event) { case MSSM_EVENT_TIMEOUT: /* * Retry Timer Expired. Re-send */ bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gfn_sending); bfa_fcs_lport_ms_send_gfn(ms, NULL); break; case MSSM_EVENT_PORT_OFFLINE: bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline); bfa_timer_stop(&ms->timer); break; default: bfa_sm_fault(ms->port->fcs, event); } } /* * ms_pvt MS local functions */ static void bfa_fcs_lport_ms_send_gfn(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced) { struct bfa_fcs_lport_ms_s *ms = ms_cbarg; bfa_fcs_lport_t *port = ms->port; struct fchs_s fchs; int len; struct bfa_fcxp_s *fcxp; bfa_trc(port->fcs, port->pid); fcxp = fcxp_alloced ? 
fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE); if (!fcxp) { bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ms->fcxp_wqe, bfa_fcs_lport_ms_send_gfn, ms, BFA_TRUE); return; } ms->fcxp = fcxp; len = fc_gfn_req_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), bfa_fcs_lport_get_fcid(port), port->fabric->lps->pr_nwwn); bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, FC_CLASS_3, len, &fchs, bfa_fcs_lport_ms_gfn_response, (void *)ms, FC_MAX_PDUSZ, FC_FCCT_TOV); bfa_sm_send_event(ms, MSSM_EVENT_FCXP_SENT); } static void bfa_fcs_lport_ms_gfn_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, bfa_status_t req_status, u32 rsp_len, u32 resid_len, struct fchs_s *rsp_fchs) { struct bfa_fcs_lport_ms_s *ms = (struct bfa_fcs_lport_ms_s *) cbarg; bfa_fcs_lport_t *port = ms->port; struct ct_hdr_s *cthdr = NULL; wwn_t *gfn_resp; bfa_trc(port->fcs, req_status); bfa_trc(port->fcs, port->port_cfg.pwwn); /* * Sanity Checks */ if (req_status != BFA_STATUS_OK) { bfa_trc(port->fcs, req_status); bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR); return; } cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code); if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { gfn_resp = (wwn_t *)(cthdr + 1); /* check if it has actually changed */ if ((memcmp((void *)&bfa_fcs_lport_get_fabric_name(port), gfn_resp, sizeof(wwn_t)) != 0)) { bfa_fcs_fabric_set_fabric_name(port->fabric, *gfn_resp); } bfa_sm_send_event(ms, MSSM_EVENT_RSP_OK); return; } bfa_trc(port->fcs, cthdr->reason_code); bfa_trc(port->fcs, cthdr->exp_code); bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR); } /* * ms_pvt MS local functions */ static void bfa_fcs_lport_ms_send_plogi(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced) { struct bfa_fcs_lport_ms_s *ms = ms_cbarg; struct bfa_fcs_lport_s *port = ms->port; struct fchs_s fchs; int len; struct bfa_fcxp_s *fcxp; bfa_trc(port->fcs, port->pid); fcxp = fcxp_alloced ? 
fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE); if (!fcxp) { port->stats.ms_plogi_alloc_wait++; bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ms->fcxp_wqe, bfa_fcs_lport_ms_send_plogi, ms, BFA_TRUE); return; } ms->fcxp = fcxp; len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), bfa_hton3b(FC_MGMT_SERVER), bfa_fcs_lport_get_fcid(port), 0, port->port_cfg.pwwn, port->port_cfg.nwwn, bfa_fcport_get_maxfrsize(port->fcs->bfa), bfa_fcport_get_rx_bbcredit(port->fcs->bfa)); bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, FC_CLASS_3, len, &fchs, bfa_fcs_lport_ms_plogi_response, (void *)ms, FC_MAX_PDUSZ, FC_ELS_TOV); port->stats.ms_plogi_sent++; bfa_sm_send_event(ms, MSSM_EVENT_FCXP_SENT); } static void bfa_fcs_lport_ms_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, bfa_status_t req_status, u32 rsp_len, u32 resid_len, struct fchs_s *rsp_fchs) { struct bfa_fcs_lport_ms_s *ms = (struct bfa_fcs_lport_ms_s *) cbarg; struct bfa_fcs_lport_s *port = ms->port; struct fc_els_cmd_s *els_cmd; struct fc_ls_rjt_s *ls_rjt; bfa_trc(port->fcs, req_status); bfa_trc(port->fcs, port->port_cfg.pwwn); /* * Sanity Checks */ if (req_status != BFA_STATUS_OK) { port->stats.ms_plogi_rsp_err++; bfa_trc(port->fcs, req_status); bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR); return; } els_cmd = (struct fc_els_cmd_s *) BFA_FCXP_RSP_PLD(fcxp); switch (els_cmd->els_code) { case FC_ELS_ACC: if (rsp_len < sizeof(struct fc_logi_s)) { bfa_trc(port->fcs, rsp_len); port->stats.ms_plogi_acc_err++; bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR); break; } port->stats.ms_plogi_accepts++; bfa_sm_send_event(ms, MSSM_EVENT_RSP_OK); break; case FC_ELS_LS_RJT: ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp); bfa_trc(port->fcs, ls_rjt->reason_code); bfa_trc(port->fcs, ls_rjt->reason_code_expl); port->stats.ms_rejects++; bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR); break; default: port->stats.ms_plogi_unknown_rsp++; bfa_trc(port->fcs, els_cmd->els_code); bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR); } } static void bfa_fcs_lport_ms_timeout(void *arg) { struct bfa_fcs_lport_ms_s *ms = (struct bfa_fcs_lport_ms_s *) arg; ms->port->stats.ms_timeouts++; bfa_sm_send_event(ms, MSSM_EVENT_TIMEOUT); } void bfa_fcs_lport_ms_init(struct bfa_fcs_lport_s *port) { struct bfa_fcs_lport_ms_s *ms = BFA_FCS_GET_MS_FROM_PORT(port); ms->port = port; bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline); /* * Invoke init routines of sub modules. */ bfa_fcs_lport_fdmi_init(ms); } void bfa_fcs_lport_ms_offline(struct bfa_fcs_lport_s *port) { struct bfa_fcs_lport_ms_s *ms = BFA_FCS_GET_MS_FROM_PORT(port); ms->port = port; bfa_sm_send_event(ms, MSSM_EVENT_PORT_OFFLINE); bfa_fcs_lport_fdmi_offline(ms); } void bfa_fcs_lport_ms_online(struct bfa_fcs_lport_s *port) { struct bfa_fcs_lport_ms_s *ms = BFA_FCS_GET_MS_FROM_PORT(port); ms->port = port; bfa_sm_send_event(ms, MSSM_EVENT_PORT_ONLINE); } void bfa_fcs_lport_ms_fabric_rscn(struct bfa_fcs_lport_s *port) { struct bfa_fcs_lport_ms_s *ms = BFA_FCS_GET_MS_FROM_PORT(port); /* todo. 
Handle this only when in Online state */ if (bfa_sm_cmp_state(ms, bfa_fcs_lport_ms_sm_online)) bfa_sm_send_event(ms, MSSM_EVENT_PORT_FABRIC_RSCN); } /* * @page ns_sm_info VPORT NS State Machine * * @section ns_sm_interactions VPORT NS State Machine Interactions * * @section ns_sm VPORT NS State Machine * img ns_sm.jpg */ /* * forward declarations */ static void bfa_fcs_lport_ns_send_plogi(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced); static void bfa_fcs_lport_ns_send_rspn_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced); static void bfa_fcs_lport_ns_send_rft_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced); static void bfa_fcs_lport_ns_send_rff_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced); static void bfa_fcs_lport_ns_send_gid_ft(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced); static void bfa_fcs_lport_ns_send_rnn_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced); static void bfa_fcs_lport_ns_send_rsnn_nn(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced); static void bfa_fcs_lport_ns_timeout(void *arg); static void bfa_fcs_lport_ns_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, bfa_status_t req_status, u32 rsp_len, u32 resid_len, struct fchs_s *rsp_fchs); static void bfa_fcs_lport_ns_rspn_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, bfa_status_t req_status, u32 rsp_len, u32 resid_len, struct fchs_s *rsp_fchs); static void bfa_fcs_lport_ns_rft_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, bfa_status_t req_status, u32 rsp_len, u32 resid_len, struct fchs_s *rsp_fchs); static void bfa_fcs_lport_ns_rff_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, bfa_status_t req_status, u32 rsp_len, u32 resid_len, struct fchs_s *rsp_fchs); static void bfa_fcs_lport_ns_gid_ft_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, bfa_status_t req_status, u32 rsp_len, u32 resid_len, struct fchs_s *rsp_fchs); static void bfa_fcs_lport_ns_rnn_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, bfa_status_t req_status, u32 rsp_len, u32 resid_len, struct fchs_s *rsp_fchs); static void bfa_fcs_lport_ns_rsnn_nn_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, bfa_status_t req_status, u32 rsp_len, u32 resid_len, struct fchs_s *rsp_fchs); static void bfa_fcs_lport_ns_process_gidft_pids( struct bfa_fcs_lport_s *port, u32 *pid_buf, u32 n_pids); static void bfa_fcs_lport_ns_boot_target_disc(bfa_fcs_lport_t *port); /* * fcs_ns_sm FCS nameserver interface state machine */ /* * VPort NS State Machine events */ enum vport_ns_event { NSSM_EVENT_PORT_ONLINE = 1, NSSM_EVENT_PORT_OFFLINE = 2, NSSM_EVENT_PLOGI_SENT = 3, NSSM_EVENT_RSP_OK = 4, NSSM_EVENT_RSP_ERROR = 5, NSSM_EVENT_TIMEOUT = 6, NSSM_EVENT_NS_QUERY = 7, NSSM_EVENT_RSPNID_SENT = 8, NSSM_EVENT_RFTID_SENT = 9, NSSM_EVENT_RFFID_SENT = 10, NSSM_EVENT_GIDFT_SENT = 11, NSSM_EVENT_RNNID_SENT = 12, NSSM_EVENT_RSNN_NN_SENT = 13, }; static void bfa_fcs_lport_ns_sm_offline(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event); static void bfa_fcs_lport_ns_sm_plogi_sending(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event); static void bfa_fcs_lport_ns_sm_plogi(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event); static void bfa_fcs_lport_ns_sm_plogi_retry(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event); static void bfa_fcs_lport_ns_sm_sending_rspn_id( struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event); static void bfa_fcs_lport_ns_sm_rspn_id(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event); static void 
bfa_fcs_lport_ns_sm_rspn_id_retry(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event); static void bfa_fcs_lport_ns_sm_sending_rft_id( struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event); static void bfa_fcs_lport_ns_sm_rft_id_retry(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event); static void bfa_fcs_lport_ns_sm_rft_id(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event); static void bfa_fcs_lport_ns_sm_sending_rff_id( struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event); static void bfa_fcs_lport_ns_sm_rff_id_retry(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event); static void bfa_fcs_lport_ns_sm_rff_id(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event); static void bfa_fcs_lport_ns_sm_sending_gid_ft( struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event); static void bfa_fcs_lport_ns_sm_gid_ft(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event); static void bfa_fcs_lport_ns_sm_gid_ft_retry(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event); static void bfa_fcs_lport_ns_sm_online(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event); static void bfa_fcs_lport_ns_sm_sending_rnn_id( struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event); static void bfa_fcs_lport_ns_sm_rnn_id(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event); static void bfa_fcs_lport_ns_sm_rnn_id_retry(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event); static void bfa_fcs_lport_ns_sm_sending_rsnn_nn( struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event); static void bfa_fcs_lport_ns_sm_rsnn_nn(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event); static void bfa_fcs_lport_ns_sm_rsnn_nn_retry( struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event); /* * Start in offline state - awaiting linkup */ static void bfa_fcs_lport_ns_sm_offline(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event) { bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); bfa_trc(ns->port->fcs, event); switch (event) { case NSSM_EVENT_PORT_ONLINE: bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_plogi_sending); bfa_fcs_lport_ns_send_plogi(ns, NULL); break; case NSSM_EVENT_PORT_OFFLINE: break; default: bfa_sm_fault(ns->port->fcs, event); } } static void bfa_fcs_lport_ns_sm_plogi_sending(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event) { bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); bfa_trc(ns->port->fcs, event); switch (event) { case NSSM_EVENT_PLOGI_SENT: bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_plogi); break; case NSSM_EVENT_PORT_OFFLINE: bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port), &ns->fcxp_wqe); break; default: bfa_sm_fault(ns->port->fcs, event); } } static void bfa_fcs_lport_ns_sm_plogi(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event) { bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); bfa_trc(ns->port->fcs, event); switch (event) { case NSSM_EVENT_RSP_ERROR: /* * Start timer for a delayed retry */ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_plogi_retry); ns->port->stats.ns_retries++; bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port), &ns->timer, bfa_fcs_lport_ns_timeout, ns, BFA_FCS_RETRY_TIMEOUT); break; case NSSM_EVENT_RSP_OK: bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rnn_id); ns->num_rnnid_retries = 0; bfa_fcs_lport_ns_send_rnn_id(ns, NULL); break; case NSSM_EVENT_PORT_OFFLINE: bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); bfa_fcxp_discard(ns->fcxp); break; default: bfa_sm_fault(ns->port->fcs, event); } } static void bfa_fcs_lport_ns_sm_plogi_retry(struct 
bfa_fcs_lport_ns_s *ns, enum vport_ns_event event) { bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); bfa_trc(ns->port->fcs, event); switch (event) { case NSSM_EVENT_TIMEOUT: /* * Retry Timer Expired. Re-send */ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_plogi_sending); bfa_fcs_lport_ns_send_plogi(ns, NULL); break; case NSSM_EVENT_PORT_OFFLINE: bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); bfa_timer_stop(&ns->timer); break; default: bfa_sm_fault(ns->port->fcs, event); } } static void bfa_fcs_lport_ns_sm_sending_rnn_id(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event) { bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); bfa_trc(ns->port->fcs, event); switch (event) { case NSSM_EVENT_RNNID_SENT: bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rnn_id); break; case NSSM_EVENT_PORT_OFFLINE: bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port), &ns->fcxp_wqe); break; default: bfa_sm_fault(ns->port->fcs, event); } } static void bfa_fcs_lport_ns_sm_rnn_id(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event) { bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); bfa_trc(ns->port->fcs, event); switch (event) { case NSSM_EVENT_RSP_OK: bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rsnn_nn); ns->num_rnnid_retries = 0; ns->num_rsnn_nn_retries = 0; bfa_fcs_lport_ns_send_rsnn_nn(ns, NULL); break; case NSSM_EVENT_RSP_ERROR: if (ns->num_rnnid_retries < BFA_FCS_MAX_NS_RETRIES) { bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rnn_id_retry); ns->port->stats.ns_retries++; ns->num_rnnid_retries++; bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port), &ns->timer, bfa_fcs_lport_ns_timeout, ns, BFA_FCS_RETRY_TIMEOUT); } else { bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rspn_id); bfa_fcs_lport_ns_send_rspn_id(ns, NULL); } break; case NSSM_EVENT_PORT_OFFLINE: bfa_fcxp_discard(ns->fcxp); bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); break; default: bfa_sm_fault(ns->port->fcs, event); } } static void bfa_fcs_lport_ns_sm_rnn_id_retry(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event) { bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); bfa_trc(ns->port->fcs, event); switch (event) { case NSSM_EVENT_TIMEOUT: bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rnn_id); bfa_fcs_lport_ns_send_rnn_id(ns, NULL); break; case NSSM_EVENT_PORT_OFFLINE: bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); bfa_timer_stop(&ns->timer); break; default: bfa_sm_fault(ns->port->fcs, event); } } static void bfa_fcs_lport_ns_sm_sending_rsnn_nn(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event) { bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); bfa_trc(ns->port->fcs, event); switch (event) { case NSSM_EVENT_RSNN_NN_SENT: bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rsnn_nn); break; case NSSM_EVENT_PORT_OFFLINE: bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port), &ns->fcxp_wqe); break; default: bfa_sm_fault(ns->port->fcs, event); } } static void bfa_fcs_lport_ns_sm_rsnn_nn(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event) { bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); bfa_trc(ns->port->fcs, event); switch (event) { case NSSM_EVENT_RSP_OK: bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rspn_id); ns->num_rsnn_nn_retries = 0; bfa_fcs_lport_ns_send_rspn_id(ns, NULL); break; case NSSM_EVENT_RSP_ERROR: if (ns->num_rsnn_nn_retries < BFA_FCS_MAX_NS_RETRIES) { bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rsnn_nn_retry); ns->port->stats.ns_retries++; ns->num_rsnn_nn_retries++; 
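/* arm the retry timer; its TIMEOUT event re-sends RSNN_NN */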
bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port), &ns->timer, bfa_fcs_lport_ns_timeout, ns, BFA_FCS_RETRY_TIMEOUT); } else { bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rspn_id); bfa_fcs_lport_ns_send_rspn_id(ns, NULL); } break; case NSSM_EVENT_PORT_OFFLINE: bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); bfa_fcxp_discard(ns->fcxp); break; default: bfa_sm_fault(ns->port->fcs, event); } } static void bfa_fcs_lport_ns_sm_rsnn_nn_retry(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event) { bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); bfa_trc(ns->port->fcs, event); switch (event) { case NSSM_EVENT_TIMEOUT: bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rsnn_nn); bfa_fcs_lport_ns_send_rsnn_nn(ns, NULL); break; case NSSM_EVENT_PORT_OFFLINE: bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); bfa_timer_stop(&ns->timer); break; default: bfa_sm_fault(ns->port->fcs, event); } } static void bfa_fcs_lport_ns_sm_sending_rspn_id(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event) { bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); bfa_trc(ns->port->fcs, event); switch (event) { case NSSM_EVENT_RSPNID_SENT: bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rspn_id); break; case NSSM_EVENT_PORT_OFFLINE: bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port), &ns->fcxp_wqe); break; default: bfa_sm_fault(ns->port->fcs, event); } } static void bfa_fcs_lport_ns_sm_rspn_id(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event) { bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); bfa_trc(ns->port->fcs, event); switch (event) { case NSSM_EVENT_RSP_ERROR: /* * Start timer for a delayed retry */ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rspn_id_retry); ns->port->stats.ns_retries++; bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port), &ns->timer, bfa_fcs_lport_ns_timeout, ns, BFA_FCS_RETRY_TIMEOUT); break; case NSSM_EVENT_RSP_OK: bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rft_id); bfa_fcs_lport_ns_send_rft_id(ns, NULL); break; case NSSM_EVENT_PORT_OFFLINE: bfa_fcxp_discard(ns->fcxp); bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); break; default: bfa_sm_fault(ns->port->fcs, event); } } static void bfa_fcs_lport_ns_sm_rspn_id_retry(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event) { bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); bfa_trc(ns->port->fcs, event); switch (event) { case NSSM_EVENT_TIMEOUT: /* * Retry Timer Expired. 
Re-send */ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rspn_id); bfa_fcs_lport_ns_send_rspn_id(ns, NULL); break; case NSSM_EVENT_PORT_OFFLINE: bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); bfa_timer_stop(&ns->timer); break; default: bfa_sm_fault(ns->port->fcs, event); } } static void bfa_fcs_lport_ns_sm_sending_rft_id(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event) { bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); bfa_trc(ns->port->fcs, event); switch (event) { case NSSM_EVENT_RFTID_SENT: bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rft_id); break; case NSSM_EVENT_PORT_OFFLINE: bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port), &ns->fcxp_wqe); break; default: bfa_sm_fault(ns->port->fcs, event); } } static void bfa_fcs_lport_ns_sm_rft_id(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event) { bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); bfa_trc(ns->port->fcs, event); switch (event) { case NSSM_EVENT_RSP_OK: /* Now move to register FC4 Features */ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rff_id); bfa_fcs_lport_ns_send_rff_id(ns, NULL); break; case NSSM_EVENT_RSP_ERROR: /* * Start timer for a delayed retry */ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rft_id_retry); ns->port->stats.ns_retries++; bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port), &ns->timer, bfa_fcs_lport_ns_timeout, ns, BFA_FCS_RETRY_TIMEOUT); break; case NSSM_EVENT_PORT_OFFLINE: bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); bfa_fcxp_discard(ns->fcxp); break; default: bfa_sm_fault(ns->port->fcs, event); } } static void bfa_fcs_lport_ns_sm_rft_id_retry(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event) { bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); bfa_trc(ns->port->fcs, event); switch (event) { case NSSM_EVENT_TIMEOUT: bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rft_id); bfa_fcs_lport_ns_send_rft_id(ns, NULL); break; case NSSM_EVENT_PORT_OFFLINE: bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); bfa_timer_stop(&ns->timer); break; default: bfa_sm_fault(ns->port->fcs, event); } } static void bfa_fcs_lport_ns_sm_sending_rff_id(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event) { bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); bfa_trc(ns->port->fcs, event); switch (event) { case NSSM_EVENT_RFFID_SENT: bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rff_id); break; case NSSM_EVENT_PORT_OFFLINE: bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port), &ns->fcxp_wqe); break; default: bfa_sm_fault(ns->port->fcs, event); } } static void bfa_fcs_lport_ns_sm_rff_id(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event) { bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); bfa_trc(ns->port->fcs, event); switch (event) { case NSSM_EVENT_RSP_OK: /* * If min cfg mode is enabled, we do not initiate rport * discovery with the fabric. Instead, we will retrieve the * boot targets from HAL/FW. */ if (__fcs_min_cfg(ns->port->fcs)) { bfa_fcs_lport_ns_boot_target_disc(ns->port); bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_online); return; } /* * If the port role is Initiator Mode issue NS query. * If it is Target Mode, skip this and go to online.
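* A GID_FT query asks the name server for the port IDs of all ports registered with the matching FC-4 type.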
*/ if (BFA_FCS_VPORT_IS_INITIATOR_MODE(ns->port)) { bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_gid_ft); bfa_fcs_lport_ns_send_gid_ft(ns, NULL); } /* * kick off mgmt srvr state machine */ bfa_fcs_lport_ms_online(ns->port); break; case NSSM_EVENT_RSP_ERROR: /* * Start timer for a delayed retry */ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rff_id_retry); ns->port->stats.ns_retries++; bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port), &ns->timer, bfa_fcs_lport_ns_timeout, ns, BFA_FCS_RETRY_TIMEOUT); break; case NSSM_EVENT_PORT_OFFLINE: bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); bfa_fcxp_discard(ns->fcxp); break; default: bfa_sm_fault(ns->port->fcs, event); } } static void bfa_fcs_lport_ns_sm_rff_id_retry(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event) { bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); bfa_trc(ns->port->fcs, event); switch (event) { case NSSM_EVENT_TIMEOUT: bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rff_id); bfa_fcs_lport_ns_send_rff_id(ns, NULL); break; case NSSM_EVENT_PORT_OFFLINE: bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); bfa_timer_stop(&ns->timer); break; default: bfa_sm_fault(ns->port->fcs, event); } } static void bfa_fcs_lport_ns_sm_sending_gid_ft(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event) { bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); bfa_trc(ns->port->fcs, event); switch (event) { case NSSM_EVENT_GIDFT_SENT: bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_gid_ft); break; case NSSM_EVENT_PORT_OFFLINE: bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port), &ns->fcxp_wqe); break; default: bfa_sm_fault(ns->port->fcs, event); } } static void bfa_fcs_lport_ns_sm_gid_ft(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event) { bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); bfa_trc(ns->port->fcs, event); switch (event) { case NSSM_EVENT_RSP_OK: bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_online); break; case NSSM_EVENT_RSP_ERROR: /* * TBD: for certain reject codes, we don't need to retry */ /* * Start timer for a delayed retry */ bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_gid_ft_retry); ns->port->stats.ns_retries++; bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port), &ns->timer, bfa_fcs_lport_ns_timeout, ns, BFA_FCS_RETRY_TIMEOUT); break; case NSSM_EVENT_PORT_OFFLINE: bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); bfa_fcxp_discard(ns->fcxp); break; case NSSM_EVENT_NS_QUERY: break; default: bfa_sm_fault(ns->port->fcs, event); } } static void bfa_fcs_lport_ns_sm_gid_ft_retry(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event) { bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); bfa_trc(ns->port->fcs, event); switch (event) { case NSSM_EVENT_TIMEOUT: bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_gid_ft); bfa_fcs_lport_ns_send_gid_ft(ns, NULL); break; case NSSM_EVENT_PORT_OFFLINE: bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); bfa_timer_stop(&ns->timer); break; default: bfa_sm_fault(ns->port->fcs, event); } } static void bfa_fcs_lport_ns_sm_online(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event) { bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); bfa_trc(ns->port->fcs, event); switch (event) { case NSSM_EVENT_PORT_OFFLINE: bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); break; case NSSM_EVENT_NS_QUERY: /* * If the port role is Initiator Mode issue NS query. * If it is Target Mode, skip this and go to online. 
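* Re-running GID_FT here refreshes the set of discovered remote ports while the port stays online.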
*/ if (BFA_FCS_VPORT_IS_INITIATOR_MODE(ns->port)) { bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_gid_ft); bfa_fcs_lport_ns_send_gid_ft(ns, NULL); } break; default: bfa_sm_fault(ns->port->fcs, event); } } /* * ns_pvt Nameserver local functions */ static void bfa_fcs_lport_ns_send_plogi(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced) { struct bfa_fcs_lport_ns_s *ns = ns_cbarg; struct bfa_fcs_lport_s *port = ns->port; struct fchs_s fchs; int len; struct bfa_fcxp_s *fcxp; bfa_trc(port->fcs, port->pid); fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE); if (!fcxp) { port->stats.ns_plogi_alloc_wait++; bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe, bfa_fcs_lport_ns_send_plogi, ns, BFA_TRUE); return; } ns->fcxp = fcxp; len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), bfa_hton3b(FC_NAME_SERVER), bfa_fcs_lport_get_fcid(port), 0, port->port_cfg.pwwn, port->port_cfg.nwwn, bfa_fcport_get_maxfrsize(port->fcs->bfa), bfa_fcport_get_rx_bbcredit(port->fcs->bfa)); bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, FC_CLASS_3, len, &fchs, bfa_fcs_lport_ns_plogi_response, (void *)ns, FC_MAX_PDUSZ, FC_ELS_TOV); port->stats.ns_plogi_sent++; bfa_sm_send_event(ns, NSSM_EVENT_PLOGI_SENT); } static void bfa_fcs_lport_ns_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, bfa_status_t req_status, u32 rsp_len, u32 resid_len, struct fchs_s *rsp_fchs) { struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) cbarg; struct bfa_fcs_lport_s *port = ns->port; /* struct fc_logi_s *plogi_resp; */ struct fc_els_cmd_s *els_cmd; struct fc_ls_rjt_s *ls_rjt; bfa_trc(port->fcs, req_status); bfa_trc(port->fcs, port->port_cfg.pwwn); /* * Sanity Checks */ if (req_status != BFA_STATUS_OK) { bfa_trc(port->fcs, req_status); port->stats.ns_plogi_rsp_err++; bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); return; } els_cmd = (struct fc_els_cmd_s *) BFA_FCXP_RSP_PLD(fcxp); switch (els_cmd->els_code) { case FC_ELS_ACC: if (rsp_len < sizeof(struct fc_logi_s)) { bfa_trc(port->fcs, rsp_len); port->stats.ns_plogi_acc_err++; bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); break; } port->stats.ns_plogi_accepts++; bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK); break; case FC_ELS_LS_RJT: ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp); bfa_trc(port->fcs, ls_rjt->reason_code); bfa_trc(port->fcs, ls_rjt->reason_code_expl); port->stats.ns_rejects++; bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); break; default: port->stats.ns_plogi_unknown_rsp++; bfa_trc(port->fcs, els_cmd->els_code); bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); } } /* * Register node name for port_id */ static void bfa_fcs_lport_ns_send_rnn_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced) { struct bfa_fcs_lport_ns_s *ns = ns_cbarg; struct bfa_fcs_lport_s *port = ns->port; struct fchs_s fchs; int len; struct bfa_fcxp_s *fcxp; bfa_trc(port->fcs, port->port_cfg.pwwn); fcxp = fcxp_alloced ? 
fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE); if (!fcxp) { port->stats.ns_rnnid_alloc_wait++; bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe, bfa_fcs_lport_ns_send_rnn_id, ns, BFA_TRUE); return; } ns->fcxp = fcxp; len = fc_rnnid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), bfa_fcs_lport_get_fcid(port), bfa_fcs_lport_get_fcid(port), bfa_fcs_lport_get_nwwn(port)); bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, FC_CLASS_3, len, &fchs, bfa_fcs_lport_ns_rnn_id_response, (void *)ns, FC_MAX_PDUSZ, FC_FCCT_TOV); port->stats.ns_rnnid_sent++; bfa_sm_send_event(ns, NSSM_EVENT_RNNID_SENT); } static void bfa_fcs_lport_ns_rnn_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, bfa_status_t req_status, u32 rsp_len, u32 resid_len, struct fchs_s *rsp_fchs) { struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) cbarg; struct bfa_fcs_lport_s *port = ns->port; struct ct_hdr_s *cthdr = NULL; bfa_trc(port->fcs, port->port_cfg.pwwn); /* * Sanity Checks */ if (req_status != BFA_STATUS_OK) { bfa_trc(port->fcs, req_status); port->stats.ns_rnnid_rsp_err++; bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); return; } cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code); if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { port->stats.ns_rnnid_accepts++; bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK); return; } port->stats.ns_rnnid_rejects++; bfa_trc(port->fcs, cthdr->reason_code); bfa_trc(port->fcs, cthdr->exp_code); bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); } /* * Register the symbolic node name for a given node name. */ static void bfa_fcs_lport_ns_send_rsnn_nn(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced) { struct bfa_fcs_lport_ns_s *ns = ns_cbarg; struct bfa_fcs_lport_s *port = ns->port; struct fchs_s fchs; int len; struct bfa_fcxp_s *fcxp; u8 *nsymbl; bfa_trc(port->fcs, port->port_cfg.pwwn); fcxp = fcxp_alloced ? 
fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE); if (!fcxp) { port->stats.ns_rsnn_nn_alloc_wait++; bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe, bfa_fcs_lport_ns_send_rsnn_nn, ns, BFA_TRUE); return; } ns->fcxp = fcxp; nsymbl = (u8 *) &(bfa_fcs_lport_get_nsym_name( bfa_fcs_get_base_port(port->fcs))); len = fc_rsnn_nn_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), bfa_fcs_lport_get_fcid(port), bfa_fcs_lport_get_nwwn(port), nsymbl); bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, FC_CLASS_3, len, &fchs, bfa_fcs_lport_ns_rsnn_nn_response, (void *)ns, FC_MAX_PDUSZ, FC_FCCT_TOV); port->stats.ns_rsnn_nn_sent++; bfa_sm_send_event(ns, NSSM_EVENT_RSNN_NN_SENT); } static void bfa_fcs_lport_ns_rsnn_nn_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, bfa_status_t req_status, u32 rsp_len, u32 resid_len, struct fchs_s *rsp_fchs) { struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) cbarg; struct bfa_fcs_lport_s *port = ns->port; struct ct_hdr_s *cthdr = NULL; bfa_trc(port->fcs, port->port_cfg.pwwn); /* * Sanity Checks */ if (req_status != BFA_STATUS_OK) { bfa_trc(port->fcs, req_status); port->stats.ns_rsnn_nn_rsp_err++; bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); return; } cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code); if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { port->stats.ns_rsnn_nn_accepts++; bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK); return; } port->stats.ns_rsnn_nn_rejects++; bfa_trc(port->fcs, cthdr->reason_code); bfa_trc(port->fcs, cthdr->exp_code); bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); } /* * Register the symbolic port name. */ static void bfa_fcs_lport_ns_send_rspn_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced) { struct bfa_fcs_lport_ns_s *ns = ns_cbarg; struct bfa_fcs_lport_s *port = ns->port; struct fchs_s fchs; int len; struct bfa_fcxp_s *fcxp; u8 symbl[256]; u8 *psymbl = &symbl[0]; memset(symbl, 0, sizeof(symbl)); bfa_trc(port->fcs, port->port_cfg.pwwn); fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE); if (!fcxp) { port->stats.ns_rspnid_alloc_wait++; bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe, bfa_fcs_lport_ns_send_rspn_id, ns, BFA_TRUE); return; } ns->fcxp = fcxp; /* * for V-Port, form a Port Symbolic Name */ if (port->vport) { /* * For Vports, we append the vport's port symbolic name * to that of the base port. 
*/ strscpy(symbl, (char *)&(bfa_fcs_lport_get_psym_name (bfa_fcs_get_base_port(port->fcs))), sizeof(symbl)); strlcat(symbl, (char *)&(bfa_fcs_lport_get_psym_name(port)), sizeof(symbl)); } else { psymbl = (u8 *) &(bfa_fcs_lport_get_psym_name(port)); } len = fc_rspnid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), bfa_fcs_lport_get_fcid(port), 0, psymbl); bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, FC_CLASS_3, len, &fchs, bfa_fcs_lport_ns_rspn_id_response, (void *)ns, FC_MAX_PDUSZ, FC_FCCT_TOV); port->stats.ns_rspnid_sent++; bfa_sm_send_event(ns, NSSM_EVENT_RSPNID_SENT); } static void bfa_fcs_lport_ns_rspn_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, bfa_status_t req_status, u32 rsp_len, u32 resid_len, struct fchs_s *rsp_fchs) { struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) cbarg; struct bfa_fcs_lport_s *port = ns->port; struct ct_hdr_s *cthdr = NULL; bfa_trc(port->fcs, port->port_cfg.pwwn); /* * Sanity Checks */ if (req_status != BFA_STATUS_OK) { bfa_trc(port->fcs, req_status); port->stats.ns_rspnid_rsp_err++; bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); return; } cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code); if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { port->stats.ns_rspnid_accepts++; bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK); return; } port->stats.ns_rspnid_rejects++; bfa_trc(port->fcs, cthdr->reason_code); bfa_trc(port->fcs, cthdr->exp_code); bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); } /* * Register FC4-Types */ static void bfa_fcs_lport_ns_send_rft_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced) { struct bfa_fcs_lport_ns_s *ns = ns_cbarg; struct bfa_fcs_lport_s *port = ns->port; struct fchs_s fchs; int len; struct bfa_fcxp_s *fcxp; bfa_trc(port->fcs, port->port_cfg.pwwn); fcxp = fcxp_alloced ? 
fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE); if (!fcxp) { port->stats.ns_rftid_alloc_wait++; bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe, bfa_fcs_lport_ns_send_rft_id, ns, BFA_TRUE); return; } ns->fcxp = fcxp; len = fc_rftid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), bfa_fcs_lport_get_fcid(port), 0, port->port_cfg.roles); bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, FC_CLASS_3, len, &fchs, bfa_fcs_lport_ns_rft_id_response, (void *)ns, FC_MAX_PDUSZ, FC_FCCT_TOV); port->stats.ns_rftid_sent++; bfa_sm_send_event(ns, NSSM_EVENT_RFTID_SENT); } static void bfa_fcs_lport_ns_rft_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, bfa_status_t req_status, u32 rsp_len, u32 resid_len, struct fchs_s *rsp_fchs) { struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) cbarg; struct bfa_fcs_lport_s *port = ns->port; struct ct_hdr_s *cthdr = NULL; bfa_trc(port->fcs, port->port_cfg.pwwn); /* * Sanity Checks */ if (req_status != BFA_STATUS_OK) { bfa_trc(port->fcs, req_status); port->stats.ns_rftid_rsp_err++; bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); return; } cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code); if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { port->stats.ns_rftid_accepts++; bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK); return; } port->stats.ns_rftid_rejects++; bfa_trc(port->fcs, cthdr->reason_code); bfa_trc(port->fcs, cthdr->exp_code); bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); } /* * Register FC4-Features : Should be done after RFT_ID */ static void bfa_fcs_lport_ns_send_rff_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced) { struct bfa_fcs_lport_ns_s *ns = ns_cbarg; struct bfa_fcs_lport_s *port = ns->port; struct fchs_s fchs; int len; struct bfa_fcxp_s *fcxp; u8 fc4_ftrs = 0; bfa_trc(port->fcs, port->port_cfg.pwwn); fcxp = fcxp_alloced ? 
fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE); if (!fcxp) { port->stats.ns_rffid_alloc_wait++; bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe, bfa_fcs_lport_ns_send_rff_id, ns, BFA_TRUE); return; } ns->fcxp = fcxp; if (BFA_FCS_VPORT_IS_INITIATOR_MODE(ns->port)) fc4_ftrs = FC_GS_FCP_FC4_FEATURE_INITIATOR; len = fc_rffid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), bfa_fcs_lport_get_fcid(port), 0, FC_TYPE_FCP, fc4_ftrs); bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, FC_CLASS_3, len, &fchs, bfa_fcs_lport_ns_rff_id_response, (void *)ns, FC_MAX_PDUSZ, FC_FCCT_TOV); port->stats.ns_rffid_sent++; bfa_sm_send_event(ns, NSSM_EVENT_RFFID_SENT); } static void bfa_fcs_lport_ns_rff_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, bfa_status_t req_status, u32 rsp_len, u32 resid_len, struct fchs_s *rsp_fchs) { struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) cbarg; struct bfa_fcs_lport_s *port = ns->port; struct ct_hdr_s *cthdr = NULL; bfa_trc(port->fcs, port->port_cfg.pwwn); /* * Sanity Checks */ if (req_status != BFA_STATUS_OK) { bfa_trc(port->fcs, req_status); port->stats.ns_rffid_rsp_err++; bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); return; } cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code); if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { port->stats.ns_rffid_accepts++; bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK); return; } port->stats.ns_rffid_rejects++; bfa_trc(port->fcs, cthdr->reason_code); bfa_trc(port->fcs, cthdr->exp_code); if (cthdr->reason_code == CT_RSN_NOT_SUPP) { /* if this command is not supported, we don't retry */ bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK); } else bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); } /* * Query Fabric for FC4-Types Devices. * * TBD : Need to use a local (FCS private) response buffer, since the response * can be larger than 2K. */ static void bfa_fcs_lport_ns_send_gid_ft(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced) { struct bfa_fcs_lport_ns_s *ns = ns_cbarg; struct bfa_fcs_lport_s *port = ns->port; struct fchs_s fchs; int len; struct bfa_fcxp_s *fcxp; bfa_trc(port->fcs, port->pid); fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE); if (!fcxp) { port->stats.ns_gidft_alloc_wait++; bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe, bfa_fcs_lport_ns_send_gid_ft, ns, BFA_TRUE); return; } ns->fcxp = fcxp; /* * This query is only initiated for FCP initiator mode. 
*/ len = fc_gid_ft_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), ns->port->pid, FC_TYPE_FCP); bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, FC_CLASS_3, len, &fchs, bfa_fcs_lport_ns_gid_ft_response, (void *)ns, bfa_fcxp_get_maxrsp(port->fcs->bfa), FC_FCCT_TOV); port->stats.ns_gidft_sent++; bfa_sm_send_event(ns, NSSM_EVENT_GIDFT_SENT); } static void bfa_fcs_lport_ns_gid_ft_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, bfa_status_t req_status, u32 rsp_len, u32 resid_len, struct fchs_s *rsp_fchs) { struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) cbarg; struct bfa_fcs_lport_s *port = ns->port; struct ct_hdr_s *cthdr = NULL; u32 n_pids; bfa_trc(port->fcs, port->port_cfg.pwwn); /* * Sanity Checks */ if (req_status != BFA_STATUS_OK) { bfa_trc(port->fcs, req_status); port->stats.ns_gidft_rsp_err++; bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); return; } if (resid_len != 0) { /* * TBD : we will need to allocate a larger buffer & retry the * command */ bfa_trc(port->fcs, rsp_len); bfa_trc(port->fcs, resid_len); return; } cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code); switch (cthdr->cmd_rsp_code) { case CT_RSP_ACCEPT: port->stats.ns_gidft_accepts++; n_pids = (fc_get_ctresp_pyld_len(rsp_len) / sizeof(u32)); bfa_trc(port->fcs, n_pids); bfa_fcs_lport_ns_process_gidft_pids(port, (u32 *) (cthdr + 1), n_pids); bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK); break; case CT_RSP_REJECT: /* * Check the reason code & explanation. * There may not have been any FC4 devices in the fabric */ port->stats.ns_gidft_rejects++; bfa_trc(port->fcs, cthdr->reason_code); bfa_trc(port->fcs, cthdr->exp_code); if ((cthdr->reason_code == CT_RSN_UNABLE_TO_PERF) && (cthdr->exp_code == CT_NS_EXP_FT_NOT_REG)) { bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK); } else { /* * for all other errors, retry */ bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); } break; default: port->stats.ns_gidft_unknown_rsp++; bfa_trc(port->fcs, cthdr->cmd_rsp_code); bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); } } /* * This routine will be called by bfa_timer on timer timeouts. * * param[in] port - pointer to bfa_fcs_lport_t. * * return * void * * Special Considerations: * * note */ static void bfa_fcs_lport_ns_timeout(void *arg) { struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) arg; ns->port->stats.ns_timeouts++; bfa_sm_send_event(ns, NSSM_EVENT_TIMEOUT); } /* * Process the PID list in GID_FT response */ static void bfa_fcs_lport_ns_process_gidft_pids(struct bfa_fcs_lport_s *port, u32 *pid_buf, u32 n_pids) { struct fcgs_gidft_resp_s *gidft_entry; struct bfa_fcs_rport_s *rport; u32 ii; struct bfa_fcs_fabric_s *fabric = port->fabric; struct bfa_fcs_vport_s *vport; struct list_head *qe; u8 found = 0; for (ii = 0; ii < n_pids; ii++) { gidft_entry = (struct fcgs_gidft_resp_s *) &pid_buf[ii]; if (gidft_entry->pid == port->pid) continue; /* * Ignore PID if it is of base port * (Avoid vports discovering base port as remote port) */ if (gidft_entry->pid == fabric->bport.pid) continue; /* * Ignore PID if it is of vport created on the same base port * (Avoid vport discovering every other vport created on the * same port as remote port) */ list_for_each(qe, &fabric->vport_q) { vport = (struct bfa_fcs_vport_s *) qe; if (vport->lport.pid == gidft_entry->pid) found = 1; } if (found) { found = 0; continue; } /* * Check if this rport already exists */ rport = bfa_fcs_lport_get_rport_by_pid(port, gidft_entry->pid); if (rport == NULL) { /* * this is a new device. 
create rport */ rport = bfa_fcs_rport_create(port, gidft_entry->pid); } else { /* * this rport already exists */ bfa_fcs_rport_scn(rport); } bfa_trc(port->fcs, gidft_entry->pid); /* * if the last entry bit is set, bail out. */ if (gidft_entry->last) return; } } /* * fcs_ns_public FCS nameserver public interfaces */ /* * Functions called by port/fab. * These will send relevant Events to the ns state machine. */ void bfa_fcs_lport_ns_init(struct bfa_fcs_lport_s *port) { struct bfa_fcs_lport_ns_s *ns = BFA_FCS_GET_NS_FROM_PORT(port); ns->port = port; bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); } void bfa_fcs_lport_ns_offline(struct bfa_fcs_lport_s *port) { struct bfa_fcs_lport_ns_s *ns = BFA_FCS_GET_NS_FROM_PORT(port); ns->port = port; bfa_sm_send_event(ns, NSSM_EVENT_PORT_OFFLINE); } void bfa_fcs_lport_ns_online(struct bfa_fcs_lport_s *port) { struct bfa_fcs_lport_ns_s *ns = BFA_FCS_GET_NS_FROM_PORT(port); ns->port = port; bfa_sm_send_event(ns, NSSM_EVENT_PORT_ONLINE); } void bfa_fcs_lport_ns_query(struct bfa_fcs_lport_s *port) { struct bfa_fcs_lport_ns_s *ns = BFA_FCS_GET_NS_FROM_PORT(port); bfa_trc(port->fcs, port->pid); if (bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_online)) bfa_sm_send_event(ns, NSSM_EVENT_NS_QUERY); } static void bfa_fcs_lport_ns_boot_target_disc(bfa_fcs_lport_t *port) { struct bfa_fcs_rport_s *rport; u8 nwwns; wwn_t wwns[BFA_PREBOOT_BOOTLUN_MAX]; int ii; bfa_iocfc_get_bootwwns(port->fcs->bfa, &nwwns, wwns); for (ii = 0 ; ii < nwwns; ++ii) { rport = bfa_fcs_rport_create_by_wwn(port, wwns[ii]); WARN_ON(!rport); } } void bfa_fcs_lport_ns_util_send_rspn_id(void *cbarg, struct bfa_fcxp_s *fcxp_alloced) { struct bfa_fcs_lport_ns_s *ns = cbarg; struct bfa_fcs_lport_s *port = ns->port; struct fchs_s fchs; struct bfa_fcxp_s *fcxp; u8 symbl[256]; int len; /* Avoid sending RSPN in the following states. */ if (bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_offline) || bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_plogi_sending) || bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_plogi) || bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_plogi_retry) || bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_rspn_id_retry)) return; memset(symbl, 0, sizeof(symbl)); bfa_trc(port->fcs, port->port_cfg.pwwn); fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE); if (!fcxp) { port->stats.ns_rspnid_alloc_wait++; bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe, bfa_fcs_lport_ns_util_send_rspn_id, ns, BFA_FALSE); return; } ns->fcxp = fcxp; if (port->vport) { /* * For Vports, we append the vport's port symbolic name * to that of the base port. 
*/ strscpy(symbl, (char *)&(bfa_fcs_lport_get_psym_name (bfa_fcs_get_base_port(port->fcs))), sizeof(symbl)); strlcat(symbl, (char *)&(bfa_fcs_lport_get_psym_name(port)), sizeof(symbl)); } len = fc_rspnid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), bfa_fcs_lport_get_fcid(port), 0, symbl); bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0); port->stats.ns_rspnid_sent++; } /* * FCS SCN */ #define FC_QOS_RSCN_EVENT 0x0c #define FC_FABRIC_NAME_RSCN_EVENT 0x0d /* * forward declarations */ static void bfa_fcs_lport_scn_send_scr(void *scn_cbarg, struct bfa_fcxp_s *fcxp_alloced); static void bfa_fcs_lport_scn_scr_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, bfa_status_t req_status, u32 rsp_len, u32 resid_len, struct fchs_s *rsp_fchs); static void bfa_fcs_lport_scn_send_ls_acc(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs); static void bfa_fcs_lport_scn_timeout(void *arg); /* * fcs_scm_sm FCS SCN state machine */ /* * VPort SCN State Machine events */ enum port_scn_event { SCNSM_EVENT_PORT_ONLINE = 1, SCNSM_EVENT_PORT_OFFLINE = 2, SCNSM_EVENT_RSP_OK = 3, SCNSM_EVENT_RSP_ERROR = 4, SCNSM_EVENT_TIMEOUT = 5, SCNSM_EVENT_SCR_SENT = 6, }; static void bfa_fcs_lport_scn_sm_offline(struct bfa_fcs_lport_scn_s *scn, enum port_scn_event event); static void bfa_fcs_lport_scn_sm_sending_scr( struct bfa_fcs_lport_scn_s *scn, enum port_scn_event event); static void bfa_fcs_lport_scn_sm_scr(struct bfa_fcs_lport_scn_s *scn, enum port_scn_event event); static void bfa_fcs_lport_scn_sm_scr_retry(struct bfa_fcs_lport_scn_s *scn, enum port_scn_event event); static void bfa_fcs_lport_scn_sm_online(struct bfa_fcs_lport_scn_s *scn, enum port_scn_event event); /* * Starting state - awaiting link up. 
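 * A SCNSM_EVENT_PORT_ONLINE event moves the state machine to the
 * sending_scr state, which issues an SCR (State Change Registration)
 * to the fabric so that RSCNs are delivered to this port.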
*/ static void bfa_fcs_lport_scn_sm_offline(struct bfa_fcs_lport_scn_s *scn, enum port_scn_event event) { switch (event) { case SCNSM_EVENT_PORT_ONLINE: bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_sending_scr); bfa_fcs_lport_scn_send_scr(scn, NULL); break; case SCNSM_EVENT_PORT_OFFLINE: break; default: bfa_sm_fault(scn->port->fcs, event); } } static void bfa_fcs_lport_scn_sm_sending_scr(struct bfa_fcs_lport_scn_s *scn, enum port_scn_event event) { switch (event) { case SCNSM_EVENT_SCR_SENT: bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_scr); break; case SCNSM_EVENT_PORT_OFFLINE: bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_offline); bfa_fcxp_walloc_cancel(scn->port->fcs->bfa, &scn->fcxp_wqe); break; default: bfa_sm_fault(scn->port->fcs, event); } } static void bfa_fcs_lport_scn_sm_scr(struct bfa_fcs_lport_scn_s *scn, enum port_scn_event event) { struct bfa_fcs_lport_s *port = scn->port; switch (event) { case SCNSM_EVENT_RSP_OK: bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_online); break; case SCNSM_EVENT_RSP_ERROR: bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_scr_retry); bfa_timer_start(port->fcs->bfa, &scn->timer, bfa_fcs_lport_scn_timeout, scn, BFA_FCS_RETRY_TIMEOUT); break; case SCNSM_EVENT_PORT_OFFLINE: bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_offline); bfa_fcxp_discard(scn->fcxp); break; default: bfa_sm_fault(port->fcs, event); } } static void bfa_fcs_lport_scn_sm_scr_retry(struct bfa_fcs_lport_scn_s *scn, enum port_scn_event event) { switch (event) { case SCNSM_EVENT_TIMEOUT: bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_sending_scr); bfa_fcs_lport_scn_send_scr(scn, NULL); break; case SCNSM_EVENT_PORT_OFFLINE: bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_offline); bfa_timer_stop(&scn->timer); break; default: bfa_sm_fault(scn->port->fcs, event); } } static void bfa_fcs_lport_scn_sm_online(struct bfa_fcs_lport_scn_s *scn, enum port_scn_event event) { switch (event) { case SCNSM_EVENT_PORT_OFFLINE: bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_offline); break; default: bfa_sm_fault(scn->port->fcs, event); } } /* * fcs_scn_private FCS SCN private functions */ /* * This routine will be called to send a SCR command. */ static void bfa_fcs_lport_scn_send_scr(void *scn_cbarg, struct bfa_fcxp_s *fcxp_alloced) { struct bfa_fcs_lport_scn_s *scn = scn_cbarg; struct bfa_fcs_lport_s *port = scn->port; struct fchs_s fchs; int len; struct bfa_fcxp_s *fcxp; bfa_trc(port->fcs, port->pid); bfa_trc(port->fcs, port->port_cfg.pwwn); fcxp = fcxp_alloced ? 
fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE); if (!fcxp) { bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &scn->fcxp_wqe, bfa_fcs_lport_scn_send_scr, scn, BFA_TRUE); return; } scn->fcxp = fcxp; /* Handle VU registrations for Base port only */ if ((!port->vport) && bfa_ioc_get_fcmode(&port->fcs->bfa->ioc)) { len = fc_scr_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), port->fabric->lps->brcd_switch, port->pid, 0); } else { len = fc_scr_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), BFA_FALSE, port->pid, 0); } bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, FC_CLASS_3, len, &fchs, bfa_fcs_lport_scn_scr_response, (void *)scn, FC_MAX_PDUSZ, FC_ELS_TOV); bfa_sm_send_event(scn, SCNSM_EVENT_SCR_SENT); } static void bfa_fcs_lport_scn_scr_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, bfa_status_t req_status, u32 rsp_len, u32 resid_len, struct fchs_s *rsp_fchs) { struct bfa_fcs_lport_scn_s *scn = (struct bfa_fcs_lport_scn_s *) cbarg; struct bfa_fcs_lport_s *port = scn->port; struct fc_els_cmd_s *els_cmd; struct fc_ls_rjt_s *ls_rjt; bfa_trc(port->fcs, port->port_cfg.pwwn); /* * Sanity Checks */ if (req_status != BFA_STATUS_OK) { bfa_trc(port->fcs, req_status); bfa_sm_send_event(scn, SCNSM_EVENT_RSP_ERROR); return; } els_cmd = (struct fc_els_cmd_s *) BFA_FCXP_RSP_PLD(fcxp); switch (els_cmd->els_code) { case FC_ELS_ACC: bfa_sm_send_event(scn, SCNSM_EVENT_RSP_OK); break; case FC_ELS_LS_RJT: ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp); bfa_trc(port->fcs, ls_rjt->reason_code); bfa_trc(port->fcs, ls_rjt->reason_code_expl); bfa_sm_send_event(scn, SCNSM_EVENT_RSP_ERROR); break; default: bfa_sm_send_event(scn, SCNSM_EVENT_RSP_ERROR); } } /* * Send a LS Accept */ static void bfa_fcs_lport_scn_send_ls_acc(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs) { struct fchs_s fchs; struct bfa_fcxp_s *fcxp; struct bfa_rport_s *bfa_rport = NULL; int len; bfa_trc(port->fcs, rx_fchs->s_id); fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE); if (!fcxp) return; len = fc_ls_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rx_fchs->s_id, bfa_fcs_lport_get_fcid(port), rx_fchs->ox_id); bfa_fcxp_send(fcxp, bfa_rport, port->fabric->vf_id, port->lp_tag, BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0); } /* * This routine will be called by bfa_timer on timer timeouts. * * param[in] vport - pointer to bfa_fcs_lport_t. 
* param[out] vport_status - pointer to return vport status in * * return * void * * Special Considerations: * * note */ static void bfa_fcs_lport_scn_timeout(void *arg) { struct bfa_fcs_lport_scn_s *scn = (struct bfa_fcs_lport_scn_s *) arg; bfa_sm_send_event(scn, SCNSM_EVENT_TIMEOUT); } /* * fcs_scn_public FCS state change notification public interfaces */ /* * Functions called by port/fab */ void bfa_fcs_lport_scn_init(struct bfa_fcs_lport_s *port) { struct bfa_fcs_lport_scn_s *scn = BFA_FCS_GET_SCN_FROM_PORT(port); scn->port = port; bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_offline); } void bfa_fcs_lport_scn_offline(struct bfa_fcs_lport_s *port) { struct bfa_fcs_lport_scn_s *scn = BFA_FCS_GET_SCN_FROM_PORT(port); scn->port = port; bfa_sm_send_event(scn, SCNSM_EVENT_PORT_OFFLINE); } void bfa_fcs_lport_fab_scn_online(struct bfa_fcs_lport_s *port) { struct bfa_fcs_lport_scn_s *scn = BFA_FCS_GET_SCN_FROM_PORT(port); scn->port = port; bfa_sm_send_event(scn, SCNSM_EVENT_PORT_ONLINE); } static void bfa_fcs_lport_scn_portid_rscn(struct bfa_fcs_lport_s *port, u32 rpid) { struct bfa_fcs_rport_s *rport; struct bfa_fcs_fabric_s *fabric = port->fabric; struct bfa_fcs_vport_s *vport; struct list_head *qe; bfa_trc(port->fcs, rpid); /* * Ignore PID if it is of base port or of vports created on the * same base port. It is to avoid vports discovering base port or * other vports created on same base port as remote port */ if (rpid == fabric->bport.pid) return; list_for_each(qe, &fabric->vport_q) { vport = (struct bfa_fcs_vport_s *) qe; if (vport->lport.pid == rpid) return; } /* * If this is an unknown device, then it just came online. * Otherwise let rport handle the RSCN event. */ rport = bfa_fcs_lport_get_rport_by_pid(port, rpid); if (!rport) rport = bfa_fcs_lport_get_rport_by_old_pid(port, rpid); if (rport == NULL) { /* * If min cfg mode is enabled, we donot need to * discover any new rports. 
*/ if (!__fcs_min_cfg(port->fcs)) rport = bfa_fcs_rport_create(port, rpid); } else bfa_fcs_rport_scn(rport); } /* * rscn format based PID comparison */ #define __fc_pid_match(__c0, __c1, __fmt) \ (((__fmt) == FC_RSCN_FORMAT_FABRIC) || \ (((__fmt) == FC_RSCN_FORMAT_DOMAIN) && \ ((__c0)[0] == (__c1)[0])) || \ (((__fmt) == FC_RSCN_FORMAT_AREA) && \ ((__c0)[0] == (__c1)[0]) && \ ((__c0)[1] == (__c1)[1]))) static void bfa_fcs_lport_scn_multiport_rscn(struct bfa_fcs_lport_s *port, enum fc_rscn_format format, u32 rscn_pid) { struct bfa_fcs_rport_s *rport; struct list_head *qe, *qe_next; u8 *c0, *c1; bfa_trc(port->fcs, format); bfa_trc(port->fcs, rscn_pid); c0 = (u8 *) &rscn_pid; list_for_each_safe(qe, qe_next, &port->rport_q) { rport = (struct bfa_fcs_rport_s *) qe; c1 = (u8 *) &rport->pid; if (__fc_pid_match(c0, c1, format)) bfa_fcs_rport_scn(rport); } } void bfa_fcs_lport_scn_process_rscn(struct bfa_fcs_lport_s *port, struct fchs_s *fchs, u32 len) { struct fc_rscn_pl_s *rscn = (struct fc_rscn_pl_s *) (fchs + 1); int num_entries; u32 rscn_pid; bfa_boolean_t nsquery = BFA_FALSE, found; int i = 0, j; num_entries = (be16_to_cpu(rscn->payldlen) - sizeof(u32)) / sizeof(rscn->event[0]); bfa_trc(port->fcs, num_entries); port->stats.num_rscn++; bfa_fcs_lport_scn_send_ls_acc(port, fchs); for (i = 0; i < num_entries; i++) { rscn_pid = rscn->event[i].portid; bfa_trc(port->fcs, rscn->event[i].format); bfa_trc(port->fcs, rscn_pid); /* check for duplicate entries in the list */ found = BFA_FALSE; for (j = 0; j < i; j++) { if (rscn->event[j].portid == rscn_pid) { found = BFA_TRUE; break; } } /* if found in down the list, pid has been already processed */ if (found) { bfa_trc(port->fcs, rscn_pid); continue; } switch (rscn->event[i].format) { case FC_RSCN_FORMAT_PORTID: if (rscn->event[i].qualifier == FC_QOS_RSCN_EVENT) { /* * Ignore this event. * f/w would have processed it */ bfa_trc(port->fcs, rscn_pid); } else { port->stats.num_portid_rscn++; bfa_fcs_lport_scn_portid_rscn(port, rscn_pid); } break; case FC_RSCN_FORMAT_FABRIC: if (rscn->event[i].qualifier == FC_FABRIC_NAME_RSCN_EVENT) { bfa_fcs_lport_ms_fabric_rscn(port); break; } fallthrough; case FC_RSCN_FORMAT_AREA: case FC_RSCN_FORMAT_DOMAIN: nsquery = BFA_TRUE; bfa_fcs_lport_scn_multiport_rscn(port, rscn->event[i].format, rscn_pid); break; default: WARN_ON(1); nsquery = BFA_TRUE; } } /* * If any of area, domain or fabric RSCN is received, do a fresh * discovery to find new devices. 
 */
	if (nsquery)
		bfa_fcs_lport_ns_query(port);
}

/*
 * BFA FCS port
 */
/*
 *  fcs_port_api BFA FCS port API
 */
struct bfa_fcs_lport_s *
bfa_fcs_get_base_port(struct bfa_fcs_s *fcs)
{
	return &fcs->fabric.bport;
}

wwn_t
bfa_fcs_lport_get_rport(struct bfa_fcs_lport_s *port, wwn_t wwn, int index,
		int nrports, bfa_boolean_t bwwn)
{
	struct list_head	*qh, *qe;
	struct bfa_fcs_rport_s *rport = NULL;
	int	i;
	struct bfa_fcs_s	*fcs;

	if (port == NULL || nrports == 0)
		return (wwn_t) 0;

	fcs = port->fcs;
	bfa_trc(fcs, (u32) nrports);

	i = 0;
	qh = &port->rport_q;
	qe = bfa_q_first(qh);

	while ((qe != qh) && (i < nrports)) {
		rport = (struct bfa_fcs_rport_s *) qe;
		if (bfa_ntoh3b(rport->pid) > 0xFFF000) {
			qe = bfa_q_next(qe);
			bfa_trc(fcs, (u32) rport->pwwn);
			bfa_trc(fcs, rport->pid);
			bfa_trc(fcs, i);
			continue;
		}

		if (bwwn) {
			if (!memcmp(&wwn, &rport->pwwn, 8))
				break;
		} else {
			if (i == index)
				break;
		}

		i++;
		qe = bfa_q_next(qe);
	}

	bfa_trc(fcs, i);
	if (rport)
		return rport->pwwn;
	else
		return (wwn_t) 0;
}

void
bfa_fcs_lport_get_rport_quals(struct bfa_fcs_lport_s *port,
		struct bfa_rport_qualifier_s rports[], int *nrports)
{
	struct list_head	*qh, *qe;
	struct bfa_fcs_rport_s *rport = NULL;
	int	i;
	struct bfa_fcs_s	*fcs;

	if (port == NULL || rports == NULL || *nrports == 0)
		return;

	fcs = port->fcs;
	bfa_trc(fcs, (u32) *nrports);

	i = 0;
	qh = &port->rport_q;
	qe = bfa_q_first(qh);

	while ((qe != qh) && (i < *nrports)) {
		rport = (struct bfa_fcs_rport_s *) qe;
		if (bfa_ntoh3b(rport->pid) > 0xFFF000) {
			qe = bfa_q_next(qe);
			bfa_trc(fcs, (u32) rport->pwwn);
			bfa_trc(fcs, rport->pid);
			bfa_trc(fcs, i);
			continue;
		}

		if (!rport->pwwn && !rport->pid) {
			qe = bfa_q_next(qe);
			continue;
		}

		rports[i].pwwn = rport->pwwn;
		rports[i].pid = rport->pid;

		i++;
		qe = bfa_q_next(qe);
	}

	bfa_trc(fcs, i);
	*nrports = i;
}

/*
 * Iterates through all the rports in the given port to
 * determine the maximum operating speed.
 *
 * !!!! To be used in TRL Functionality only !!!!
*/ bfa_port_speed_t bfa_fcs_lport_get_rport_max_speed(bfa_fcs_lport_t *port) { struct list_head *qh, *qe; struct bfa_fcs_rport_s *rport = NULL; struct bfa_fcs_s *fcs; bfa_port_speed_t max_speed = 0; struct bfa_port_attr_s port_attr; bfa_port_speed_t port_speed, rport_speed; bfa_boolean_t trl_enabled; if (port == NULL) return 0; fcs = port->fcs; trl_enabled = bfa_fcport_is_ratelim(port->fcs->bfa); /* Get Physical port's current speed */ bfa_fcport_get_attr(port->fcs->bfa, &port_attr); port_speed = port_attr.speed; bfa_trc(fcs, port_speed); qh = &port->rport_q; qe = bfa_q_first(qh); while (qe != qh) { rport = (struct bfa_fcs_rport_s *) qe; if ((bfa_ntoh3b(rport->pid) > 0xFFF000) || (bfa_fcs_rport_get_state(rport) == BFA_RPORT_OFFLINE) || (rport->scsi_function != BFA_RPORT_TARGET)) { qe = bfa_q_next(qe); continue; } rport_speed = rport->rpf.rpsc_speed; if ((trl_enabled) && (rport_speed == BFA_PORT_SPEED_UNKNOWN)) { /* Use default ratelim speed setting */ rport_speed = bfa_fcport_get_ratelim_speed(port->fcs->bfa); } if (rport_speed > max_speed) max_speed = rport_speed; qe = bfa_q_next(qe); } if (max_speed > port_speed) max_speed = port_speed; bfa_trc(fcs, max_speed); return max_speed; } struct bfa_fcs_lport_s * bfa_fcs_lookup_port(struct bfa_fcs_s *fcs, u16 vf_id, wwn_t lpwwn) { struct bfa_fcs_vport_s *vport; bfa_fcs_vf_t *vf; WARN_ON(fcs == NULL); vf = bfa_fcs_vf_lookup(fcs, vf_id); if (vf == NULL) { bfa_trc(fcs, vf_id); return NULL; } if (!lpwwn || (vf->bport.port_cfg.pwwn == lpwwn)) return &vf->bport; vport = bfa_fcs_fabric_vport_lookup(vf, lpwwn); if (vport) return &vport->lport; return NULL; } /* * API corresponding to NPIV_VPORT_GETINFO. */ void bfa_fcs_lport_get_info(struct bfa_fcs_lport_s *port, struct bfa_lport_info_s *port_info) { bfa_trc(port->fcs, port->fabric->fabric_name); if (port->vport == NULL) { /* * This is a Physical port */ port_info->port_type = BFA_LPORT_TYPE_PHYSICAL; /* * @todo : need to fix the state & reason */ port_info->port_state = 0; port_info->offline_reason = 0; port_info->port_wwn = bfa_fcs_lport_get_pwwn(port); port_info->node_wwn = bfa_fcs_lport_get_nwwn(port); port_info->max_vports_supp = bfa_lps_get_max_vport(port->fcs->bfa); port_info->num_vports_inuse = port->fabric->num_vports; port_info->max_rports_supp = BFA_FCS_MAX_RPORTS_SUPP; port_info->num_rports_inuse = port->num_rports; } else { /* * This is a virtual port */ port_info->port_type = BFA_LPORT_TYPE_VIRTUAL; /* * @todo : need to fix the state & reason */ port_info->port_state = 0; port_info->offline_reason = 0; port_info->port_wwn = bfa_fcs_lport_get_pwwn(port); port_info->node_wwn = bfa_fcs_lport_get_nwwn(port); } } void bfa_fcs_lport_get_stats(struct bfa_fcs_lport_s *fcs_port, struct bfa_lport_stats_s *port_stats) { *port_stats = fcs_port->stats; } void bfa_fcs_lport_clear_stats(struct bfa_fcs_lport_s *fcs_port) { memset(&fcs_port->stats, 0, sizeof(struct bfa_lport_stats_s)); } /* * Let new loop map create missing rports */ void bfa_fcs_lport_lip_scn_online(struct bfa_fcs_lport_s *port) { bfa_fcs_lport_loop_online(port); } /* * FCS virtual port state machine */ #define __vport_fcs(__vp) ((__vp)->lport.fcs) #define __vport_pwwn(__vp) ((__vp)->lport.port_cfg.pwwn) #define __vport_nwwn(__vp) ((__vp)->lport.port_cfg.nwwn) #define __vport_bfa(__vp) ((__vp)->lport.fcs->bfa) #define __vport_fcid(__vp) ((__vp)->lport.pid) #define __vport_fabric(__vp) ((__vp)->lport.fabric) #define __vport_vfid(__vp) ((__vp)->lport.fabric->vf_id) #define BFA_FCS_VPORT_MAX_RETRIES 5 /* * Forward declarations */ static void 
bfa_fcs_vport_do_fdisc(struct bfa_fcs_vport_s *vport);
static void	bfa_fcs_vport_timeout(void *vport_arg);
static void	bfa_fcs_vport_do_logo(struct bfa_fcs_vport_s *vport);
static void	bfa_fcs_vport_free(struct bfa_fcs_vport_s *vport);

/*
 *  fcs_vport_sm FCS virtual port state machine
 */

/*
 * VPort State Machine events
 */
enum bfa_fcs_vport_event {
	BFA_FCS_VPORT_SM_CREATE = 1,	/*  vport create event */
	BFA_FCS_VPORT_SM_DELETE = 2,	/*  vport delete event */
	BFA_FCS_VPORT_SM_START = 3,	/*  vport start request */
	BFA_FCS_VPORT_SM_STOP = 4,	/*  stop: unsupported */
	BFA_FCS_VPORT_SM_ONLINE = 5,	/*  fabric online */
	BFA_FCS_VPORT_SM_OFFLINE = 6,	/*  fabric offline event */
	BFA_FCS_VPORT_SM_FRMSENT = 7,	/*  fdisc/logo sent events */
	BFA_FCS_VPORT_SM_RSP_OK = 8,	/*  good response */
	BFA_FCS_VPORT_SM_RSP_ERROR = 9,	/*  error/bad response */
	BFA_FCS_VPORT_SM_TIMEOUT = 10,	/*  delay timer event */
	BFA_FCS_VPORT_SM_DELCOMP = 11,	/*  lport delete completion */
	BFA_FCS_VPORT_SM_RSP_DUP_WWN = 12,	/*  Dup wwn error */
	BFA_FCS_VPORT_SM_RSP_FAILED = 13,	/*  non-retryable failure */
	BFA_FCS_VPORT_SM_STOPCOMP = 14,	/* vport stop completion */
	BFA_FCS_VPORT_SM_FABRIC_MAX = 15, /* max vports on fabric */
};

static void	bfa_fcs_vport_sm_uninit(struct bfa_fcs_vport_s *vport,
					enum bfa_fcs_vport_event event);
static void	bfa_fcs_vport_sm_created(struct bfa_fcs_vport_s *vport,
					 enum bfa_fcs_vport_event event);
static void	bfa_fcs_vport_sm_offline(struct bfa_fcs_vport_s *vport,
					 enum bfa_fcs_vport_event event);
static void	bfa_fcs_vport_sm_fdisc(struct bfa_fcs_vport_s *vport,
				       enum bfa_fcs_vport_event event);
static void	bfa_fcs_vport_sm_fdisc_retry(struct bfa_fcs_vport_s *vport,
					     enum bfa_fcs_vport_event event);
static void	bfa_fcs_vport_sm_fdisc_rsp_wait(struct bfa_fcs_vport_s *vport,
					enum bfa_fcs_vport_event event);
static void	bfa_fcs_vport_sm_online(struct bfa_fcs_vport_s *vport,
					enum bfa_fcs_vport_event event);
static void	bfa_fcs_vport_sm_deleting(struct bfa_fcs_vport_s *vport,
					  enum bfa_fcs_vport_event event);
static void	bfa_fcs_vport_sm_cleanup(struct bfa_fcs_vport_s *vport,
					 enum bfa_fcs_vport_event event);
static void	bfa_fcs_vport_sm_logo(struct bfa_fcs_vport_s *vport,
				      enum bfa_fcs_vport_event event);
static void	bfa_fcs_vport_sm_error(struct bfa_fcs_vport_s *vport,
				       enum bfa_fcs_vport_event event);
static void	bfa_fcs_vport_sm_stopping(struct bfa_fcs_vport_s *vport,
					  enum bfa_fcs_vport_event event);
static void	bfa_fcs_vport_sm_logo_for_stop(struct bfa_fcs_vport_s *vport,
					enum bfa_fcs_vport_event event);

static struct bfa_sm_table_s  vport_sm_table[] = {
	{BFA_SM(bfa_fcs_vport_sm_uninit), BFA_FCS_VPORT_UNINIT},
	{BFA_SM(bfa_fcs_vport_sm_created), BFA_FCS_VPORT_CREATED},
	{BFA_SM(bfa_fcs_vport_sm_offline), BFA_FCS_VPORT_OFFLINE},
	{BFA_SM(bfa_fcs_vport_sm_fdisc), BFA_FCS_VPORT_FDISC},
	{BFA_SM(bfa_fcs_vport_sm_fdisc_retry), BFA_FCS_VPORT_FDISC_RETRY},
	{BFA_SM(bfa_fcs_vport_sm_fdisc_rsp_wait), BFA_FCS_VPORT_FDISC_RSP_WAIT},
	{BFA_SM(bfa_fcs_vport_sm_online), BFA_FCS_VPORT_ONLINE},
	{BFA_SM(bfa_fcs_vport_sm_deleting), BFA_FCS_VPORT_DELETING},
	{BFA_SM(bfa_fcs_vport_sm_cleanup), BFA_FCS_VPORT_CLEANUP},
	{BFA_SM(bfa_fcs_vport_sm_logo), BFA_FCS_VPORT_LOGO},
	{BFA_SM(bfa_fcs_vport_sm_error), BFA_FCS_VPORT_ERROR}
};

/*
 * Beginning state.
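 * The vport waits here for a BFA_FCS_VPORT_SM_CREATE event, which
 * registers the vport with its fabric and moves it to the created state.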
 */
static void
bfa_fcs_vport_sm_uninit(struct bfa_fcs_vport_s *vport,
			enum bfa_fcs_vport_event event)
{
	bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
	bfa_trc(__vport_fcs(vport), event);

	switch (event) {
	case BFA_FCS_VPORT_SM_CREATE:
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_created);
		bfa_fcs_fabric_addvport(__vport_fabric(vport), vport);
		break;

	default:
		bfa_sm_fault(__vport_fcs(vport), event);
	}
}

/*
 * Created state - a start event is required to start up the state machine.
 */
static void
bfa_fcs_vport_sm_created(struct bfa_fcs_vport_s *vport,
			enum bfa_fcs_vport_event event)
{
	bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
	bfa_trc(__vport_fcs(vport), event);

	switch (event) {
	case BFA_FCS_VPORT_SM_START:
		if (bfa_sm_cmp_state(__vport_fabric(vport),
					bfa_fcs_fabric_sm_online)
		    && bfa_fcs_fabric_npiv_capable(__vport_fabric(vport))) {
			bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc);
			bfa_fcs_vport_do_fdisc(vport);
		} else {
			/*
			 * Fabric is offline or not NPIV capable, stay in
			 * offline state.
			 */
			vport->vport_stats.fab_no_npiv++;
			bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline);
		}
		break;

	case BFA_FCS_VPORT_SM_DELETE:
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
		bfa_fcs_lport_delete(&vport->lport);
		break;

	case BFA_FCS_VPORT_SM_ONLINE:
	case BFA_FCS_VPORT_SM_OFFLINE:
		/*
		 * Ignore ONLINE/OFFLINE events from fabric
		 * till vport is started.
		 */
		break;

	default:
		bfa_sm_fault(__vport_fcs(vport), event);
	}
}

/*
 * Offline state - awaiting ONLINE event from fabric SM.
 */
static void
bfa_fcs_vport_sm_offline(struct bfa_fcs_vport_s *vport,
			enum bfa_fcs_vport_event event)
{
	bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
	bfa_trc(__vport_fcs(vport), event);

	switch (event) {
	case BFA_FCS_VPORT_SM_DELETE:
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
		bfa_fcs_lport_delete(&vport->lport);
		break;

	case BFA_FCS_VPORT_SM_ONLINE:
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc);
		vport->fdisc_retries = 0;
		bfa_fcs_vport_do_fdisc(vport);
		break;

	case BFA_FCS_VPORT_SM_STOP:
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
		bfa_sm_send_event(&vport->lport, BFA_FCS_PORT_SM_STOP);
		break;

	case BFA_FCS_VPORT_SM_OFFLINE:
		/*
		 * This can happen if the vport couldn't be initialized
		 * due to the fact that NPIV was not enabled on the switch.
		 * In that case we will put the vport in offline state.
		 * However, the link can go down and cause this event to
		 * be sent when we are already offline. Ignore it.
		 */
		break;

	default:
		bfa_sm_fault(__vport_fcs(vport), event);
	}
}

/*
 * FDISC is sent and awaiting reply from fabric.
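 * A good response (RSP_OK) brings the vport online; RSP_ERROR arms the
 * retry timer, and a duplicate-WWN reject parks the vport in the error
 * state until it is deleted.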
*/ static void bfa_fcs_vport_sm_fdisc(struct bfa_fcs_vport_s *vport, enum bfa_fcs_vport_event event) { bfa_trc(__vport_fcs(vport), __vport_pwwn(vport)); bfa_trc(__vport_fcs(vport), event); switch (event) { case BFA_FCS_VPORT_SM_DELETE: bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc_rsp_wait); break; case BFA_FCS_VPORT_SM_OFFLINE: bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline); bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE); break; case BFA_FCS_VPORT_SM_RSP_OK: bfa_sm_set_state(vport, bfa_fcs_vport_sm_online); bfa_fcs_lport_online(&vport->lport); break; case BFA_FCS_VPORT_SM_RSP_ERROR: bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc_retry); bfa_timer_start(__vport_bfa(vport), &vport->timer, bfa_fcs_vport_timeout, vport, BFA_FCS_RETRY_TIMEOUT); break; case BFA_FCS_VPORT_SM_RSP_FAILED: case BFA_FCS_VPORT_SM_FABRIC_MAX: bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline); break; case BFA_FCS_VPORT_SM_RSP_DUP_WWN: bfa_sm_set_state(vport, bfa_fcs_vport_sm_error); break; default: bfa_sm_fault(__vport_fcs(vport), event); } } /* * FDISC attempt failed - a timer is active to retry FDISC. */ static void bfa_fcs_vport_sm_fdisc_retry(struct bfa_fcs_vport_s *vport, enum bfa_fcs_vport_event event) { bfa_trc(__vport_fcs(vport), __vport_pwwn(vport)); bfa_trc(__vport_fcs(vport), event); switch (event) { case BFA_FCS_VPORT_SM_DELETE: bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup); bfa_timer_stop(&vport->timer); bfa_fcs_lport_delete(&vport->lport); break; case BFA_FCS_VPORT_SM_OFFLINE: bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline); bfa_timer_stop(&vport->timer); break; case BFA_FCS_VPORT_SM_TIMEOUT: bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc); vport->vport_stats.fdisc_retries++; vport->fdisc_retries++; bfa_fcs_vport_do_fdisc(vport); break; default: bfa_sm_fault(__vport_fcs(vport), event); } } /* * FDISC is in progress and we got a vport delete request - * this is a wait state while we wait for fdisc response and * we will transition to the appropriate state - on rsp status. */ static void bfa_fcs_vport_sm_fdisc_rsp_wait(struct bfa_fcs_vport_s *vport, enum bfa_fcs_vport_event event) { bfa_trc(__vport_fcs(vport), __vport_pwwn(vport)); bfa_trc(__vport_fcs(vport), event); switch (event) { case BFA_FCS_VPORT_SM_RSP_OK: bfa_sm_set_state(vport, bfa_fcs_vport_sm_deleting); bfa_fcs_lport_delete(&vport->lport); break; case BFA_FCS_VPORT_SM_DELETE: break; case BFA_FCS_VPORT_SM_OFFLINE: case BFA_FCS_VPORT_SM_RSP_ERROR: case BFA_FCS_VPORT_SM_RSP_FAILED: case BFA_FCS_VPORT_SM_FABRIC_MAX: case BFA_FCS_VPORT_SM_RSP_DUP_WWN: bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup); bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE); bfa_fcs_lport_delete(&vport->lport); break; default: bfa_sm_fault(__vport_fcs(vport), event); } } /* * Vport is online (FDISC is complete). 
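 * Delete and stop requests are accepted here; a fabric offline event
 * takes the associated logical port offline as well.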
*/ static void bfa_fcs_vport_sm_online(struct bfa_fcs_vport_s *vport, enum bfa_fcs_vport_event event) { bfa_trc(__vport_fcs(vport), __vport_pwwn(vport)); bfa_trc(__vport_fcs(vport), event); switch (event) { case BFA_FCS_VPORT_SM_DELETE: bfa_sm_set_state(vport, bfa_fcs_vport_sm_deleting); bfa_fcs_lport_delete(&vport->lport); break; case BFA_FCS_VPORT_SM_STOP: bfa_sm_set_state(vport, bfa_fcs_vport_sm_stopping); bfa_sm_send_event(&vport->lport, BFA_FCS_PORT_SM_STOP); break; case BFA_FCS_VPORT_SM_OFFLINE: bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline); bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE); bfa_fcs_lport_offline(&vport->lport); break; default: bfa_sm_fault(__vport_fcs(vport), event); } } /* * Vport is being stopped - awaiting lport stop completion to send * LOGO to fabric. */ static void bfa_fcs_vport_sm_stopping(struct bfa_fcs_vport_s *vport, enum bfa_fcs_vport_event event) { bfa_trc(__vport_fcs(vport), __vport_pwwn(vport)); bfa_trc(__vport_fcs(vport), event); switch (event) { case BFA_FCS_VPORT_SM_STOPCOMP: bfa_sm_set_state(vport, bfa_fcs_vport_sm_logo_for_stop); bfa_fcs_vport_do_logo(vport); break; case BFA_FCS_VPORT_SM_OFFLINE: bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup); break; default: bfa_sm_fault(__vport_fcs(vport), event); } } /* * Vport is being deleted - awaiting lport delete completion to send * LOGO to fabric. */ static void bfa_fcs_vport_sm_deleting(struct bfa_fcs_vport_s *vport, enum bfa_fcs_vport_event event) { bfa_trc(__vport_fcs(vport), __vport_pwwn(vport)); bfa_trc(__vport_fcs(vport), event); switch (event) { case BFA_FCS_VPORT_SM_DELETE: break; case BFA_FCS_VPORT_SM_DELCOMP: bfa_sm_set_state(vport, bfa_fcs_vport_sm_logo); bfa_fcs_vport_do_logo(vport); break; case BFA_FCS_VPORT_SM_OFFLINE: bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup); break; default: bfa_sm_fault(__vport_fcs(vport), event); } } /* * Error State. * This state will be set when the Vport Creation fails due * to errors like Dup WWN. In this state only operation allowed * is a Vport Delete. */ static void bfa_fcs_vport_sm_error(struct bfa_fcs_vport_s *vport, enum bfa_fcs_vport_event event) { bfa_trc(__vport_fcs(vport), __vport_pwwn(vport)); bfa_trc(__vport_fcs(vport), event); switch (event) { case BFA_FCS_VPORT_SM_DELETE: bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup); bfa_fcs_lport_delete(&vport->lport); break; default: bfa_trc(__vport_fcs(vport), event); } } /* * Lport cleanup is in progress since vport is being deleted. Fabric is * offline, so no LOGO is needed to complete vport deletion. */ static void bfa_fcs_vport_sm_cleanup(struct bfa_fcs_vport_s *vport, enum bfa_fcs_vport_event event) { bfa_trc(__vport_fcs(vport), __vport_pwwn(vport)); bfa_trc(__vport_fcs(vport), event); switch (event) { case BFA_FCS_VPORT_SM_DELCOMP: bfa_sm_set_state(vport, bfa_fcs_vport_sm_uninit); bfa_fcs_vport_free(vport); break; case BFA_FCS_VPORT_SM_STOPCOMP: bfa_sm_set_state(vport, bfa_fcs_vport_sm_created); break; case BFA_FCS_VPORT_SM_DELETE: break; default: bfa_sm_fault(__vport_fcs(vport), event); } } /* * LOGO is sent to fabric. Vport stop is in progress. Lport stop cleanup * is done. 
*/ static void bfa_fcs_vport_sm_logo_for_stop(struct bfa_fcs_vport_s *vport, enum bfa_fcs_vport_event event) { bfa_trc(__vport_fcs(vport), __vport_pwwn(vport)); bfa_trc(__vport_fcs(vport), event); switch (event) { case BFA_FCS_VPORT_SM_OFFLINE: bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE); fallthrough; case BFA_FCS_VPORT_SM_RSP_OK: case BFA_FCS_VPORT_SM_RSP_ERROR: bfa_sm_set_state(vport, bfa_fcs_vport_sm_created); break; default: bfa_sm_fault(__vport_fcs(vport), event); } } /* * LOGO is sent to fabric. Vport delete is in progress. Lport delete cleanup * is done. */ static void bfa_fcs_vport_sm_logo(struct bfa_fcs_vport_s *vport, enum bfa_fcs_vport_event event) { bfa_trc(__vport_fcs(vport), __vport_pwwn(vport)); bfa_trc(__vport_fcs(vport), event); switch (event) { case BFA_FCS_VPORT_SM_OFFLINE: bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE); fallthrough; case BFA_FCS_VPORT_SM_RSP_OK: case BFA_FCS_VPORT_SM_RSP_ERROR: bfa_sm_set_state(vport, bfa_fcs_vport_sm_uninit); bfa_fcs_vport_free(vport); break; case BFA_FCS_VPORT_SM_DELETE: break; default: bfa_sm_fault(__vport_fcs(vport), event); } } /* * fcs_vport_private FCS virtual port private functions */ /* * Send AEN notification */ static void bfa_fcs_vport_aen_post(struct bfa_fcs_lport_s *port, enum bfa_lport_aen_event event) { struct bfad_s *bfad = (struct bfad_s *)port->fabric->fcs->bfad; struct bfa_aen_entry_s *aen_entry; bfad_get_aen_entry(bfad, aen_entry); if (!aen_entry) return; aen_entry->aen_data.lport.vf_id = port->fabric->vf_id; aen_entry->aen_data.lport.roles = port->port_cfg.roles; aen_entry->aen_data.lport.ppwwn = bfa_fcs_lport_get_pwwn( bfa_fcs_get_base_port(port->fcs)); aen_entry->aen_data.lport.lpwwn = bfa_fcs_lport_get_pwwn(port); /* Send the AEN notification */ bfad_im_post_vendor_event(aen_entry, bfad, ++port->fcs->fcs_aen_seq, BFA_AEN_CAT_LPORT, event); } /* * This routine will be called to send a FDISC command. */ static void bfa_fcs_vport_do_fdisc(struct bfa_fcs_vport_s *vport) { bfa_lps_fdisc(vport->lps, vport, bfa_fcport_get_maxfrsize(__vport_bfa(vport)), __vport_pwwn(vport), __vport_nwwn(vport)); vport->vport_stats.fdisc_sent++; } static void bfa_fcs_vport_fdisc_rejected(struct bfa_fcs_vport_s *vport) { u8 lsrjt_rsn = vport->lps->lsrjt_rsn; u8 lsrjt_expl = vport->lps->lsrjt_expl; bfa_trc(__vport_fcs(vport), lsrjt_rsn); bfa_trc(__vport_fcs(vport), lsrjt_expl); /* For certain reason codes, we don't want to retry. */ switch (vport->lps->lsrjt_expl) { case FC_LS_RJT_EXP_INV_PORT_NAME: /* by brocade */ case FC_LS_RJT_EXP_INVALID_NPORT_ID: /* by Cisco */ if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES) bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR); else { bfa_fcs_vport_aen_post(&vport->lport, BFA_LPORT_AEN_NPIV_DUP_WWN); bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_DUP_WWN); } break; case FC_LS_RJT_EXP_INSUFF_RES: /* * This means max logins per port/switch setting on the * switch was exceeded. */ if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES) bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR); else { bfa_fcs_vport_aen_post(&vport->lport, BFA_LPORT_AEN_NPIV_FABRIC_MAX); bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_FABRIC_MAX); } break; default: if (vport->fdisc_retries == 0) bfa_fcs_vport_aen_post(&vport->lport, BFA_LPORT_AEN_NPIV_UNKNOWN); bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR); } } /* * Called to send a logout to the fabric. Used when a V-Port is * deleted/stopped. 
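 * The logout is issued through the LPS instance as an FDISC LOGO
 * (bfa_lps_fdisclogo()) and its completion is reported back via
 * bfa_cb_lps_fdisclogo_comp().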
*/ static void bfa_fcs_vport_do_logo(struct bfa_fcs_vport_s *vport) { bfa_trc(__vport_fcs(vport), __vport_pwwn(vport)); vport->vport_stats.logo_sent++; bfa_lps_fdisclogo(vport->lps); } /* * This routine will be called by bfa_timer on timer timeouts. * * param[in] vport - pointer to bfa_fcs_vport_t. * param[out] vport_status - pointer to return vport status in * * return * void * * Special Considerations: * * note */ static void bfa_fcs_vport_timeout(void *vport_arg) { struct bfa_fcs_vport_s *vport = (struct bfa_fcs_vport_s *) vport_arg; vport->vport_stats.fdisc_timeouts++; bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_TIMEOUT); } static void bfa_fcs_vport_free(struct bfa_fcs_vport_s *vport) { struct bfad_vport_s *vport_drv = (struct bfad_vport_s *)vport->vport_drv; bfa_fcs_fabric_delvport(__vport_fabric(vport), vport); bfa_lps_delete(vport->lps); if (vport_drv->comp_del) { complete(vport_drv->comp_del); return; } /* * We queue the vport delete work to the IM work_q from here. * The memory for the bfad_vport_s is freed from the FC function * template vport_delete entry point. */ bfad_im_port_delete(vport_drv->drv_port.bfad, &vport_drv->drv_port); } /* * fcs_vport_public FCS virtual port public interfaces */ /* * Online notification from fabric SM. */ void bfa_fcs_vport_online(struct bfa_fcs_vport_s *vport) { vport->vport_stats.fab_online++; if (bfa_fcs_fabric_npiv_capable(__vport_fabric(vport))) bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_ONLINE); else vport->vport_stats.fab_no_npiv++; } /* * Offline notification from fabric SM. */ void bfa_fcs_vport_offline(struct bfa_fcs_vport_s *vport) { vport->vport_stats.fab_offline++; bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_OFFLINE); } /* * Cleanup notification from fabric SM on link timer expiry. */ void bfa_fcs_vport_cleanup(struct bfa_fcs_vport_s *vport) { vport->vport_stats.fab_cleanup++; } /* * Stop notification from fabric SM. To be invoked from within FCS. */ void bfa_fcs_vport_fcs_stop(struct bfa_fcs_vport_s *vport) { bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_STOP); } /* * delete notification from fabric SM. To be invoked from within FCS. */ void bfa_fcs_vport_fcs_delete(struct bfa_fcs_vport_s *vport) { bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_DELETE); } /* * Stop completion callback from associated lport */ void bfa_fcs_vport_stop_comp(struct bfa_fcs_vport_s *vport) { bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_STOPCOMP); } /* * Delete completion callback from associated lport */ void bfa_fcs_vport_delete_comp(struct bfa_fcs_vport_s *vport) { bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_DELCOMP); } /* * fcs_vport_api Virtual port API */ /* * Use this function to instantiate a new FCS vport object. This * function will not trigger any HW initialization process (which will be * done in vport_start() call) * * param[in] vport - pointer to bfa_fcs_vport_t. This space * needs to be allocated by the driver. * param[in] fcs - FCS instance * param[in] vport_cfg - vport configuration * param[in] vf_id - VF_ID if vport is created within a VF. * FC_VF_ID_NULL to specify base fabric. * param[in] vport_drv - Opaque handle back to the driver's vport * structure * * retval BFA_STATUS_OK - on success. * retval BFA_STATUS_FAILED - on failure. 
 */
bfa_status_t
bfa_fcs_vport_create(struct bfa_fcs_vport_s *vport, struct bfa_fcs_s *fcs,
		u16 vf_id, struct bfa_lport_cfg_s *vport_cfg,
		struct bfad_vport_s *vport_drv)
{
	if (vport_cfg->pwwn == 0)
		return BFA_STATUS_INVALID_WWN;

	if (bfa_fcs_lport_get_pwwn(&fcs->fabric.bport) == vport_cfg->pwwn)
		return BFA_STATUS_VPORT_WWN_BP;

	if (bfa_fcs_vport_lookup(fcs, vf_id, vport_cfg->pwwn) != NULL)
		return BFA_STATUS_VPORT_EXISTS;

	if (fcs->fabric.num_vports == bfa_lps_get_max_vport(fcs->bfa))
		return BFA_STATUS_VPORT_MAX;

	vport->lps = bfa_lps_alloc(fcs->bfa);
	if (!vport->lps)
		return BFA_STATUS_VPORT_MAX;

	vport->vport_drv = vport_drv;
	vport_cfg->preboot_vp = BFA_FALSE;

	bfa_sm_set_state(vport, bfa_fcs_vport_sm_uninit);
	bfa_fcs_lport_attach(&vport->lport, fcs, vf_id, vport);
	bfa_fcs_lport_init(&vport->lport, vport_cfg);
	bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_CREATE);

	return BFA_STATUS_OK;
}

/*
 * Use this function to instantiate a new FCS PBC vport object. This
 * function will not trigger any HW initialization process (which will be
 * done in vport_start() call)
 *
 * param[in] vport	- pointer to bfa_fcs_vport_t. This space
 *			  needs to be allocated by the driver.
 * param[in] fcs	- FCS instance
 * param[in] vport_cfg	- vport configuration
 * param[in] vf_id	- VF_ID if vport is created within a VF.
 *			  FC_VF_ID_NULL to specify base fabric.
 * param[in] vport_drv	- Opaque handle back to the driver's vport
 *			  structure
 *
 * retval BFA_STATUS_OK - on success.
 * retval BFA_STATUS_FAILED - on failure.
 */
bfa_status_t
bfa_fcs_pbc_vport_create(struct bfa_fcs_vport_s *vport, struct bfa_fcs_s *fcs,
			u16 vf_id, struct bfa_lport_cfg_s *vport_cfg,
			struct bfad_vport_s *vport_drv)
{
	bfa_status_t rc;

	rc = bfa_fcs_vport_create(vport, fcs, vf_id, vport_cfg, vport_drv);
	vport->lport.port_cfg.preboot_vp = BFA_TRUE;

	return rc;
}

/*
 * Use this function to find out if this is a pbc vport or not.
 *
 * @param[in] vport - pointer to bfa_fcs_vport_t.
 *
 * @returns BFA_TRUE if this is a preboot (PBC) vport, BFA_FALSE otherwise.
 */
bfa_boolean_t
bfa_fcs_is_pbc_vport(struct bfa_fcs_vport_s *vport)
{
	if (vport && (vport->lport.port_cfg.preboot_vp == BFA_TRUE))
		return BFA_TRUE;
	else
		return BFA_FALSE;
}

/*
 * Use this function to initialize (start) the vport.
 *
 * @param[in] vport - pointer to bfa_fcs_vport_t.
 *
 * @returns BFA_STATUS_OK.
 */
bfa_status_t
bfa_fcs_vport_start(struct bfa_fcs_vport_s *vport)
{
	bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_START);

	return BFA_STATUS_OK;
}

/*
 * Use this function to quiesce the vport object. This function will return
 * immediately; when the vport is actually stopped, the
 * bfa_drv_vport_stop_cb() will be called.
 *
 * param[in] vport - pointer to bfa_fcs_vport_t.
 *
 * return BFA_STATUS_OK.
 */
bfa_status_t
bfa_fcs_vport_stop(struct bfa_fcs_vport_s *vport)
{
	bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_STOP);

	return BFA_STATUS_OK;
}

/*
 * Use this function to delete a vport object. Fabric object should
 * be stopped before this function call.
 *
 * !!!!!!! Do not invoke this from within FCS !!!!!!!
 *
 * param[in] vport - pointer to bfa_fcs_vport_t.
 *
 * return BFA_STATUS_OK on success, BFA_STATUS_PBC for a preboot vport.
 */
bfa_status_t
bfa_fcs_vport_delete(struct bfa_fcs_vport_s *vport)
{
	if (vport->lport.port_cfg.preboot_vp)
		return BFA_STATUS_PBC;

	bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_DELETE);

	return BFA_STATUS_OK;
}

/*
 * Use this function to get vport's current status info.
 *
 * param[in] vport	pointer to bfa_fcs_vport_t.
* param[out] attr pointer to return vport attributes * * return None */ void bfa_fcs_vport_get_attr(struct bfa_fcs_vport_s *vport, struct bfa_vport_attr_s *attr) { if (vport == NULL || attr == NULL) return; memset(attr, 0, sizeof(struct bfa_vport_attr_s)); bfa_fcs_lport_get_attr(&vport->lport, &attr->port_attr); attr->vport_state = bfa_sm_to_state(vport_sm_table, vport->sm); } /* * Lookup a virtual port. Excludes base port from lookup. */ struct bfa_fcs_vport_s * bfa_fcs_vport_lookup(struct bfa_fcs_s *fcs, u16 vf_id, wwn_t vpwwn) { struct bfa_fcs_vport_s *vport; struct bfa_fcs_fabric_s *fabric; bfa_trc(fcs, vf_id); bfa_trc(fcs, vpwwn); fabric = bfa_fcs_vf_lookup(fcs, vf_id); if (!fabric) { bfa_trc(fcs, vf_id); return NULL; } vport = bfa_fcs_fabric_vport_lookup(fabric, vpwwn); return vport; } /* * FDISC Response */ void bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status) { struct bfa_fcs_vport_s *vport = uarg; bfa_trc(__vport_fcs(vport), __vport_pwwn(vport)); bfa_trc(__vport_fcs(vport), status); switch (status) { case BFA_STATUS_OK: /* * Initialize the V-Port fields */ __vport_fcid(vport) = vport->lps->lp_pid; vport->vport_stats.fdisc_accepts++; bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_OK); break; case BFA_STATUS_INVALID_MAC: /* Only for CNA */ vport->vport_stats.fdisc_acc_bad++; bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR); break; case BFA_STATUS_EPROTOCOL: switch (vport->lps->ext_status) { case BFA_EPROTO_BAD_ACCEPT: vport->vport_stats.fdisc_acc_bad++; break; case BFA_EPROTO_UNKNOWN_RSP: vport->vport_stats.fdisc_unknown_rsp++; break; default: break; } if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES) bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR); else bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_FAILED); break; case BFA_STATUS_ETIMER: vport->vport_stats.fdisc_timeouts++; if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES) bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR); else bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_FAILED); break; case BFA_STATUS_FABRIC_RJT: vport->vport_stats.fdisc_rejects++; bfa_fcs_vport_fdisc_rejected(vport); break; default: vport->vport_stats.fdisc_rsp_err++; bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR); } } /* * LOGO response */ void bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg) { struct bfa_fcs_vport_s *vport = uarg; bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_OK); } /* * Received clear virtual link */ void bfa_cb_lps_cvl_event(void *bfad, void *uarg) { struct bfa_fcs_vport_s *vport = uarg; /* Send an Offline followed by an ONLINE */ bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_OFFLINE); bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_ONLINE); }
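/*
 * Usage sketch (illustrative only, not part of the original driver source):
 * a minimal view of how a driver-side caller might use the vport API above,
 * assuming the usual bfa/bfad headers are in scope. The "example_" name is
 * hypothetical; locking and failure cleanup are omitted.
 */
static bfa_status_t
example_vport_bringup(struct bfa_fcs_s *fcs, u16 vf_id,
		      struct bfa_lport_cfg_s *vport_cfg,
		      struct bfa_fcs_vport_s *vport,
		      struct bfad_vport_s *vport_drv)
{
	bfa_status_t status;

	/* validates the pwwn, rejects duplicates and allocates an lps slot */
	status = bfa_fcs_vport_create(vport, fcs, vf_id, vport_cfg, vport_drv);
	if (status != BFA_STATUS_OK)
		return status;

	/* kick the vport state machine; FDISC goes out once the fabric is online */
	return bfa_fcs_vport_start(vport);
}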
linux-master
drivers/scsi/bfa/bfa_fcs_lport.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. * Copyright (c) 2014- QLogic Corporation. * All rights reserved * www.qlogic.com * * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. */ /* * bfa_fcs.c BFA FCS main */ #include "bfad_drv.h" #include "bfad_im.h" #include "bfa_fcs.h" #include "bfa_fcbuild.h" BFA_TRC_FILE(FCS, FCS); /* * fcs_api BFA FCS API */ static void bfa_fcs_exit_comp(void *fcs_cbarg) { struct bfa_fcs_s *fcs = fcs_cbarg; struct bfad_s *bfad = fcs->bfad; complete(&bfad->comp); } /* * fcs initialization, called once after bfa initialization is complete */ void bfa_fcs_init(struct bfa_fcs_s *fcs) { bfa_sm_send_event(&fcs->fabric, BFA_FCS_FABRIC_SM_CREATE); bfa_trc(fcs, 0); } /* * fcs_api BFA FCS API */ /* * FCS update cfg - reset the pwwn/nwwn of fabric base logical port * with values learned during bfa_init firmware GETATTR REQ. */ void bfa_fcs_update_cfg(struct bfa_fcs_s *fcs) { struct bfa_fcs_fabric_s *fabric = &fcs->fabric; struct bfa_lport_cfg_s *port_cfg = &fabric->bport.port_cfg; struct bfa_ioc_s *ioc = &fabric->fcs->bfa->ioc; port_cfg->nwwn = ioc->attr->nwwn; port_cfg->pwwn = ioc->attr->pwwn; } /* * Stop FCS operations. */ void bfa_fcs_stop(struct bfa_fcs_s *fcs) { bfa_wc_init(&fcs->wc, bfa_fcs_exit_comp, fcs); bfa_wc_up(&fcs->wc); bfa_fcs_fabric_modstop(fcs); bfa_wc_wait(&fcs->wc); } /* * fcs pbc vport initialization */ void bfa_fcs_pbc_vport_init(struct bfa_fcs_s *fcs) { int i, npbc_vports; struct bfi_pbc_vport_s pbc_vports[BFI_PBC_MAX_VPORTS]; /* Initialize pbc vports */ if (!fcs->min_cfg) { npbc_vports = bfa_iocfc_get_pbc_vports(fcs->bfa, pbc_vports); for (i = 0; i < npbc_vports; i++) bfa_fcb_pbc_vport_create(fcs->bfa->bfad, pbc_vports[i]); } } /* * brief * FCS driver details initialization. * * param[in] fcs FCS instance * param[in] driver_info Driver Details * * return None */ void bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs, struct bfa_fcs_driver_info_s *driver_info) { fcs->driver_info = *driver_info; bfa_fcs_fabric_psymb_init(&fcs->fabric); bfa_fcs_fabric_nsymb_init(&fcs->fabric); } /* * brief * FCS instance cleanup and exit. * * param[in] fcs FCS instance * return None */ void bfa_fcs_exit(struct bfa_fcs_s *fcs) { bfa_wc_init(&fcs->wc, bfa_fcs_exit_comp, fcs); bfa_wc_up(&fcs->wc); bfa_trc(fcs, 0); bfa_lps_delete(fcs->fabric.lps); bfa_sm_send_event(&fcs->fabric, BFA_FCS_FABRIC_SM_DELETE); bfa_wc_wait(&fcs->wc); } /* * Fabric module implementation. 
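 *
 * The fabric below is implemented as a state machine (uninit, created,
 * linkdown, FLOGI/FLOGI-retry, auth, nofabric, online, EVFP, isolated,
 * deleting, stopping, cleanup, ...) driven by link, login and
 * delete/stop events.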
*/ #define BFA_FCS_FABRIC_RETRY_DELAY (2000) /* Milliseconds */ #define BFA_FCS_FABRIC_CLEANUP_DELAY (10000) /* Milliseconds */ #define bfa_fcs_fabric_set_opertype(__fabric) do { \ if (bfa_fcport_get_topology((__fabric)->fcs->bfa) \ == BFA_PORT_TOPOLOGY_P2P) { \ if (fabric->fab_type == BFA_FCS_FABRIC_SWITCHED) \ (__fabric)->oper_type = BFA_PORT_TYPE_NPORT; \ else \ (__fabric)->oper_type = BFA_PORT_TYPE_P2P; \ } else \ (__fabric)->oper_type = BFA_PORT_TYPE_NLPORT; \ } while (0) /* * forward declarations */ static void bfa_fcs_fabric_init(struct bfa_fcs_fabric_s *fabric); static void bfa_fcs_fabric_login(struct bfa_fcs_fabric_s *fabric); static void bfa_fcs_fabric_notify_online(struct bfa_fcs_fabric_s *fabric); static void bfa_fcs_fabric_notify_offline(struct bfa_fcs_fabric_s *fabric); static void bfa_fcs_fabric_delay(void *cbarg); static void bfa_fcs_fabric_delete(struct bfa_fcs_fabric_s *fabric); static void bfa_fcs_fabric_delete_comp(void *cbarg); static void bfa_fcs_fabric_stop(struct bfa_fcs_fabric_s *fabric); static void bfa_fcs_fabric_stop_comp(void *cbarg); static void bfa_fcs_fabric_process_uf(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs, u16 len); static void bfa_fcs_fabric_process_flogi(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs, u16 len); static void bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric); static void bfa_fcs_fabric_flogiacc_comp(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, bfa_status_t status, u32 rsp_len, u32 resid_len, struct fchs_s *rspfchs); static void bfa_fcs_fabric_sm_uninit(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event); static void bfa_fcs_fabric_sm_created(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event); static void bfa_fcs_fabric_sm_linkdown(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event); static void bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event); static void bfa_fcs_fabric_sm_flogi_retry(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event); static void bfa_fcs_fabric_sm_auth(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event); static void bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event); static void bfa_fcs_fabric_sm_evfp(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event); static void bfa_fcs_fabric_sm_evfp_done(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event); static void bfa_fcs_fabric_sm_isolated(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event); static void bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event); static void bfa_fcs_fabric_sm_stopping(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event); static void bfa_fcs_fabric_sm_cleanup(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event); /* * Beginning state before fabric creation. */ static void bfa_fcs_fabric_sm_uninit(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_CREATE: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_created); bfa_fcs_fabric_init(fabric); bfa_fcs_lport_init(&fabric->bport, &fabric->bport.port_cfg); break; case BFA_FCS_FABRIC_SM_LINK_UP: case BFA_FCS_FABRIC_SM_LINK_DOWN: break; default: bfa_sm_fault(fabric->fcs, event); } } /* * Beginning state before fabric creation. 
*/ static void bfa_fcs_fabric_sm_created(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { struct bfa_s *bfa = fabric->fcs->bfa; bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_START: if (!bfa_fcport_is_linkup(fabric->fcs->bfa)) { bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); break; } if (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP) { fabric->fab_type = BFA_FCS_FABRIC_LOOP; fabric->bport.pid = bfa_fcport_get_myalpa(bfa); fabric->bport.pid = bfa_hton3b(fabric->bport.pid); bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_online); bfa_fcs_fabric_set_opertype(fabric); bfa_fcs_lport_online(&fabric->bport); } else { bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi); bfa_fcs_fabric_login(fabric); } break; case BFA_FCS_FABRIC_SM_LINK_UP: case BFA_FCS_FABRIC_SM_LINK_DOWN: break; case BFA_FCS_FABRIC_SM_DELETE: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); bfa_fcs_fabric_delete(fabric); break; default: bfa_sm_fault(fabric->fcs, event); } } /* * Link is down, awaiting LINK UP event from port. This is also the * first state at fabric creation. */ static void bfa_fcs_fabric_sm_linkdown(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { struct bfa_s *bfa = fabric->fcs->bfa; bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_LINK_UP: if (bfa_fcport_get_topology(bfa) != BFA_PORT_TOPOLOGY_LOOP) { bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi); bfa_fcs_fabric_login(fabric); break; } fabric->fab_type = BFA_FCS_FABRIC_LOOP; fabric->bport.pid = bfa_fcport_get_myalpa(bfa); fabric->bport.pid = bfa_hton3b(fabric->bport.pid); bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_online); bfa_fcs_fabric_set_opertype(fabric); bfa_fcs_lport_online(&fabric->bport); break; case BFA_FCS_FABRIC_SM_RETRY_OP: case BFA_FCS_FABRIC_SM_LOOPBACK: break; case BFA_FCS_FABRIC_SM_DELETE: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); bfa_fcs_fabric_delete(fabric); break; case BFA_FCS_FABRIC_SM_STOP: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_cleanup); bfa_fcs_fabric_stop(fabric); break; default: bfa_sm_fault(fabric->fcs, event); } } /* * FLOGI is in progress, awaiting FLOGI reply. 
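 * A successful reply (CONT_OP) moves the fabric to auth or online, RETRY_OP
 * arms the retry delay timer, LOOPBACK parks the port in loopback and
 * NO_FABRIC falls back to the N2N/no-fabric handling.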
*/ static void bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_CONT_OP: bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit); fabric->fab_type = BFA_FCS_FABRIC_SWITCHED; if (fabric->auth_reqd && fabric->is_auth) { bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth); bfa_trc(fabric->fcs, event); } else { bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_online); bfa_fcs_fabric_notify_online(fabric); } break; case BFA_FCS_FABRIC_SM_RETRY_OP: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi_retry); bfa_timer_start(fabric->fcs->bfa, &fabric->delay_timer, bfa_fcs_fabric_delay, fabric, BFA_FCS_FABRIC_RETRY_DELAY); break; case BFA_FCS_FABRIC_SM_LOOPBACK: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_loopback); bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE); bfa_fcs_fabric_set_opertype(fabric); break; case BFA_FCS_FABRIC_SM_NO_FABRIC: fabric->fab_type = BFA_FCS_FABRIC_N2N; bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit); bfa_fcs_fabric_notify_online(fabric); bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_nofabric); break; case BFA_FCS_FABRIC_SM_LINK_DOWN: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE); break; case BFA_FCS_FABRIC_SM_DELETE: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE); bfa_fcs_fabric_delete(fabric); break; default: bfa_sm_fault(fabric->fcs, event); } } static void bfa_fcs_fabric_sm_flogi_retry(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_DELAYED: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi); bfa_fcs_fabric_login(fabric); break; case BFA_FCS_FABRIC_SM_LINK_DOWN: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); bfa_timer_stop(&fabric->delay_timer); break; case BFA_FCS_FABRIC_SM_DELETE: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); bfa_timer_stop(&fabric->delay_timer); bfa_fcs_fabric_delete(fabric); break; default: bfa_sm_fault(fabric->fcs, event); } } /* * Authentication is in progress, awaiting authentication results. 
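 * AUTH_SUCCESS brings the fabric online, AUTH_FAILED parks it in the
 * auth_failed state, and PERF_EVFP switches to the EVFP exchange.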
*/ static void bfa_fcs_fabric_sm_auth(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_AUTH_FAILED: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth_failed); bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE); break; case BFA_FCS_FABRIC_SM_AUTH_SUCCESS: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_online); bfa_fcs_fabric_notify_online(fabric); break; case BFA_FCS_FABRIC_SM_PERF_EVFP: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_evfp); break; case BFA_FCS_FABRIC_SM_LINK_DOWN: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE); break; case BFA_FCS_FABRIC_SM_DELETE: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); bfa_fcs_fabric_delete(fabric); break; default: bfa_sm_fault(fabric->fcs, event); } } /* * Authentication failed */ void bfa_fcs_fabric_sm_auth_failed(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_LINK_DOWN: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); bfa_fcs_fabric_notify_offline(fabric); break; case BFA_FCS_FABRIC_SM_DELETE: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); bfa_fcs_fabric_delete(fabric); break; default: bfa_sm_fault(fabric->fcs, event); } } /* * Port is in loopback mode. */ void bfa_fcs_fabric_sm_loopback(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_LINK_DOWN: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); bfa_fcs_fabric_notify_offline(fabric); break; case BFA_FCS_FABRIC_SM_DELETE: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); bfa_fcs_fabric_delete(fabric); break; default: bfa_sm_fault(fabric->fcs, event); } } /* * There is no attached fabric - private loop or NPort-to-NPort topology. */ static void bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_LINK_DOWN: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE); bfa_fcs_fabric_notify_offline(fabric); break; case BFA_FCS_FABRIC_SM_DELETE: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); bfa_fcs_fabric_delete(fabric); break; case BFA_FCS_FABRIC_SM_NO_FABRIC: bfa_trc(fabric->fcs, fabric->bb_credit); bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit); break; case BFA_FCS_FABRIC_SM_RETRY_OP: break; default: bfa_sm_fault(fabric->fcs, event); } } /* * Fabric is online - normal operating state. 
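 * A link down either offlines the base lport (loop topology) or takes the
 * LPS offline and propagates the offline to the vports and base port;
 * STOP and DELETE move to the stopping/deleting states.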
*/ void bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { struct bfa_s *bfa = fabric->fcs->bfa; bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_LINK_DOWN: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); if (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP) { bfa_fcs_lport_offline(&fabric->bport); } else { bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE); bfa_fcs_fabric_notify_offline(fabric); } break; case BFA_FCS_FABRIC_SM_DELETE: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); bfa_fcs_fabric_delete(fabric); break; case BFA_FCS_FABRIC_SM_STOP: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_stopping); bfa_fcs_fabric_stop(fabric); break; case BFA_FCS_FABRIC_SM_AUTH_FAILED: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth_failed); bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE); break; case BFA_FCS_FABRIC_SM_AUTH_SUCCESS: break; default: bfa_sm_fault(fabric->fcs, event); } } /* * Exchanging virtual fabric parameters. */ static void bfa_fcs_fabric_sm_evfp(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_CONT_OP: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_evfp_done); break; case BFA_FCS_FABRIC_SM_ISOLATE: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_isolated); break; default: bfa_sm_fault(fabric->fcs, event); } } /* * EVFP exchange complete and VFT tagging is enabled. */ static void bfa_fcs_fabric_sm_evfp_done(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); } /* * Port is isolated after EVFP exchange due to VF_ID mismatch (N and F). */ static void bfa_fcs_fabric_sm_isolated(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { struct bfad_s *bfad = (struct bfad_s *)fabric->fcs->bfad; char pwwn_ptr[BFA_STRING_32]; bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); wwn2str(pwwn_ptr, fabric->bport.port_cfg.pwwn); BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Port is isolated due to VF_ID mismatch. " "PWWN: %s Port VF_ID: %04x switch port VF_ID: %04x.", pwwn_ptr, fabric->fcs->port_vfid, fabric->event_arg.swp_vfid); } /* * Fabric is being deleted, awaiting vport delete completions. */ static void bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_DELCOMP: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit); bfa_wc_down(&fabric->fcs->wc); break; case BFA_FCS_FABRIC_SM_LINK_UP: break; case BFA_FCS_FABRIC_SM_LINK_DOWN: bfa_fcs_fabric_notify_offline(fabric); break; default: bfa_sm_fault(fabric->fcs, event); } } /* * Fabric is being stopped, awaiting vport stop completions. 
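 * On STOPCOMP a switched fabric sends a LOGO and waits in the cleanup state,
 * while a loop port goes straight back to the created state.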
*/ static void bfa_fcs_fabric_sm_stopping(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { struct bfa_s *bfa = fabric->fcs->bfa; bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_STOPCOMP: if (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP) { bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_created); } else { bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_cleanup); bfa_sm_send_event(fabric->lps, BFA_LPS_SM_LOGOUT); } break; case BFA_FCS_FABRIC_SM_LINK_UP: break; case BFA_FCS_FABRIC_SM_LINK_DOWN: if (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP) bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_created); else bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_cleanup); break; default: bfa_sm_fault(fabric->fcs, event); } } /* * Fabric is being stopped, cleanup without FLOGO */ static void bfa_fcs_fabric_sm_cleanup(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, event); switch (event) { case BFA_FCS_FABRIC_SM_STOPCOMP: case BFA_FCS_FABRIC_SM_LOGOCOMP: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_created); bfa_wc_down(&(fabric->fcs)->wc); break; case BFA_FCS_FABRIC_SM_LINK_DOWN: /* * Ignore - can get this event if we get notified about IOC down * before the fabric completion callbk is done. */ break; default: bfa_sm_fault(fabric->fcs, event); } } /* * fcs_fabric_private fabric private functions */ static void bfa_fcs_fabric_init(struct bfa_fcs_fabric_s *fabric) { struct bfa_lport_cfg_s *port_cfg = &fabric->bport.port_cfg; port_cfg->roles = BFA_LPORT_ROLE_FCP_IM; port_cfg->nwwn = fabric->fcs->bfa->ioc.attr->nwwn; port_cfg->pwwn = fabric->fcs->bfa->ioc.attr->pwwn; } /* * Port Symbolic Name Creation for base port. */ void bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric) { struct bfa_lport_cfg_s *port_cfg = &fabric->bport.port_cfg; char model[BFA_ADAPTER_MODEL_NAME_LEN] = {0}; struct bfa_fcs_driver_info_s *driver_info = &fabric->fcs->driver_info; bfa_ioc_get_adapter_model(&fabric->fcs->bfa->ioc, model); /* Model name/number */ strscpy(port_cfg->sym_name.symname, model, BFA_SYMNAME_MAXLEN); strlcat(port_cfg->sym_name.symname, BFA_FCS_PORT_SYMBNAME_SEPARATOR, BFA_SYMNAME_MAXLEN); /* Driver Version */ strlcat(port_cfg->sym_name.symname, driver_info->version, BFA_SYMNAME_MAXLEN); strlcat(port_cfg->sym_name.symname, BFA_FCS_PORT_SYMBNAME_SEPARATOR, BFA_SYMNAME_MAXLEN); /* Host machine name */ strlcat(port_cfg->sym_name.symname, driver_info->host_machine_name, BFA_SYMNAME_MAXLEN); strlcat(port_cfg->sym_name.symname, BFA_FCS_PORT_SYMBNAME_SEPARATOR, BFA_SYMNAME_MAXLEN); /* * Host OS Info : * If OS Patch Info is not there, do not truncate any bytes from the * OS name string and instead copy the entire OS info string (64 bytes). 
*/ if (driver_info->host_os_patch[0] == '\0') { strlcat(port_cfg->sym_name.symname, driver_info->host_os_name, BFA_SYMNAME_MAXLEN); strlcat(port_cfg->sym_name.symname, BFA_FCS_PORT_SYMBNAME_SEPARATOR, BFA_SYMNAME_MAXLEN); } else { strlcat(port_cfg->sym_name.symname, driver_info->host_os_name, BFA_SYMNAME_MAXLEN); strlcat(port_cfg->sym_name.symname, BFA_FCS_PORT_SYMBNAME_SEPARATOR, BFA_SYMNAME_MAXLEN); /* Append host OS Patch Info */ strlcat(port_cfg->sym_name.symname, driver_info->host_os_patch, BFA_SYMNAME_MAXLEN); } /* null terminate */ port_cfg->sym_name.symname[BFA_SYMNAME_MAXLEN - 1] = 0; } /* * Node Symbolic Name Creation for base port and all vports */ void bfa_fcs_fabric_nsymb_init(struct bfa_fcs_fabric_s *fabric) { struct bfa_lport_cfg_s *port_cfg = &fabric->bport.port_cfg; char model[BFA_ADAPTER_MODEL_NAME_LEN] = {0}; struct bfa_fcs_driver_info_s *driver_info = &fabric->fcs->driver_info; bfa_ioc_get_adapter_model(&fabric->fcs->bfa->ioc, model); /* Model name/number */ strscpy(port_cfg->node_sym_name.symname, model, BFA_SYMNAME_MAXLEN); strlcat(port_cfg->node_sym_name.symname, BFA_FCS_PORT_SYMBNAME_SEPARATOR, BFA_SYMNAME_MAXLEN); /* Driver Version */ strlcat(port_cfg->node_sym_name.symname, (char *)driver_info->version, BFA_SYMNAME_MAXLEN); strlcat(port_cfg->node_sym_name.symname, BFA_FCS_PORT_SYMBNAME_SEPARATOR, BFA_SYMNAME_MAXLEN); /* Host machine name */ strlcat(port_cfg->node_sym_name.symname, driver_info->host_machine_name, BFA_SYMNAME_MAXLEN); strlcat(port_cfg->node_sym_name.symname, BFA_FCS_PORT_SYMBNAME_SEPARATOR, BFA_SYMNAME_MAXLEN); /* null terminate */ port_cfg->node_sym_name.symname[BFA_SYMNAME_MAXLEN - 1] = 0; } /* * bfa lps login completion callback */ void bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status) { struct bfa_fcs_fabric_s *fabric = uarg; bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_trc(fabric->fcs, status); switch (status) { case BFA_STATUS_OK: fabric->stats.flogi_accepts++; break; case BFA_STATUS_INVALID_MAC: /* Only for CNA */ fabric->stats.flogi_acc_err++; bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP); return; case BFA_STATUS_EPROTOCOL: switch (fabric->lps->ext_status) { case BFA_EPROTO_BAD_ACCEPT: fabric->stats.flogi_acc_err++; break; case BFA_EPROTO_UNKNOWN_RSP: fabric->stats.flogi_unknown_rsp++; break; default: break; } bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP); return; case BFA_STATUS_FABRIC_RJT: fabric->stats.flogi_rejects++; bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP); return; default: fabric->stats.flogi_rsp_err++; bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP); return; } fabric->bb_credit = fabric->lps->pr_bbcred; bfa_trc(fabric->fcs, fabric->bb_credit); if (!(fabric->lps->brcd_switch)) fabric->fabric_name = fabric->lps->pr_nwwn; /* * Check port type. It should be 1 = F-port. */ if (fabric->lps->fport) { fabric->bport.pid = fabric->lps->lp_pid; fabric->is_npiv = fabric->lps->npiv_en; fabric->is_auth = fabric->lps->auth_req; bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_CONT_OP); } else { /* * Nport-2-Nport direct attached */ fabric->bport.port_topo.pn2n.rem_port_wwn = fabric->lps->pr_pwwn; fabric->fab_type = BFA_FCS_FABRIC_N2N; bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_NO_FABRIC); } bfa_trc(fabric->fcs, fabric->bport.pid); bfa_trc(fabric->fcs, fabric->is_npiv); bfa_trc(fabric->fcs, fabric->is_auth); } /* * Allocate and send FLOGI. 
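 * The login is issued through the LPS module with the configured pwwn/nwwn
 * and the port's maximum frame size.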
*/ static void bfa_fcs_fabric_login(struct bfa_fcs_fabric_s *fabric) { struct bfa_s *bfa = fabric->fcs->bfa; struct bfa_lport_cfg_s *pcfg = &fabric->bport.port_cfg; u8 alpa = 0; bfa_lps_flogi(fabric->lps, fabric, alpa, bfa_fcport_get_maxfrsize(bfa), pcfg->pwwn, pcfg->nwwn, fabric->auth_reqd); fabric->stats.flogi_sent++; } static void bfa_fcs_fabric_notify_online(struct bfa_fcs_fabric_s *fabric) { struct bfa_fcs_vport_s *vport; struct list_head *qe, *qen; bfa_trc(fabric->fcs, fabric->fabric_name); bfa_fcs_fabric_set_opertype(fabric); fabric->stats.fabric_onlines++; /* * notify online event to base and then virtual ports */ bfa_fcs_lport_online(&fabric->bport); list_for_each_safe(qe, qen, &fabric->vport_q) { vport = (struct bfa_fcs_vport_s *) qe; bfa_fcs_vport_online(vport); } } static void bfa_fcs_fabric_notify_offline(struct bfa_fcs_fabric_s *fabric) { struct bfa_fcs_vport_s *vport; struct list_head *qe, *qen; bfa_trc(fabric->fcs, fabric->fabric_name); fabric->stats.fabric_offlines++; /* * notify offline event first to vports and then base port. */ list_for_each_safe(qe, qen, &fabric->vport_q) { vport = (struct bfa_fcs_vport_s *) qe; bfa_fcs_vport_offline(vport); } bfa_fcs_lport_offline(&fabric->bport); fabric->fabric_name = 0; fabric->fabric_ip_addr[0] = 0; } static void bfa_fcs_fabric_delay(void *cbarg) { struct bfa_fcs_fabric_s *fabric = cbarg; bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELAYED); } /* * Stop all vports and wait for vport stop completions. */ static void bfa_fcs_fabric_stop(struct bfa_fcs_fabric_s *fabric) { struct bfa_fcs_vport_s *vport; struct list_head *qe, *qen; bfa_wc_init(&fabric->stop_wc, bfa_fcs_fabric_stop_comp, fabric); list_for_each_safe(qe, qen, &fabric->vport_q) { vport = (struct bfa_fcs_vport_s *) qe; bfa_wc_up(&fabric->stop_wc); bfa_fcs_vport_fcs_stop(vport); } bfa_wc_up(&fabric->stop_wc); bfa_fcs_lport_stop(&fabric->bport); bfa_wc_wait(&fabric->stop_wc); } /* * Delete all vports and wait for vport delete completions. */ static void bfa_fcs_fabric_delete(struct bfa_fcs_fabric_s *fabric) { struct bfa_fcs_vport_s *vport; struct list_head *qe, *qen; list_for_each_safe(qe, qen, &fabric->vport_q) { vport = (struct bfa_fcs_vport_s *) qe; bfa_fcs_vport_fcs_delete(vport); } bfa_fcs_lport_delete(&fabric->bport); bfa_wc_wait(&fabric->wc); } static void bfa_fcs_fabric_delete_comp(void *cbarg) { struct bfa_fcs_fabric_s *fabric = cbarg; bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELCOMP); } static void bfa_fcs_fabric_stop_comp(void *cbarg) { struct bfa_fcs_fabric_s *fabric = cbarg; bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_STOPCOMP); } /* * fcs_fabric_public fabric public functions */ /* * Fabric module stop -- stop FCS actions */ void bfa_fcs_fabric_modstop(struct bfa_fcs_s *fcs) { struct bfa_fcs_fabric_s *fabric; bfa_trc(fcs, 0); fabric = &fcs->fabric; bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_STOP); } /* * Fabric module start -- kick starts FCS actions */ void bfa_fcs_fabric_modstart(struct bfa_fcs_s *fcs) { struct bfa_fcs_fabric_s *fabric; bfa_trc(fcs, 0); fabric = &fcs->fabric; bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_START); } /* * Link up notification from BFA physical port module. */ void bfa_fcs_fabric_link_up(struct bfa_fcs_fabric_s *fabric) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_UP); } /* * Link down notification from BFA physical port module. 
*/ void bfa_fcs_fabric_link_down(struct bfa_fcs_fabric_s *fabric) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_DOWN); } /* * A child vport is being created in the fabric. * * Call from vport module at vport creation. A list of base port and vports * belonging to a fabric is maintained to propagate link events. * * param[in] fabric - Fabric instance. This can be a base fabric or vf. * param[in] vport - Vport being created. * * @return None (always succeeds) */ void bfa_fcs_fabric_addvport(struct bfa_fcs_fabric_s *fabric, struct bfa_fcs_vport_s *vport) { /* * - add vport to fabric's vport_q */ bfa_trc(fabric->fcs, fabric->vf_id); list_add_tail(&vport->qe, &fabric->vport_q); fabric->num_vports++; bfa_wc_up(&fabric->wc); } /* * A child vport is being deleted from fabric. * * Vport is being deleted. */ void bfa_fcs_fabric_delvport(struct bfa_fcs_fabric_s *fabric, struct bfa_fcs_vport_s *vport) { list_del(&vport->qe); fabric->num_vports--; bfa_wc_down(&fabric->wc); } /* * Lookup for a vport within a fabric given its pwwn */ struct bfa_fcs_vport_s * bfa_fcs_fabric_vport_lookup(struct bfa_fcs_fabric_s *fabric, wwn_t pwwn) { struct bfa_fcs_vport_s *vport; struct list_head *qe; list_for_each(qe, &fabric->vport_q) { vport = (struct bfa_fcs_vport_s *) qe; if (bfa_fcs_lport_get_pwwn(&vport->lport) == pwwn) return vport; } return NULL; } /* * Get OUI of the attached switch. * * Note : Use of this function should be avoided as much as possible. * This function should be used only if there is any requirement * to check for FOS version below 6.3. * To check if the attached fabric is a brocade fabric, use * bfa_lps_is_brcd_fabric() which works for FOS versions 6.3 * or above only. */ u16 bfa_fcs_fabric_get_switch_oui(struct bfa_fcs_fabric_s *fabric) { wwn_t fab_nwwn; u8 *tmp; u16 oui; fab_nwwn = fabric->lps->pr_nwwn; tmp = (u8 *)&fab_nwwn; oui = (tmp[3] << 8) | tmp[4]; return oui; } /* * Unsolicited frame receive handling. */ void bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs, u16 len) { u32 pid = fchs->d_id; struct bfa_fcs_vport_s *vport; struct list_head *qe; struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1); struct fc_logi_s *flogi = (struct fc_logi_s *) els_cmd; bfa_trc(fabric->fcs, len); bfa_trc(fabric->fcs, pid); /* * Look for our own FLOGI frames being looped back. This means an * external loopback cable is in place. Our own FLOGI frames are * sometimes looped back when switch port gets temporarily bypassed. */ if ((pid == bfa_ntoh3b(FC_FABRIC_PORT)) && (els_cmd->els_code == FC_ELS_FLOGI) && (flogi->port_name == bfa_fcs_lport_get_pwwn(&fabric->bport))) { bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LOOPBACK); return; } /* * FLOGI/EVFP exchanges should be consumed by base fabric. 
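 * Everything else is demultiplexed by destination PID to the base port or to
 * one of the vports hanging off this fabric.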
*/ if (fchs->d_id == bfa_hton3b(FC_FABRIC_PORT)) { bfa_trc(fabric->fcs, pid); bfa_fcs_fabric_process_uf(fabric, fchs, len); return; } if (fabric->bport.pid == pid) { /* * All authentication frames should be routed to auth */ bfa_trc(fabric->fcs, els_cmd->els_code); if (els_cmd->els_code == FC_ELS_AUTH) { bfa_trc(fabric->fcs, els_cmd->els_code); return; } bfa_trc(fabric->fcs, *(u8 *) ((u8 *) fchs)); bfa_fcs_lport_uf_recv(&fabric->bport, fchs, len); return; } /* * look for a matching local port ID */ list_for_each(qe, &fabric->vport_q) { vport = (struct bfa_fcs_vport_s *) qe; if (vport->lport.pid == pid) { bfa_fcs_lport_uf_recv(&vport->lport, fchs, len); return; } } if (!bfa_fcs_fabric_is_switched(fabric)) bfa_fcs_lport_uf_recv(&fabric->bport, fchs, len); bfa_trc(fabric->fcs, fchs->type); } /* * Unsolicited frames to be processed by fabric. */ static void bfa_fcs_fabric_process_uf(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs, u16 len) { struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1); bfa_trc(fabric->fcs, els_cmd->els_code); switch (els_cmd->els_code) { case FC_ELS_FLOGI: bfa_fcs_fabric_process_flogi(fabric, fchs, len); break; default: /* * need to generate a LS_RJT */ break; } } /* * Process incoming FLOGI */ static void bfa_fcs_fabric_process_flogi(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs, u16 len) { struct fc_logi_s *flogi = (struct fc_logi_s *) (fchs + 1); struct bfa_fcs_lport_s *bport = &fabric->bport; bfa_trc(fabric->fcs, fchs->s_id); fabric->stats.flogi_rcvd++; /* * Check port type. It should be 0 = n-port. */ if (flogi->csp.port_type) { /* * @todo: may need to send a LS_RJT */ bfa_trc(fabric->fcs, flogi->port_name); fabric->stats.flogi_rejected++; return; } fabric->bb_credit = be16_to_cpu(flogi->csp.bbcred); bport->port_topo.pn2n.rem_port_wwn = flogi->port_name; bport->port_topo.pn2n.reply_oxid = fchs->ox_id; /* * Send a Flogi Acc */ bfa_fcs_fabric_send_flogi_acc(fabric); bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_NO_FABRIC); } static void bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric) { struct bfa_lport_cfg_s *pcfg = &fabric->bport.port_cfg; struct bfa_fcs_lport_n2n_s *n2n_port = &fabric->bport.port_topo.pn2n; struct bfa_s *bfa = fabric->fcs->bfa; struct bfa_fcxp_s *fcxp; u16 reqlen; struct fchs_s fchs; fcxp = bfa_fcs_fcxp_alloc(fabric->fcs, BFA_FALSE); /* * Do not expect this failure -- expect remote node to retry */ if (!fcxp) return; reqlen = fc_flogi_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), bfa_hton3b(FC_FABRIC_PORT), n2n_port->reply_oxid, pcfg->pwwn, pcfg->nwwn, bfa_fcport_get_maxfrsize(bfa), bfa_fcport_get_rx_bbcredit(bfa), 0); bfa_fcxp_send(fcxp, NULL, fabric->vf_id, fabric->lps->bfa_tag, BFA_FALSE, FC_CLASS_3, reqlen, &fchs, bfa_fcs_fabric_flogiacc_comp, fabric, FC_MAX_PDUSZ, 0); } /* * Flogi Acc completion callback. 
*/ static void bfa_fcs_fabric_flogiacc_comp(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, bfa_status_t status, u32 rsp_len, u32 resid_len, struct fchs_s *rspfchs) { struct bfa_fcs_fabric_s *fabric = cbarg; bfa_trc(fabric->fcs, status); } /* * Send AEN notification */ static void bfa_fcs_fabric_aen_post(struct bfa_fcs_lport_s *port, enum bfa_port_aen_event event) { struct bfad_s *bfad = (struct bfad_s *)port->fabric->fcs->bfad; struct bfa_aen_entry_s *aen_entry; bfad_get_aen_entry(bfad, aen_entry); if (!aen_entry) return; aen_entry->aen_data.port.pwwn = bfa_fcs_lport_get_pwwn(port); aen_entry->aen_data.port.fwwn = bfa_fcs_lport_get_fabric_name(port); /* Send the AEN notification */ bfad_im_post_vendor_event(aen_entry, bfad, ++port->fcs->fcs_aen_seq, BFA_AEN_CAT_PORT, event); } /* * * @param[in] fabric - fabric * @param[in] wwn_t - new fabric name * * @return - none */ void bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric, wwn_t fabric_name) { struct bfad_s *bfad = (struct bfad_s *)fabric->fcs->bfad; char pwwn_ptr[BFA_STRING_32]; char fwwn_ptr[BFA_STRING_32]; bfa_trc(fabric->fcs, fabric_name); if (fabric->fabric_name == 0) { /* * With BRCD switches, we don't get Fabric Name in FLOGI. * Don't generate a fabric name change event in this case. */ fabric->fabric_name = fabric_name; } else { fabric->fabric_name = fabric_name; wwn2str(pwwn_ptr, bfa_fcs_lport_get_pwwn(&fabric->bport)); wwn2str(fwwn_ptr, bfa_fcs_lport_get_fabric_name(&fabric->bport)); BFA_LOG(KERN_WARNING, bfad, bfa_log_level, "Base port WWN = %s Fabric WWN = %s\n", pwwn_ptr, fwwn_ptr); bfa_fcs_fabric_aen_post(&fabric->bport, BFA_PORT_AEN_FABRIC_NAME_CHANGE); } } void bfa_cb_lps_flogo_comp(void *bfad, void *uarg) { struct bfa_fcs_fabric_s *fabric = uarg; bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LOGOCOMP); } /* * Returns FCS vf structure for a given vf_id. * * param[in] vf_id - VF_ID * * return * If lookup succeeds, retuns fcs vf object, otherwise returns NULL */ bfa_fcs_vf_t * bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id) { bfa_trc(fcs, vf_id); if (vf_id == FC_VF_ID_NULL) return &fcs->fabric; return NULL; } /* * Return the list of local logical ports present in the given VF. * * @param[in] vf vf for which logical ports are returned * @param[out] lpwwn returned logical port wwn list * @param[in,out] nlports in:size of lpwwn list; * out:total elements present, * actual elements returned is limited by the size */ void bfa_fcs_vf_get_ports(bfa_fcs_vf_t *vf, wwn_t lpwwn[], int *nlports) { struct list_head *qe; struct bfa_fcs_vport_s *vport; int i = 0; struct bfa_fcs_s *fcs; if (vf == NULL || lpwwn == NULL || *nlports == 0) return; fcs = vf->fcs; bfa_trc(fcs, vf->vf_id); bfa_trc(fcs, (uint32_t) *nlports); lpwwn[i++] = vf->bport.port_cfg.pwwn; list_for_each(qe, &vf->vport_q) { if (i >= *nlports) break; vport = (struct bfa_fcs_vport_s *) qe; lpwwn[i++] = vport->lport.port_cfg.pwwn; } bfa_trc(fcs, i); *nlports = i; } /* * BFA FCS PPORT ( physical port) */ static void bfa_fcs_port_event_handler(void *cbarg, enum bfa_port_linkstate event) { struct bfa_fcs_s *fcs = cbarg; bfa_trc(fcs, event); switch (event) { case BFA_PORT_LINKUP: bfa_fcs_fabric_link_up(&fcs->fabric); break; case BFA_PORT_LINKDOWN: bfa_fcs_fabric_link_down(&fcs->fabric); break; default: WARN_ON(1); } } /* * BFA FCS UF ( Unsolicited Frames) */ /* * BFA callback for unsolicited frame receive handler. 
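 * Strips the optional VFT header, maps the frame to the owning fabric (or
 * drops it if the vf_id is unknown) and hands it to the fabric UF handler.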
* * @param[in] cbarg callback arg for receive handler * @param[in] uf unsolicited frame descriptor * * @return None */ static void bfa_fcs_uf_recv(void *cbarg, struct bfa_uf_s *uf) { struct bfa_fcs_s *fcs = (struct bfa_fcs_s *) cbarg; struct fchs_s *fchs = bfa_uf_get_frmbuf(uf); u16 len = bfa_uf_get_frmlen(uf); struct fc_vft_s *vft; struct bfa_fcs_fabric_s *fabric; /* * check for VFT header */ if (fchs->routing == FC_RTG_EXT_HDR && fchs->cat_info == FC_CAT_VFT_HDR) { bfa_stats(fcs, uf.tagged); vft = bfa_uf_get_frmbuf(uf); if (fcs->port_vfid == vft->vf_id) fabric = &fcs->fabric; else fabric = bfa_fcs_vf_lookup(fcs, (u16) vft->vf_id); /* * drop frame if vfid is unknown */ if (!fabric) { WARN_ON(1); bfa_stats(fcs, uf.vfid_unknown); bfa_uf_free(uf); return; } /* * skip vft header */ fchs = (struct fchs_s *) (vft + 1); len -= sizeof(struct fc_vft_s); bfa_trc(fcs, vft->vf_id); } else { bfa_stats(fcs, uf.untagged); fabric = &fcs->fabric; } bfa_trc(fcs, ((u32 *) fchs)[0]); bfa_trc(fcs, ((u32 *) fchs)[1]); bfa_trc(fcs, ((u32 *) fchs)[2]); bfa_trc(fcs, ((u32 *) fchs)[3]); bfa_trc(fcs, ((u32 *) fchs)[4]); bfa_trc(fcs, ((u32 *) fchs)[5]); bfa_trc(fcs, len); bfa_fcs_fabric_uf_recv(fabric, fchs, len); bfa_uf_free(uf); } /* * fcs attach -- called once to initialize data structures at driver attach time */ void bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad, bfa_boolean_t min_cfg) { struct bfa_fcs_fabric_s *fabric = &fcs->fabric; fcs->bfa = bfa; fcs->bfad = bfad; fcs->min_cfg = min_cfg; fcs->num_rport_logins = 0; bfa->fcs = BFA_TRUE; fcbuild_init(); bfa_fcport_event_register(fcs->bfa, bfa_fcs_port_event_handler, fcs); bfa_uf_recv_register(fcs->bfa, bfa_fcs_uf_recv, fcs); memset(fabric, 0, sizeof(struct bfa_fcs_fabric_s)); /* * Initialize base fabric. */ fabric->fcs = fcs; INIT_LIST_HEAD(&fabric->vport_q); INIT_LIST_HEAD(&fabric->vf_q); fabric->lps = bfa_lps_alloc(fcs->bfa); WARN_ON(!fabric->lps); /* * Initialize fabric delete completion handler. Fabric deletion is * complete when the last vport delete is complete. */ bfa_wc_init(&fabric->wc, bfa_fcs_fabric_delete_comp, fabric); bfa_wc_up(&fabric->wc); /* For the base port */ bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit); bfa_fcs_lport_attach(&fabric->bport, fabric->fcs, FC_VF_ID_NULL, NULL); }
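/*
 * Usage sketch (illustrative only, not part of the original source): one
 * plausible ordering of the FCS entry points defined in this file as a
 * driver would call them at probe time. The "example_" name is hypothetical
 * and error handling is omitted.
 */
static void
example_fcs_bringup(struct bfa_fcs_s *fcs, struct bfa_s *bfa,
		    struct bfad_s *bfad,
		    struct bfa_fcs_driver_info_s *driver_info)
{
	/* one-time data structure setup at driver attach */
	bfa_fcs_attach(fcs, bfa, bfad, BFA_FALSE);

	/* populate port/node symbolic names from the driver details */
	bfa_fcs_driver_info_init(fcs, driver_info);

	/* create the base fabric and then kick off its state machine */
	bfa_fcs_init(fcs);
	bfa_fcs_fabric_modstart(fcs);
}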
linux-master
drivers/scsi/bfa/bfa_fcs.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. * Copyright (c) 2014- QLogic Corporation. * All rights reserved * www.qlogic.com * * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. */ #include "bfad_drv.h" #include "bfad_im.h" #include "bfa_plog.h" #include "bfa_cs.h" #include "bfa_modules.h" BFA_TRC_FILE(HAL, FCXP); /* * LPS related definitions */ #define BFA_LPS_MIN_LPORTS (1) #define BFA_LPS_MAX_LPORTS (256) /* * Maximum Vports supported per physical port or vf. */ #define BFA_LPS_MAX_VPORTS_SUPP_CB 255 #define BFA_LPS_MAX_VPORTS_SUPP_CT 190 /* * FC PORT related definitions */ /* * The port is considered disabled if corresponding physical port or IOC are * disabled explicitly */ #define BFA_PORT_IS_DISABLED(bfa) \ ((bfa_fcport_is_disabled(bfa) == BFA_TRUE) || \ (bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE)) /* * BFA port state machine events */ enum bfa_fcport_sm_event { BFA_FCPORT_SM_START = 1, /* start port state machine */ BFA_FCPORT_SM_STOP = 2, /* stop port state machine */ BFA_FCPORT_SM_ENABLE = 3, /* enable port */ BFA_FCPORT_SM_DISABLE = 4, /* disable port state machine */ BFA_FCPORT_SM_FWRSP = 5, /* firmware enable/disable rsp */ BFA_FCPORT_SM_LINKUP = 6, /* firmware linkup event */ BFA_FCPORT_SM_LINKDOWN = 7, /* firmware linkup down */ BFA_FCPORT_SM_QRESUME = 8, /* CQ space available */ BFA_FCPORT_SM_HWFAIL = 9, /* IOC h/w failure */ BFA_FCPORT_SM_DPORTENABLE = 10, /* enable dport */ BFA_FCPORT_SM_DPORTDISABLE = 11,/* disable dport */ BFA_FCPORT_SM_FAA_MISCONFIG = 12, /* FAA misconfiguratin */ BFA_FCPORT_SM_DDPORTENABLE = 13, /* enable ddport */ BFA_FCPORT_SM_DDPORTDISABLE = 14, /* disable ddport */ }; /* * BFA port link notification state machine events */ enum bfa_fcport_ln_sm_event { BFA_FCPORT_LN_SM_LINKUP = 1, /* linkup event */ BFA_FCPORT_LN_SM_LINKDOWN = 2, /* linkdown event */ BFA_FCPORT_LN_SM_NOTIFICATION = 3 /* done notification */ }; /* * RPORT related definitions */ #define bfa_rport_offline_cb(__rp) do { \ if ((__rp)->bfa->fcs) \ bfa_cb_rport_offline((__rp)->rport_drv); \ else { \ bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe, \ __bfa_cb_rport_offline, (__rp)); \ } \ } while (0) #define bfa_rport_online_cb(__rp) do { \ if ((__rp)->bfa->fcs) \ bfa_cb_rport_online((__rp)->rport_drv); \ else { \ bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe, \ __bfa_cb_rport_online, (__rp)); \ } \ } while (0) /* * forward declarations FCXP related functions */ static void __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete); static void hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_rsp_s *fcxp_rsp); static void hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen, struct bfa_fcxp_s *fcxp, struct fchs_s *fchs); static void bfa_fcxp_qresume(void *cbarg); static void bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req); /* * forward declarations for LPS functions */ static void bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp); static void bfa_lps_no_res(struct bfa_lps_s *first_lps, u8 count); static void bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp); static void bfa_lps_reqq_resume(void *lps_arg); static void bfa_lps_free(struct bfa_lps_s *lps); static void bfa_lps_send_login(struct bfa_lps_s *lps); static void bfa_lps_send_logout(struct bfa_lps_s *lps); static void bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps); static void bfa_lps_login_comp(struct bfa_lps_s *lps); static void bfa_lps_logout_comp(struct 
bfa_lps_s *lps); static void bfa_lps_cvl_event(struct bfa_lps_s *lps); /* * forward declaration for LPS state machine */ static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event); static void bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event); static void bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event); static void bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event); static void bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps, enum bfa_lps_event event); static void bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event); static void bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event); /* * forward declaration for FC Port functions */ static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport); static bfa_boolean_t bfa_fcport_send_disable(struct bfa_fcport_s *fcport); static void bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport); static void bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport); static void bfa_fcport_set_wwns(struct bfa_fcport_s *fcport); static void __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete); static void bfa_fcport_scn(struct bfa_fcport_s *fcport, enum bfa_port_linkstate event, bfa_boolean_t trunk); static void bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_port_linkstate event); static void __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete); static void bfa_fcport_stats_get_timeout(void *cbarg); static void bfa_fcport_stats_clr_timeout(void *cbarg); static void bfa_trunk_iocdisable(struct bfa_s *bfa); /* * forward declaration for FC PORT state machine */ static void bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event); static void bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event); static void bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event); static void bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event); static void bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event); static void bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event); static void bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event); static void bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event); static void bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event); static void bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event); static void bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event); static void bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event); static void bfa_fcport_sm_dport(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event); static void bfa_fcport_sm_ddport(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event); static void bfa_fcport_sm_faa_misconfig(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event); static void bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln, enum bfa_fcport_ln_sm_event event); static void bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln, enum bfa_fcport_ln_sm_event event); static void bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln, enum bfa_fcport_ln_sm_event event); static void bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln, enum bfa_fcport_ln_sm_event event); static void 
bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln, enum bfa_fcport_ln_sm_event event); static void bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln, enum bfa_fcport_ln_sm_event event); static void bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln, enum bfa_fcport_ln_sm_event event); static struct bfa_sm_table_s hal_port_sm_table[] = { {BFA_SM(bfa_fcport_sm_uninit), BFA_PORT_ST_UNINIT}, {BFA_SM(bfa_fcport_sm_enabling_qwait), BFA_PORT_ST_ENABLING_QWAIT}, {BFA_SM(bfa_fcport_sm_enabling), BFA_PORT_ST_ENABLING}, {BFA_SM(bfa_fcport_sm_linkdown), BFA_PORT_ST_LINKDOWN}, {BFA_SM(bfa_fcport_sm_linkup), BFA_PORT_ST_LINKUP}, {BFA_SM(bfa_fcport_sm_disabling_qwait), BFA_PORT_ST_DISABLING_QWAIT}, {BFA_SM(bfa_fcport_sm_toggling_qwait), BFA_PORT_ST_TOGGLING_QWAIT}, {BFA_SM(bfa_fcport_sm_disabling), BFA_PORT_ST_DISABLING}, {BFA_SM(bfa_fcport_sm_disabled), BFA_PORT_ST_DISABLED}, {BFA_SM(bfa_fcport_sm_stopped), BFA_PORT_ST_STOPPED}, {BFA_SM(bfa_fcport_sm_iocdown), BFA_PORT_ST_IOCDOWN}, {BFA_SM(bfa_fcport_sm_iocfail), BFA_PORT_ST_IOCDOWN}, {BFA_SM(bfa_fcport_sm_dport), BFA_PORT_ST_DPORT}, {BFA_SM(bfa_fcport_sm_ddport), BFA_PORT_ST_DDPORT}, {BFA_SM(bfa_fcport_sm_faa_misconfig), BFA_PORT_ST_FAA_MISCONFIG}, }; /* * forward declaration for RPORT related functions */ static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod); static void bfa_rport_free(struct bfa_rport_s *rport); static bfa_boolean_t bfa_rport_send_fwcreate(struct bfa_rport_s *rp); static bfa_boolean_t bfa_rport_send_fwdelete(struct bfa_rport_s *rp); static bfa_boolean_t bfa_rport_send_fwspeed(struct bfa_rport_s *rp); static void __bfa_cb_rport_online(void *cbarg, bfa_boolean_t complete); static void __bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete); /* * forward declaration for RPORT state machine */ static void bfa_rport_sm_uninit(struct bfa_rport_s *rp, enum bfa_rport_event event); static void bfa_rport_sm_created(struct bfa_rport_s *rp, enum bfa_rport_event event); static void bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, enum bfa_rport_event event); static void bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event); static void bfa_rport_sm_fwdelete(struct bfa_rport_s *rp, enum bfa_rport_event event); static void bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event); static void bfa_rport_sm_deleting(struct bfa_rport_s *rp, enum bfa_rport_event event); static void bfa_rport_sm_offline_pending(struct bfa_rport_s *rp, enum bfa_rport_event event); static void bfa_rport_sm_delete_pending(struct bfa_rport_s *rp, enum bfa_rport_event event); static void bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, enum bfa_rport_event event); static void bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event); static void bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event); static void bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event); /* * PLOG related definitions */ static int plkd_validate_logrec(struct bfa_plog_rec_s *pl_rec) { if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) && (pl_rec->log_type != BFA_PL_LOG_TYPE_STRING)) return 1; if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) && (pl_rec->log_num_ints > BFA_PL_INT_LOG_SZ)) return 1; return 0; } static void bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec) { u16 tail; struct bfa_plog_rec_s *pl_recp; if (plog->plog_enabled == 0) return; if (plkd_validate_logrec(pl_rec)) { WARN_ON(1); return; } tail = plog->tail; pl_recp = 
&(plog->plog_recs[tail]); memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s)); pl_recp->tv = ktime_get_real_seconds(); BFA_PL_LOG_REC_INCR(plog->tail); if (plog->head == plog->tail) BFA_PL_LOG_REC_INCR(plog->head); } void bfa_plog_init(struct bfa_plog_s *plog) { memset((char *)plog, 0, sizeof(struct bfa_plog_s)); memcpy(plog->plog_sig, BFA_PL_SIG_STR, BFA_PL_SIG_LEN); plog->head = plog->tail = 0; plog->plog_enabled = 1; } void bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid, enum bfa_plog_eid event, u16 misc, char *log_str) { struct bfa_plog_rec_s lp; if (plog->plog_enabled) { memset(&lp, 0, sizeof(struct bfa_plog_rec_s)); lp.mid = mid; lp.eid = event; lp.log_type = BFA_PL_LOG_TYPE_STRING; lp.misc = misc; strscpy(lp.log_entry.string_log, log_str, BFA_PL_STRING_LOG_SZ); lp.log_entry.string_log[BFA_PL_STRING_LOG_SZ - 1] = '\0'; bfa_plog_add(plog, &lp); } } void bfa_plog_intarr(struct bfa_plog_s *plog, enum bfa_plog_mid mid, enum bfa_plog_eid event, u16 misc, u32 *intarr, u32 num_ints) { struct bfa_plog_rec_s lp; u32 i; if (num_ints > BFA_PL_INT_LOG_SZ) num_ints = BFA_PL_INT_LOG_SZ; if (plog->plog_enabled) { memset(&lp, 0, sizeof(struct bfa_plog_rec_s)); lp.mid = mid; lp.eid = event; lp.log_type = BFA_PL_LOG_TYPE_INT; lp.misc = misc; for (i = 0; i < num_ints; i++) lp.log_entry.int_log[i] = intarr[i]; lp.log_num_ints = (u8) num_ints; bfa_plog_add(plog, &lp); } } void bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid, enum bfa_plog_eid event, u16 misc, struct fchs_s *fchdr) { u32 *tmp_int = (u32 *) fchdr; u32 ints[BFA_PL_INT_LOG_SZ]; if (plog->plog_enabled) { ints[0] = tmp_int[0]; ints[1] = tmp_int[1]; ints[2] = tmp_int[4]; bfa_plog_intarr(plog, mid, event, misc, ints, 3); } } void bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid, enum bfa_plog_eid event, u16 misc, struct fchs_s *fchdr, u32 pld_w0) { u32 *tmp_int = (u32 *) fchdr; u32 ints[BFA_PL_INT_LOG_SZ]; if (plog->plog_enabled) { ints[0] = tmp_int[0]; ints[1] = tmp_int[1]; ints[2] = tmp_int[4]; ints[3] = pld_w0; bfa_plog_intarr(plog, mid, event, misc, ints, 4); } } /* * fcxp_pvt BFA FCXP private functions */ static void claim_fcxps_mem(struct bfa_fcxp_mod_s *mod) { u16 i; struct bfa_fcxp_s *fcxp; fcxp = (struct bfa_fcxp_s *) bfa_mem_kva_curp(mod); memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps); INIT_LIST_HEAD(&mod->fcxp_req_free_q); INIT_LIST_HEAD(&mod->fcxp_rsp_free_q); INIT_LIST_HEAD(&mod->fcxp_active_q); INIT_LIST_HEAD(&mod->fcxp_req_unused_q); INIT_LIST_HEAD(&mod->fcxp_rsp_unused_q); mod->fcxp_list = fcxp; for (i = 0; i < mod->num_fcxps; i++) { fcxp->fcxp_mod = mod; fcxp->fcxp_tag = i; if (i < (mod->num_fcxps / 2)) { list_add_tail(&fcxp->qe, &mod->fcxp_req_free_q); fcxp->req_rsp = BFA_TRUE; } else { list_add_tail(&fcxp->qe, &mod->fcxp_rsp_free_q); fcxp->req_rsp = BFA_FALSE; } bfa_reqq_winit(&fcxp->reqq_wqe, bfa_fcxp_qresume, fcxp); fcxp->reqq_waiting = BFA_FALSE; fcxp = fcxp + 1; } bfa_mem_kva_curp(mod) = (void *)fcxp; } void bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo, struct bfa_s *bfa) { struct bfa_fcxp_mod_s *fcxp_mod = BFA_FCXP_MOD(bfa); struct bfa_mem_kva_s *fcxp_kva = BFA_MEM_FCXP_KVA(bfa); struct bfa_mem_dma_s *seg_ptr; u16 nsegs, idx, per_seg_fcxp; u16 num_fcxps = cfg->fwcfg.num_fcxp_reqs; u32 per_fcxp_sz; if (num_fcxps == 0) return; if (cfg->drvcfg.min_cfg) per_fcxp_sz = 2 * BFA_FCXP_MAX_IBUF_SZ; else per_fcxp_sz = BFA_FCXP_MAX_IBUF_SZ + BFA_FCXP_MAX_LBUF_SZ; /* dma memory */ nsegs = BFI_MEM_DMA_NSEGS(num_fcxps, per_fcxp_sz); per_seg_fcxp 
= BFI_MEM_NREQS_SEG(per_fcxp_sz); bfa_mem_dma_seg_iter(fcxp_mod, seg_ptr, nsegs, idx) { if (num_fcxps >= per_seg_fcxp) { num_fcxps -= per_seg_fcxp; bfa_mem_dma_setup(minfo, seg_ptr, per_seg_fcxp * per_fcxp_sz); } else bfa_mem_dma_setup(minfo, seg_ptr, num_fcxps * per_fcxp_sz); } /* kva memory */ bfa_mem_kva_setup(minfo, fcxp_kva, cfg->fwcfg.num_fcxp_reqs * sizeof(struct bfa_fcxp_s)); } void bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, struct bfa_pcidev_s *pcidev) { struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa); mod->bfa = bfa; mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs; /* * Initialize FCXP request and response payload sizes. */ mod->req_pld_sz = mod->rsp_pld_sz = BFA_FCXP_MAX_IBUF_SZ; if (!cfg->drvcfg.min_cfg) mod->rsp_pld_sz = BFA_FCXP_MAX_LBUF_SZ; INIT_LIST_HEAD(&mod->req_wait_q); INIT_LIST_HEAD(&mod->rsp_wait_q); claim_fcxps_mem(mod); } void bfa_fcxp_iocdisable(struct bfa_s *bfa) { struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa); struct bfa_fcxp_s *fcxp; struct list_head *qe, *qen; /* Enqueue unused fcxp resources to free_q */ list_splice_tail_init(&mod->fcxp_req_unused_q, &mod->fcxp_req_free_q); list_splice_tail_init(&mod->fcxp_rsp_unused_q, &mod->fcxp_rsp_free_q); list_for_each_safe(qe, qen, &mod->fcxp_active_q) { fcxp = (struct bfa_fcxp_s *) qe; if (fcxp->caller == NULL) { fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg, BFA_STATUS_IOC_FAILURE, 0, 0, NULL); bfa_fcxp_free(fcxp); } else { fcxp->rsp_status = BFA_STATUS_IOC_FAILURE; bfa_cb_queue(bfa, &fcxp->hcb_qe, __bfa_fcxp_send_cbfn, fcxp); } } } static struct bfa_fcxp_s * bfa_fcxp_get(struct bfa_fcxp_mod_s *fm, bfa_boolean_t req) { struct bfa_fcxp_s *fcxp; if (req) bfa_q_deq(&fm->fcxp_req_free_q, &fcxp); else bfa_q_deq(&fm->fcxp_rsp_free_q, &fcxp); if (fcxp) list_add_tail(&fcxp->qe, &fm->fcxp_active_q); return fcxp; } static void bfa_fcxp_init_reqrsp(struct bfa_fcxp_s *fcxp, struct bfa_s *bfa, u8 *use_ibuf, u32 *nr_sgles, bfa_fcxp_get_sgaddr_t *r_sga_cbfn, bfa_fcxp_get_sglen_t *r_sglen_cbfn, struct list_head *r_sgpg_q, int n_sgles, bfa_fcxp_get_sgaddr_t sga_cbfn, bfa_fcxp_get_sglen_t sglen_cbfn) { WARN_ON(bfa == NULL); bfa_trc(bfa, fcxp->fcxp_tag); if (n_sgles == 0) { *use_ibuf = 1; } else { WARN_ON(*sga_cbfn == NULL); WARN_ON(*sglen_cbfn == NULL); *use_ibuf = 0; *r_sga_cbfn = sga_cbfn; *r_sglen_cbfn = sglen_cbfn; *nr_sgles = n_sgles; /* * alloc required sgpgs */ if (n_sgles > BFI_SGE_INLINE) WARN_ON(1); } } static void bfa_fcxp_init(struct bfa_fcxp_s *fcxp, void *caller, struct bfa_s *bfa, int nreq_sgles, int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn, bfa_fcxp_get_sglen_t req_sglen_cbfn, bfa_fcxp_get_sgaddr_t rsp_sga_cbfn, bfa_fcxp_get_sglen_t rsp_sglen_cbfn) { WARN_ON(bfa == NULL); bfa_trc(bfa, fcxp->fcxp_tag); fcxp->caller = caller; bfa_fcxp_init_reqrsp(fcxp, bfa, &fcxp->use_ireqbuf, &fcxp->nreq_sgles, &fcxp->req_sga_cbfn, &fcxp->req_sglen_cbfn, &fcxp->req_sgpg_q, nreq_sgles, req_sga_cbfn, req_sglen_cbfn); bfa_fcxp_init_reqrsp(fcxp, bfa, &fcxp->use_irspbuf, &fcxp->nrsp_sgles, &fcxp->rsp_sga_cbfn, &fcxp->rsp_sglen_cbfn, &fcxp->rsp_sgpg_q, nrsp_sgles, rsp_sga_cbfn, rsp_sglen_cbfn); } static void bfa_fcxp_put(struct bfa_fcxp_s *fcxp) { struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod; struct bfa_fcxp_wqe_s *wqe; if (fcxp->req_rsp) bfa_q_deq(&mod->req_wait_q, &wqe); else bfa_q_deq(&mod->rsp_wait_q, &wqe); if (wqe) { bfa_trc(mod->bfa, fcxp->fcxp_tag); bfa_fcxp_init(fcxp, wqe->caller, wqe->bfa, wqe->nreq_sgles, wqe->nrsp_sgles, wqe->req_sga_cbfn, wqe->req_sglen_cbfn, wqe->rsp_sga_cbfn, 
wqe->rsp_sglen_cbfn); wqe->alloc_cbfn(wqe->alloc_cbarg, fcxp); return; } WARN_ON(!bfa_q_is_on_q(&mod->fcxp_active_q, fcxp)); list_del(&fcxp->qe); if (fcxp->req_rsp) list_add_tail(&fcxp->qe, &mod->fcxp_req_free_q); else list_add_tail(&fcxp->qe, &mod->fcxp_rsp_free_q); } static void bfa_fcxp_null_comp(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg, bfa_status_t req_status, u32 rsp_len, u32 resid_len, struct fchs_s *rsp_fchs) { /* discarded fcxp completion */ } static void __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete) { struct bfa_fcxp_s *fcxp = cbarg; if (complete) { fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg, fcxp->rsp_status, fcxp->rsp_len, fcxp->residue_len, &fcxp->rsp_fchs); } else { bfa_fcxp_free(fcxp); } } static void hal_fcxp_send_comp(struct bfa_s *bfa, struct bfi_fcxp_send_rsp_s *fcxp_rsp) { struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa); struct bfa_fcxp_s *fcxp; u16 fcxp_tag = be16_to_cpu(fcxp_rsp->fcxp_tag); bfa_trc(bfa, fcxp_tag); fcxp_rsp->rsp_len = be32_to_cpu(fcxp_rsp->rsp_len); /* * @todo f/w should not set residue to non-0 when everything * is received. */ if (fcxp_rsp->req_status == BFA_STATUS_OK) fcxp_rsp->residue_len = 0; else fcxp_rsp->residue_len = be32_to_cpu(fcxp_rsp->residue_len); fcxp = BFA_FCXP_FROM_TAG(mod, fcxp_tag); WARN_ON(fcxp->send_cbfn == NULL); hal_fcxp_rx_plog(mod->bfa, fcxp, fcxp_rsp); if (fcxp->send_cbfn != NULL) { bfa_trc(mod->bfa, (NULL == fcxp->caller)); if (fcxp->caller == NULL) { fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg, fcxp_rsp->req_status, fcxp_rsp->rsp_len, fcxp_rsp->residue_len, &fcxp_rsp->fchs); /* * fcxp automatically freed on return from the callback */ bfa_fcxp_free(fcxp); } else { fcxp->rsp_status = fcxp_rsp->req_status; fcxp->rsp_len = fcxp_rsp->rsp_len; fcxp->residue_len = fcxp_rsp->residue_len; fcxp->rsp_fchs = fcxp_rsp->fchs; bfa_cb_queue(bfa, &fcxp->hcb_qe, __bfa_fcxp_send_cbfn, fcxp); } } else { bfa_trc(bfa, (NULL == fcxp->send_cbfn)); } } static void hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen, struct bfa_fcxp_s *fcxp, struct fchs_s *fchs) { /* * TODO: TX ox_id */ if (reqlen > 0) { if (fcxp->use_ireqbuf) { u32 pld_w0 = *((u32 *) BFA_FCXP_REQ_PLD(fcxp)); bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_TX, reqlen + sizeof(struct fchs_s), fchs, pld_w0); } else { bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_TX, reqlen + sizeof(struct fchs_s), fchs); } } else { bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_TX, reqlen + sizeof(struct fchs_s), fchs); } } static void hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_rsp_s *fcxp_rsp) { if (fcxp_rsp->rsp_len > 0) { if (fcxp->use_irspbuf) { u32 pld_w0 = *((u32 *) BFA_FCXP_RSP_PLD(fcxp)); bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_RX, (u16) fcxp_rsp->rsp_len, &fcxp_rsp->fchs, pld_w0); } else { bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_RX, (u16) fcxp_rsp->rsp_len, &fcxp_rsp->fchs); } } else { bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_RX, (u16) fcxp_rsp->rsp_len, &fcxp_rsp->fchs); } } /* * Handler to resume sending fcxp when space in available in cpe queue. */ static void bfa_fcxp_qresume(void *cbarg) { struct bfa_fcxp_s *fcxp = cbarg; struct bfa_s *bfa = fcxp->fcxp_mod->bfa; struct bfi_fcxp_send_req_s *send_req; fcxp->reqq_waiting = BFA_FALSE; send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP); bfa_fcxp_queue(fcxp, send_req); } /* * Queue fcxp send request to foimrware. 
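* Callers obtain the request-queue entry with bfa_reqq_next() first; this
* routine fills in a BFI_FCXP_H2I_SEND_REQ from the cached req_info/rsp_info,
* points the request/response address-length pairs at either the internal
* payload buffers or a single caller-supplied SGE, logs the frame through
* hal_fcxp_tx_plog() and produces the message on the BFA_REQQ_FCXP queue.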
*/ static void bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req) { struct bfa_s *bfa = fcxp->fcxp_mod->bfa; struct bfa_fcxp_req_info_s *reqi = &fcxp->req_info; struct bfa_fcxp_rsp_info_s *rspi = &fcxp->rsp_info; struct bfa_rport_s *rport = reqi->bfa_rport; bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ, bfa_fn_lpu(bfa)); send_req->fcxp_tag = cpu_to_be16(fcxp->fcxp_tag); if (rport) { send_req->rport_fw_hndl = rport->fw_handle; send_req->max_frmsz = cpu_to_be16(rport->rport_info.max_frmsz); if (send_req->max_frmsz == 0) send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ); } else { send_req->rport_fw_hndl = 0; send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ); } send_req->vf_id = cpu_to_be16(reqi->vf_id); send_req->lp_fwtag = bfa_lps_get_fwtag(bfa, reqi->lp_tag); send_req->class = reqi->class; send_req->rsp_timeout = rspi->rsp_timeout; send_req->cts = reqi->cts; send_req->fchs = reqi->fchs; send_req->req_len = cpu_to_be32(reqi->req_tot_len); send_req->rsp_maxlen = cpu_to_be32(rspi->rsp_maxlen); /* * setup req sgles */ if (fcxp->use_ireqbuf == 1) { bfa_alen_set(&send_req->req_alen, reqi->req_tot_len, BFA_FCXP_REQ_PLD_PA(fcxp)); } else { if (fcxp->nreq_sgles > 0) { WARN_ON(fcxp->nreq_sgles != 1); bfa_alen_set(&send_req->req_alen, reqi->req_tot_len, fcxp->req_sga_cbfn(fcxp->caller, 0)); } else { WARN_ON(reqi->req_tot_len != 0); bfa_alen_set(&send_req->rsp_alen, 0, 0); } } /* * setup rsp sgles */ if (fcxp->use_irspbuf == 1) { WARN_ON(rspi->rsp_maxlen > BFA_FCXP_MAX_LBUF_SZ); bfa_alen_set(&send_req->rsp_alen, rspi->rsp_maxlen, BFA_FCXP_RSP_PLD_PA(fcxp)); } else { if (fcxp->nrsp_sgles > 0) { WARN_ON(fcxp->nrsp_sgles != 1); bfa_alen_set(&send_req->rsp_alen, rspi->rsp_maxlen, fcxp->rsp_sga_cbfn(fcxp->caller, 0)); } else { WARN_ON(rspi->rsp_maxlen != 0); bfa_alen_set(&send_req->rsp_alen, 0, 0); } } hal_fcxp_tx_plog(bfa, reqi->req_tot_len, fcxp, &reqi->fchs); bfa_reqq_produce(bfa, BFA_REQQ_FCXP, send_req->mh); bfa_trc(bfa, bfa_reqq_pi(bfa, BFA_REQQ_FCXP)); bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP)); } /* * Allocate an FCXP instance to send a response or to send a request * that has a response. Request/response buffers are allocated by caller. * * @param[in] bfa BFA bfa instance * @param[in] nreq_sgles Number of SG elements required for request * buffer. 0, if fcxp internal buffers are used. * Use bfa_fcxp_get_reqbuf() to get the * internal req buffer. * @param[in] req_sgles SG elements describing request buffer. Will be * copied in by BFA and hence can be freed on * return from this function. * @param[in] get_req_sga function ptr to be called to get a request SG * Address (given the sge index). * @param[in] get_req_sglen function ptr to be called to get a request SG * len (given the sge index). * @param[in] get_rsp_sga function ptr to be called to get a response SG * Address (given the sge index). * @param[in] get_rsp_sglen function ptr to be called to get a response SG * len (given the sge index). * @param[in] req Allocated FCXP is used to send req or rsp? * request - BFA_TRUE, response - BFA_FALSE * * @return FCXP instance. NULL on failure. 
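*
* Illustrative call pattern only -- my_ctx, my_wqe and my_alloc_cb below are
* hypothetical caller-side names, not part of this driver:
*
*	fcxp = bfa_fcxp_req_rsp_alloc(my_ctx, bfa, 0, 0, NULL, NULL,
*				      NULL, NULL, BFA_TRUE);
*	if (!fcxp)
*		bfa_fcxp_req_rsp_alloc_wait(bfa, &my_wqe, my_alloc_cb,
*					    my_ctx, my_ctx, 0, 0, NULL,
*					    NULL, NULL, NULL, BFA_TRUE);
*
* With zero SG elements the internal buffers are used; see
* bfa_fcxp_get_reqbuf() and bfa_fcxp_get_rspbuf().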
*/ struct bfa_fcxp_s * bfa_fcxp_req_rsp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles, int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn, bfa_fcxp_get_sglen_t req_sglen_cbfn, bfa_fcxp_get_sgaddr_t rsp_sga_cbfn, bfa_fcxp_get_sglen_t rsp_sglen_cbfn, bfa_boolean_t req) { struct bfa_fcxp_s *fcxp = NULL; WARN_ON(bfa == NULL); fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa), req); if (fcxp == NULL) return NULL; bfa_trc(bfa, fcxp->fcxp_tag); bfa_fcxp_init(fcxp, caller, bfa, nreq_sgles, nrsp_sgles, req_sga_cbfn, req_sglen_cbfn, rsp_sga_cbfn, rsp_sglen_cbfn); return fcxp; } /* * Get the internal request buffer pointer * * @param[in] fcxp BFA fcxp pointer * * @return pointer to the internal request buffer */ void * bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp) { struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod; void *reqbuf; WARN_ON(fcxp->use_ireqbuf != 1); reqbuf = bfa_mem_get_dmabuf_kva(mod, fcxp->fcxp_tag, mod->req_pld_sz + mod->rsp_pld_sz); return reqbuf; } u32 bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp) { struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod; return mod->req_pld_sz; } /* * Get the internal response buffer pointer * * @param[in] fcxp BFA fcxp pointer * * @return pointer to the internal request buffer */ void * bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp) { struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod; void *fcxp_buf; WARN_ON(fcxp->use_irspbuf != 1); fcxp_buf = bfa_mem_get_dmabuf_kva(mod, fcxp->fcxp_tag, mod->req_pld_sz + mod->rsp_pld_sz); /* fcxp_buf = req_buf + rsp_buf :- add req_buf_sz to get to rsp_buf */ return ((u8 *) fcxp_buf) + mod->req_pld_sz; } /* * Free the BFA FCXP * * @param[in] fcxp BFA fcxp pointer * * @return void */ void bfa_fcxp_free(struct bfa_fcxp_s *fcxp) { struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod; WARN_ON(fcxp == NULL); bfa_trc(mod->bfa, fcxp->fcxp_tag); bfa_fcxp_put(fcxp); } /* * Send a FCXP request * * @param[in] fcxp BFA fcxp pointer * @param[in] rport BFA rport pointer. Could be left NULL for WKA rports * @param[in] vf_id virtual Fabric ID * @param[in] lp_tag lport tag * @param[in] cts use Continuous sequence * @param[in] cos fc Class of Service * @param[in] reqlen request length, does not include FCHS length * @param[in] fchs fc Header Pointer. The header content will be copied * in by BFA. * * @param[in] cbfn call back function to be called on receiving * the response * @param[in] cbarg arg for cbfn * @param[in] rsp_timeout * response timeout * * @return bfa_status_t */ void bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport, u16 vf_id, u8 lp_tag, bfa_boolean_t cts, enum fc_cos cos, u32 reqlen, struct fchs_s *fchs, bfa_cb_fcxp_send_t cbfn, void *cbarg, u32 rsp_maxlen, u8 rsp_timeout) { struct bfa_s *bfa = fcxp->fcxp_mod->bfa; struct bfa_fcxp_req_info_s *reqi = &fcxp->req_info; struct bfa_fcxp_rsp_info_s *rspi = &fcxp->rsp_info; struct bfi_fcxp_send_req_s *send_req; bfa_trc(bfa, fcxp->fcxp_tag); /* * setup request/response info */ reqi->bfa_rport = rport; reqi->vf_id = vf_id; reqi->lp_tag = lp_tag; reqi->class = cos; rspi->rsp_timeout = rsp_timeout; reqi->cts = cts; reqi->fchs = *fchs; reqi->req_tot_len = reqlen; rspi->rsp_maxlen = rsp_maxlen; fcxp->send_cbfn = cbfn ? 
cbfn : bfa_fcxp_null_comp; fcxp->send_cbarg = cbarg; /* * If no room in CPE queue, wait for space in request queue */ send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP); if (!send_req) { bfa_trc(bfa, fcxp->fcxp_tag); fcxp->reqq_waiting = BFA_TRUE; bfa_reqq_wait(bfa, BFA_REQQ_FCXP, &fcxp->reqq_wqe); return; } bfa_fcxp_queue(fcxp, send_req); } /* * Abort a BFA FCXP * * @param[in] fcxp BFA fcxp pointer * * @return void */ bfa_status_t bfa_fcxp_abort(struct bfa_fcxp_s *fcxp) { bfa_trc(fcxp->fcxp_mod->bfa, fcxp->fcxp_tag); WARN_ON(1); return BFA_STATUS_OK; } void bfa_fcxp_req_rsp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe, bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *alloc_cbarg, void *caller, int nreq_sgles, int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn, bfa_fcxp_get_sglen_t req_sglen_cbfn, bfa_fcxp_get_sgaddr_t rsp_sga_cbfn, bfa_fcxp_get_sglen_t rsp_sglen_cbfn, bfa_boolean_t req) { struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa); if (req) WARN_ON(!list_empty(&mod->fcxp_req_free_q)); else WARN_ON(!list_empty(&mod->fcxp_rsp_free_q)); wqe->alloc_cbfn = alloc_cbfn; wqe->alloc_cbarg = alloc_cbarg; wqe->caller = caller; wqe->bfa = bfa; wqe->nreq_sgles = nreq_sgles; wqe->nrsp_sgles = nrsp_sgles; wqe->req_sga_cbfn = req_sga_cbfn; wqe->req_sglen_cbfn = req_sglen_cbfn; wqe->rsp_sga_cbfn = rsp_sga_cbfn; wqe->rsp_sglen_cbfn = rsp_sglen_cbfn; if (req) list_add_tail(&wqe->qe, &mod->req_wait_q); else list_add_tail(&wqe->qe, &mod->rsp_wait_q); } void bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe) { struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa); WARN_ON(!bfa_q_is_on_q(&mod->req_wait_q, wqe) || !bfa_q_is_on_q(&mod->rsp_wait_q, wqe)); list_del(&wqe->qe); } void bfa_fcxp_discard(struct bfa_fcxp_s *fcxp) { /* * If waiting for room in request queue, cancel reqq wait * and free fcxp. 
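* Otherwise the request is already with the firmware; the completion
* callback is redirected to bfa_fcxp_null_comp() so the eventual response
* is silently dropped.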
*/ if (fcxp->reqq_waiting) { fcxp->reqq_waiting = BFA_FALSE; bfa_reqq_wcancel(&fcxp->reqq_wqe); bfa_fcxp_free(fcxp); return; } fcxp->send_cbfn = bfa_fcxp_null_comp; } void bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg) { switch (msg->mhdr.msg_id) { case BFI_FCXP_I2H_SEND_RSP: hal_fcxp_send_comp(bfa, (struct bfi_fcxp_send_rsp_s *) msg); break; default: bfa_trc(bfa, msg->mhdr.msg_id); WARN_ON(1); } } u32 bfa_fcxp_get_maxrsp(struct bfa_s *bfa) { struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa); return mod->rsp_pld_sz; } void bfa_fcxp_res_recfg(struct bfa_s *bfa, u16 num_fcxp_fw) { struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa); struct list_head *qe; int i; for (i = 0; i < (mod->num_fcxps - num_fcxp_fw); i++) { if (i < ((mod->num_fcxps - num_fcxp_fw) / 2)) { bfa_q_deq_tail(&mod->fcxp_req_free_q, &qe); list_add_tail(qe, &mod->fcxp_req_unused_q); } else { bfa_q_deq_tail(&mod->fcxp_rsp_free_q, &qe); list_add_tail(qe, &mod->fcxp_rsp_unused_q); } } } /* * BFA LPS state machine functions */ /* * Init state -- no login */ static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event) { bfa_trc(lps->bfa, lps->bfa_tag); bfa_trc(lps->bfa, event); switch (event) { case BFA_LPS_SM_LOGIN: if (bfa_reqq_full(lps->bfa, lps->reqq)) { bfa_sm_set_state(lps, bfa_lps_sm_loginwait); bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe); } else { bfa_sm_set_state(lps, bfa_lps_sm_login); bfa_lps_send_login(lps); } if (lps->fdisc) bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, BFA_PL_EID_LOGIN, 0, "FDISC Request"); else bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, BFA_PL_EID_LOGIN, 0, "FLOGI Request"); break; case BFA_LPS_SM_LOGOUT: bfa_lps_logout_comp(lps); break; case BFA_LPS_SM_DELETE: bfa_lps_free(lps); break; case BFA_LPS_SM_RX_CVL: case BFA_LPS_SM_OFFLINE: break; case BFA_LPS_SM_FWRSP: /* * Could happen when fabric detects loopback and discards * the lps request. Fw will eventually sent out the timeout * Just ignore */ break; case BFA_LPS_SM_SET_N2N_PID: /* * When topology is set to loop, bfa_lps_set_n2n_pid() sends * this event. Ignore this event. 
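* (bfa_lps_set_n2n_pid() has already cached the assigned PID in
* lps->lp_pid, so nothing more is needed here)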
*/ break; default: bfa_sm_fault(lps->bfa, event); } } /* * login is in progress -- awaiting response from firmware */ static void bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event) { bfa_trc(lps->bfa, lps->bfa_tag); bfa_trc(lps->bfa, event); switch (event) { case BFA_LPS_SM_FWRSP: if (lps->status == BFA_STATUS_OK) { bfa_sm_set_state(lps, bfa_lps_sm_online); if (lps->fdisc) bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, BFA_PL_EID_LOGIN, 0, "FDISC Accept"); else bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, BFA_PL_EID_LOGIN, 0, "FLOGI Accept"); /* If N2N, send the assigned PID to FW */ bfa_trc(lps->bfa, lps->fport); bfa_trc(lps->bfa, lps->lp_pid); if (!lps->fport && lps->lp_pid) bfa_sm_send_event(lps, BFA_LPS_SM_SET_N2N_PID); } else { bfa_sm_set_state(lps, bfa_lps_sm_init); if (lps->fdisc) bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, BFA_PL_EID_LOGIN, 0, "FDISC Fail (RJT or timeout)"); else bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, BFA_PL_EID_LOGIN, 0, "FLOGI Fail (RJT or timeout)"); } bfa_lps_login_comp(lps); break; case BFA_LPS_SM_OFFLINE: case BFA_LPS_SM_DELETE: bfa_sm_set_state(lps, bfa_lps_sm_init); break; case BFA_LPS_SM_SET_N2N_PID: bfa_trc(lps->bfa, lps->fport); bfa_trc(lps->bfa, lps->lp_pid); break; default: bfa_sm_fault(lps->bfa, event); } } /* * login pending - awaiting space in request queue */ static void bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event) { bfa_trc(lps->bfa, lps->bfa_tag); bfa_trc(lps->bfa, event); switch (event) { case BFA_LPS_SM_RESUME: bfa_sm_set_state(lps, bfa_lps_sm_login); bfa_lps_send_login(lps); break; case BFA_LPS_SM_OFFLINE: case BFA_LPS_SM_DELETE: bfa_sm_set_state(lps, bfa_lps_sm_init); bfa_reqq_wcancel(&lps->wqe); break; case BFA_LPS_SM_RX_CVL: /* * Login was not even sent out; so when getting out * of this state, it will appear like a login retry * after Clear virtual link */ break; default: bfa_sm_fault(lps->bfa, event); } } /* * login complete */ static void bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event) { bfa_trc(lps->bfa, lps->bfa_tag); bfa_trc(lps->bfa, event); switch (event) { case BFA_LPS_SM_LOGOUT: if (bfa_reqq_full(lps->bfa, lps->reqq)) { bfa_sm_set_state(lps, bfa_lps_sm_logowait); bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe); } else { bfa_sm_set_state(lps, bfa_lps_sm_logout); bfa_lps_send_logout(lps); } bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, BFA_PL_EID_LOGO, 0, "Logout"); break; case BFA_LPS_SM_RX_CVL: bfa_sm_set_state(lps, bfa_lps_sm_init); /* Let the vport module know about this event */ bfa_lps_cvl_event(lps); bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. 
Link Rx"); break; case BFA_LPS_SM_SET_N2N_PID: if (bfa_reqq_full(lps->bfa, lps->reqq)) { bfa_sm_set_state(lps, bfa_lps_sm_online_n2n_pid_wait); bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe); } else bfa_lps_send_set_n2n_pid(lps); break; case BFA_LPS_SM_OFFLINE: case BFA_LPS_SM_DELETE: bfa_sm_set_state(lps, bfa_lps_sm_init); break; default: bfa_sm_fault(lps->bfa, event); } } /* * login complete */ static void bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps, enum bfa_lps_event event) { bfa_trc(lps->bfa, lps->bfa_tag); bfa_trc(lps->bfa, event); switch (event) { case BFA_LPS_SM_RESUME: bfa_sm_set_state(lps, bfa_lps_sm_online); bfa_lps_send_set_n2n_pid(lps); break; case BFA_LPS_SM_LOGOUT: bfa_sm_set_state(lps, bfa_lps_sm_logowait); bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, BFA_PL_EID_LOGO, 0, "Logout"); break; case BFA_LPS_SM_RX_CVL: bfa_sm_set_state(lps, bfa_lps_sm_init); bfa_reqq_wcancel(&lps->wqe); /* Let the vport module know about this event */ bfa_lps_cvl_event(lps); bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx"); break; case BFA_LPS_SM_OFFLINE: case BFA_LPS_SM_DELETE: bfa_sm_set_state(lps, bfa_lps_sm_init); bfa_reqq_wcancel(&lps->wqe); break; default: bfa_sm_fault(lps->bfa, event); } } /* * logout in progress - awaiting firmware response */ static void bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event) { bfa_trc(lps->bfa, lps->bfa_tag); bfa_trc(lps->bfa, event); switch (event) { case BFA_LPS_SM_FWRSP: case BFA_LPS_SM_OFFLINE: bfa_sm_set_state(lps, bfa_lps_sm_init); bfa_lps_logout_comp(lps); break; case BFA_LPS_SM_DELETE: bfa_sm_set_state(lps, bfa_lps_sm_init); break; default: bfa_sm_fault(lps->bfa, event); } } /* * logout pending -- awaiting space in request queue */ static void bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event) { bfa_trc(lps->bfa, lps->bfa_tag); bfa_trc(lps->bfa, event); switch (event) { case BFA_LPS_SM_RESUME: bfa_sm_set_state(lps, bfa_lps_sm_logout); bfa_lps_send_logout(lps); break; case BFA_LPS_SM_OFFLINE: case BFA_LPS_SM_DELETE: bfa_sm_set_state(lps, bfa_lps_sm_init); bfa_reqq_wcancel(&lps->wqe); break; default: bfa_sm_fault(lps->bfa, event); } } /* * lps_pvt BFA LPS private functions */ /* * return memory requirement */ void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo, struct bfa_s *bfa) { struct bfa_mem_kva_s *lps_kva = BFA_MEM_LPS_KVA(bfa); if (cfg->drvcfg.min_cfg) bfa_mem_kva_setup(minfo, lps_kva, sizeof(struct bfa_lps_s) * BFA_LPS_MIN_LPORTS); else bfa_mem_kva_setup(minfo, lps_kva, sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS); } /* * bfa module attach at initialization time */ void bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, struct bfa_pcidev_s *pcidev) { struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); struct bfa_lps_s *lps; int i; mod->num_lps = BFA_LPS_MAX_LPORTS; if (cfg->drvcfg.min_cfg) mod->num_lps = BFA_LPS_MIN_LPORTS; else mod->num_lps = BFA_LPS_MAX_LPORTS; mod->lps_arr = lps = (struct bfa_lps_s *) bfa_mem_kva_curp(mod); bfa_mem_kva_curp(mod) += mod->num_lps * sizeof(struct bfa_lps_s); INIT_LIST_HEAD(&mod->lps_free_q); INIT_LIST_HEAD(&mod->lps_active_q); INIT_LIST_HEAD(&mod->lps_login_q); for (i = 0; i < mod->num_lps; i++, lps++) { lps->bfa = bfa; lps->bfa_tag = (u8) i; lps->reqq = BFA_REQQ_LPS; bfa_reqq_winit(&lps->wqe, bfa_lps_reqq_resume, lps); list_add_tail(&lps->qe, &mod->lps_free_q); } } /* * IOC in disabled state -- consider all lps offline */ void bfa_lps_iocdisable(struct bfa_s *bfa) { struct 
bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); struct bfa_lps_s *lps; struct list_head *qe, *qen; list_for_each_safe(qe, qen, &mod->lps_active_q) { lps = (struct bfa_lps_s *) qe; bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE); } list_for_each_safe(qe, qen, &mod->lps_login_q) { lps = (struct bfa_lps_s *) qe; bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE); } list_splice_tail_init(&mod->lps_login_q, &mod->lps_active_q); } /* * Firmware login response */ static void bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp) { struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); struct bfa_lps_s *lps; WARN_ON(rsp->bfa_tag >= mod->num_lps); lps = BFA_LPS_FROM_TAG(mod, rsp->bfa_tag); lps->status = rsp->status; switch (rsp->status) { case BFA_STATUS_OK: lps->fw_tag = rsp->fw_tag; lps->fport = rsp->f_port; if (lps->fport) lps->lp_pid = rsp->lp_pid; lps->npiv_en = rsp->npiv_en; lps->pr_bbcred = be16_to_cpu(rsp->bb_credit); lps->pr_pwwn = rsp->port_name; lps->pr_nwwn = rsp->node_name; lps->auth_req = rsp->auth_req; lps->lp_mac = rsp->lp_mac; lps->brcd_switch = rsp->brcd_switch; lps->fcf_mac = rsp->fcf_mac; break; case BFA_STATUS_FABRIC_RJT: lps->lsrjt_rsn = rsp->lsrjt_rsn; lps->lsrjt_expl = rsp->lsrjt_expl; break; case BFA_STATUS_EPROTOCOL: lps->ext_status = rsp->ext_status; break; case BFA_STATUS_VPORT_MAX: if (rsp->ext_status) bfa_lps_no_res(lps, rsp->ext_status); break; default: /* Nothing to do with other status */ break; } list_del(&lps->qe); list_add_tail(&lps->qe, &mod->lps_active_q); bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP); } static void bfa_lps_no_res(struct bfa_lps_s *first_lps, u8 count) { struct bfa_s *bfa = first_lps->bfa; struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); struct list_head *qe, *qe_next; struct bfa_lps_s *lps; bfa_trc(bfa, count); qe = bfa_q_next(first_lps); while (count && qe) { qe_next = bfa_q_next(qe); lps = (struct bfa_lps_s *)qe; bfa_trc(bfa, lps->bfa_tag); lps->status = first_lps->status; list_del(&lps->qe); list_add_tail(&lps->qe, &mod->lps_active_q); bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP); qe = qe_next; count--; } } /* * Firmware logout response */ static void bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp) { struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); struct bfa_lps_s *lps; WARN_ON(rsp->bfa_tag >= mod->num_lps); lps = BFA_LPS_FROM_TAG(mod, rsp->bfa_tag); bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP); } /* * Firmware received a Clear virtual link request (for FCoE) */ static void bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl) { struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); struct bfa_lps_s *lps; lps = BFA_LPS_FROM_TAG(mod, cvl->bfa_tag); bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL); } /* * Space is available in request queue, resume queueing request to firmware. 
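* Posts BFA_LPS_SM_RESUME; the loginwait, logowait and
* online_n2n_pid_wait states use it to retry the deferred request.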
*/ static void bfa_lps_reqq_resume(void *lps_arg) { struct bfa_lps_s *lps = lps_arg; bfa_sm_send_event(lps, BFA_LPS_SM_RESUME); } /* * lps is freed -- triggered by vport delete */ static void bfa_lps_free(struct bfa_lps_s *lps) { struct bfa_lps_mod_s *mod = BFA_LPS_MOD(lps->bfa); lps->lp_pid = 0; list_del(&lps->qe); list_add_tail(&lps->qe, &mod->lps_free_q); } /* * send login request to firmware */ static void bfa_lps_send_login(struct bfa_lps_s *lps) { struct bfa_lps_mod_s *mod = BFA_LPS_MOD(lps->bfa); struct bfi_lps_login_req_s *m; m = bfa_reqq_next(lps->bfa, lps->reqq); WARN_ON(!m); bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGIN_REQ, bfa_fn_lpu(lps->bfa)); m->bfa_tag = lps->bfa_tag; m->alpa = lps->alpa; m->pdu_size = cpu_to_be16(lps->pdusz); m->pwwn = lps->pwwn; m->nwwn = lps->nwwn; m->fdisc = lps->fdisc; m->auth_en = lps->auth_en; bfa_reqq_produce(lps->bfa, lps->reqq, m->mh); list_del(&lps->qe); list_add_tail(&lps->qe, &mod->lps_login_q); } /* * send logout request to firmware */ static void bfa_lps_send_logout(struct bfa_lps_s *lps) { struct bfi_lps_logout_req_s *m; m = bfa_reqq_next(lps->bfa, lps->reqq); WARN_ON(!m); bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGOUT_REQ, bfa_fn_lpu(lps->bfa)); m->fw_tag = lps->fw_tag; m->port_name = lps->pwwn; bfa_reqq_produce(lps->bfa, lps->reqq, m->mh); } /* * send n2n pid set request to firmware */ static void bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps) { struct bfi_lps_n2n_pid_req_s *m; m = bfa_reqq_next(lps->bfa, lps->reqq); WARN_ON(!m); bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_N2N_PID_REQ, bfa_fn_lpu(lps->bfa)); m->fw_tag = lps->fw_tag; m->lp_pid = lps->lp_pid; bfa_reqq_produce(lps->bfa, lps->reqq, m->mh); } /* * Indirect login completion handler for non-fcs */ static void bfa_lps_login_comp_cb(void *arg, bfa_boolean_t complete) { struct bfa_lps_s *lps = arg; if (!complete) return; if (lps->fdisc) bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status); else bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status); } /* * Login completion handler -- direct call for fcs, queue for others */ static void bfa_lps_login_comp(struct bfa_lps_s *lps) { if (!lps->bfa->fcs) { bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_login_comp_cb, lps); return; } if (lps->fdisc) bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status); else bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status); } /* * Indirect logout completion handler for non-fcs */ static void bfa_lps_logout_comp_cb(void *arg, bfa_boolean_t complete) { struct bfa_lps_s *lps = arg; if (!complete) return; if (lps->fdisc) bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg); else bfa_cb_lps_flogo_comp(lps->bfa->bfad, lps->uarg); } /* * Logout completion handler -- direct call for fcs, queue for others */ static void bfa_lps_logout_comp(struct bfa_lps_s *lps) { if (!lps->bfa->fcs) { bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_logout_comp_cb, lps); return; } if (lps->fdisc) bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg); } /* * Clear virtual link completion handler for non-fcs */ static void bfa_lps_cvl_event_cb(void *arg, bfa_boolean_t complete) { struct bfa_lps_s *lps = arg; if (!complete) return; /* Clear virtual link to base port will result in link down */ if (lps->fdisc) bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg); } /* * Received Clear virtual link event --direct call for fcs, * queue for others */ static void bfa_lps_cvl_event(struct bfa_lps_s *lps) { if (!lps->bfa->fcs) { bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_cvl_event_cb, lps); return; } /* 
Clear virtual link to base port will result in link down */ if (lps->fdisc) bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg); } /* * lps_public BFA LPS public functions */ u32 bfa_lps_get_max_vport(struct bfa_s *bfa) { if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT) return BFA_LPS_MAX_VPORTS_SUPP_CT; else return BFA_LPS_MAX_VPORTS_SUPP_CB; } /* * Allocate a lport srvice tag. */ struct bfa_lps_s * bfa_lps_alloc(struct bfa_s *bfa) { struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); struct bfa_lps_s *lps = NULL; bfa_q_deq(&mod->lps_free_q, &lps); if (lps == NULL) return NULL; list_add_tail(&lps->qe, &mod->lps_active_q); bfa_sm_set_state(lps, bfa_lps_sm_init); return lps; } /* * Free lport service tag. This can be called anytime after an alloc. * No need to wait for any pending login/logout completions. */ void bfa_lps_delete(struct bfa_lps_s *lps) { bfa_sm_send_event(lps, BFA_LPS_SM_DELETE); } /* * Initiate a lport login. */ void bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz, wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en) { lps->uarg = uarg; lps->alpa = alpa; lps->pdusz = pdusz; lps->pwwn = pwwn; lps->nwwn = nwwn; lps->fdisc = BFA_FALSE; lps->auth_en = auth_en; bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN); } /* * Initiate a lport fdisc login. */ void bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, wwn_t pwwn, wwn_t nwwn) { lps->uarg = uarg; lps->alpa = 0; lps->pdusz = pdusz; lps->pwwn = pwwn; lps->nwwn = nwwn; lps->fdisc = BFA_TRUE; lps->auth_en = BFA_FALSE; bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN); } /* * Initiate a lport FDSIC logout. */ void bfa_lps_fdisclogo(struct bfa_lps_s *lps) { bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT); } u8 bfa_lps_get_fwtag(struct bfa_s *bfa, u8 lp_tag) { struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); return BFA_LPS_FROM_TAG(mod, lp_tag)->fw_tag; } /* * Return lport services tag given the pid */ u8 bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid) { struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); struct bfa_lps_s *lps; int i; for (i = 0, lps = mod->lps_arr; i < mod->num_lps; i++, lps++) { if (lps->lp_pid == pid) return lps->bfa_tag; } /* Return base port tag anyway */ return 0; } /* * return port id assigned to the base lport */ u32 bfa_lps_get_base_pid(struct bfa_s *bfa) { struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); return BFA_LPS_FROM_TAG(mod, 0)->lp_pid; } /* * Set PID in case of n2n (which is assigned during PLOGI) */ void bfa_lps_set_n2n_pid(struct bfa_lps_s *lps, uint32_t n2n_pid) { bfa_trc(lps->bfa, lps->bfa_tag); bfa_trc(lps->bfa, n2n_pid); lps->lp_pid = n2n_pid; bfa_sm_send_event(lps, BFA_LPS_SM_SET_N2N_PID); } /* * LPS firmware message class handler. 
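* Dispatches BFI_LPS_I2H_LOGIN_RSP, BFI_LPS_I2H_LOGOUT_RSP and
* BFI_LPS_I2H_CVL_EVENT to the corresponding handlers.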
*/ void bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m) { union bfi_lps_i2h_msg_u msg; bfa_trc(bfa, m->mhdr.msg_id); msg.msg = m; switch (m->mhdr.msg_id) { case BFI_LPS_I2H_LOGIN_RSP: bfa_lps_login_rsp(bfa, msg.login_rsp); break; case BFI_LPS_I2H_LOGOUT_RSP: bfa_lps_logout_rsp(bfa, msg.logout_rsp); break; case BFI_LPS_I2H_CVL_EVENT: bfa_lps_rx_cvl_event(bfa, msg.cvl_event); break; default: bfa_trc(bfa, m->mhdr.msg_id); WARN_ON(1); } } static void bfa_fcport_aen_post(struct bfa_fcport_s *fcport, enum bfa_port_aen_event event) { struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad; struct bfa_aen_entry_s *aen_entry; bfad_get_aen_entry(bfad, aen_entry); if (!aen_entry) return; aen_entry->aen_data.port.ioc_type = bfa_get_type(fcport->bfa); aen_entry->aen_data.port.pwwn = fcport->pwwn; /* Send the AEN notification */ bfad_im_post_vendor_event(aen_entry, bfad, ++fcport->bfa->bfa_aen_seq, BFA_AEN_CAT_PORT, event); } /* * FC PORT state machine functions */ static void bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event) { bfa_trc(fcport->bfa, event); switch (event) { case BFA_FCPORT_SM_START: /* * Start event after IOC is configured and BFA is started. */ fcport->use_flash_cfg = BFA_TRUE; if (bfa_fcport_send_enable(fcport)) { bfa_trc(fcport->bfa, BFA_TRUE); bfa_sm_set_state(fcport, bfa_fcport_sm_enabling); } else { bfa_trc(fcport->bfa, BFA_FALSE); bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait); } break; case BFA_FCPORT_SM_ENABLE: /* * Port is persistently configured to be in enabled state. Do * not change state. Port enabling is done when START event is * received. */ break; case BFA_FCPORT_SM_DISABLE: /* * If a port is persistently configured to be disabled, the * first event will a port disable request. */ bfa_sm_set_state(fcport, bfa_fcport_sm_disabled); break; case BFA_FCPORT_SM_HWFAIL: bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); break; default: bfa_sm_fault(fcport->bfa, event); } } static void bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event) { char pwwn_buf[BFA_STRING_32]; struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad; bfa_trc(fcport->bfa, event); switch (event) { case BFA_FCPORT_SM_QRESUME: bfa_sm_set_state(fcport, bfa_fcport_sm_enabling); bfa_fcport_send_enable(fcport); break; case BFA_FCPORT_SM_STOP: bfa_reqq_wcancel(&fcport->reqq_wait); bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); break; case BFA_FCPORT_SM_ENABLE: /* * Already enable is in progress. */ break; case BFA_FCPORT_SM_DISABLE: /* * Just send disable request to firmware when room becomes * available in request queue. */ bfa_sm_set_state(fcport, bfa_fcport_sm_disabled); bfa_reqq_wcancel(&fcport->reqq_wait); bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_PORT_DISABLE, 0, "Port Disable"); wwn2str(pwwn_buf, fcport->pwwn); BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Base port disabled: WWN = %s\n", pwwn_buf); bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE); break; case BFA_FCPORT_SM_LINKUP: case BFA_FCPORT_SM_LINKDOWN: /* * Possible to get link events when doing back-to-back * enable/disables. 
*/ break; case BFA_FCPORT_SM_HWFAIL: bfa_reqq_wcancel(&fcport->reqq_wait); bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); break; case BFA_FCPORT_SM_FAA_MISCONFIG: bfa_fcport_reset_linkinfo(fcport); bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT); bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig); break; default: bfa_sm_fault(fcport->bfa, event); } } static void bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event) { char pwwn_buf[BFA_STRING_32]; struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad; bfa_trc(fcport->bfa, event); switch (event) { case BFA_FCPORT_SM_FWRSP: case BFA_FCPORT_SM_LINKDOWN: bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown); break; case BFA_FCPORT_SM_LINKUP: bfa_fcport_update_linkinfo(fcport); bfa_sm_set_state(fcport, bfa_fcport_sm_linkup); WARN_ON(!fcport->event_cbfn); bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE); break; case BFA_FCPORT_SM_ENABLE: /* * Already being enabled. */ break; case BFA_FCPORT_SM_DISABLE: if (bfa_fcport_send_disable(fcport)) bfa_sm_set_state(fcport, bfa_fcport_sm_disabling); else bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait); bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_PORT_DISABLE, 0, "Port Disable"); wwn2str(pwwn_buf, fcport->pwwn); BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Base port disabled: WWN = %s\n", pwwn_buf); bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE); break; case BFA_FCPORT_SM_STOP: bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); break; case BFA_FCPORT_SM_HWFAIL: bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); break; case BFA_FCPORT_SM_FAA_MISCONFIG: bfa_fcport_reset_linkinfo(fcport); bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT); bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig); break; default: bfa_sm_fault(fcport->bfa, event); } } static void bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event) { struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event; char pwwn_buf[BFA_STRING_32]; struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad; bfa_trc(fcport->bfa, event); switch (event) { case BFA_FCPORT_SM_LINKUP: bfa_fcport_update_linkinfo(fcport); bfa_sm_set_state(fcport, bfa_fcport_sm_linkup); WARN_ON(!fcport->event_cbfn); bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup"); if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) { bfa_trc(fcport->bfa, pevent->link_state.attr.vc_fcf.fcf.fipenabled); bfa_trc(fcport->bfa, pevent->link_state.attr.vc_fcf.fcf.fipfailed); if (pevent->link_state.attr.vc_fcf.fcf.fipfailed) bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_FIP_FCF_DISC, 0, "FIP FCF Discovery Failed"); else bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_FIP_FCF_DISC, 0, "FIP FCF Discovered"); } bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE); wwn2str(pwwn_buf, fcport->pwwn); BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Base port online: WWN = %s\n", pwwn_buf); bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ONLINE); /* If QoS is enabled and it is not online, send AEN */ if (fcport->cfg.qos_enabled && fcport->qos_attr.state != BFA_QOS_ONLINE) bfa_fcport_aen_post(fcport, BFA_PORT_AEN_QOS_NEG); break; case BFA_FCPORT_SM_LINKDOWN: /* * Possible to get link down event. */ break; case BFA_FCPORT_SM_ENABLE: /* * Already enabled. 
*/ break; case BFA_FCPORT_SM_DISABLE: if (bfa_fcport_send_disable(fcport)) bfa_sm_set_state(fcport, bfa_fcport_sm_disabling); else bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait); bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_PORT_DISABLE, 0, "Port Disable"); wwn2str(pwwn_buf, fcport->pwwn); BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Base port disabled: WWN = %s\n", pwwn_buf); bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE); break; case BFA_FCPORT_SM_STOP: bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); break; case BFA_FCPORT_SM_HWFAIL: bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); break; case BFA_FCPORT_SM_FAA_MISCONFIG: bfa_fcport_reset_linkinfo(fcport); bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT); bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig); break; default: bfa_sm_fault(fcport->bfa, event); } } static void bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event) { char pwwn_buf[BFA_STRING_32]; struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad; bfa_trc(fcport->bfa, event); switch (event) { case BFA_FCPORT_SM_ENABLE: /* * Already enabled. */ break; case BFA_FCPORT_SM_DISABLE: if (bfa_fcport_send_disable(fcport)) bfa_sm_set_state(fcport, bfa_fcport_sm_disabling); else bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait); bfa_fcport_reset_linkinfo(fcport); bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE); bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_PORT_DISABLE, 0, "Port Disable"); wwn2str(pwwn_buf, fcport->pwwn); BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Base port offline: WWN = %s\n", pwwn_buf); bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE); BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Base port disabled: WWN = %s\n", pwwn_buf); bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE); break; case BFA_FCPORT_SM_LINKDOWN: bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown); bfa_fcport_reset_linkinfo(fcport); bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE); bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown"); wwn2str(pwwn_buf, fcport->pwwn); if (BFA_PORT_IS_DISABLED(fcport->bfa)) { BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Base port offline: WWN = %s\n", pwwn_buf); bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE); } else { BFA_LOG(KERN_ERR, bfad, bfa_log_level, "Base port (WWN = %s) " "lost fabric connectivity\n", pwwn_buf); bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT); } break; case BFA_FCPORT_SM_STOP: bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); bfa_fcport_reset_linkinfo(fcport); wwn2str(pwwn_buf, fcport->pwwn); if (BFA_PORT_IS_DISABLED(fcport->bfa)) { BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Base port offline: WWN = %s\n", pwwn_buf); bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE); } else { BFA_LOG(KERN_ERR, bfad, bfa_log_level, "Base port (WWN = %s) " "lost fabric connectivity\n", pwwn_buf); bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT); } break; case BFA_FCPORT_SM_HWFAIL: bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); bfa_fcport_reset_linkinfo(fcport); bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE); wwn2str(pwwn_buf, fcport->pwwn); if (BFA_PORT_IS_DISABLED(fcport->bfa)) { BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Base port offline: WWN = %s\n", pwwn_buf); bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE); } else { BFA_LOG(KERN_ERR, bfad, bfa_log_level, "Base port (WWN = %s) " "lost fabric connectivity\n", pwwn_buf); bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT); } break; case BFA_FCPORT_SM_FAA_MISCONFIG: 
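/*
 * FAA misconfiguration reported while link was up: drop link info,
 * post a disconnect AEN and park the port in the faa_misconfig state.
 */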
bfa_fcport_reset_linkinfo(fcport); bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT); bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig); break; default: bfa_sm_fault(fcport->bfa, event); } } static void bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event) { bfa_trc(fcport->bfa, event); switch (event) { case BFA_FCPORT_SM_QRESUME: bfa_sm_set_state(fcport, bfa_fcport_sm_disabling); bfa_fcport_send_disable(fcport); break; case BFA_FCPORT_SM_STOP: bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); bfa_reqq_wcancel(&fcport->reqq_wait); break; case BFA_FCPORT_SM_ENABLE: bfa_sm_set_state(fcport, bfa_fcport_sm_toggling_qwait); break; case BFA_FCPORT_SM_DISABLE: /* * Already being disabled. */ break; case BFA_FCPORT_SM_LINKUP: case BFA_FCPORT_SM_LINKDOWN: /* * Possible to get link events when doing back-to-back * enable/disables. */ break; case BFA_FCPORT_SM_HWFAIL: bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail); bfa_reqq_wcancel(&fcport->reqq_wait); break; case BFA_FCPORT_SM_FAA_MISCONFIG: bfa_fcport_reset_linkinfo(fcport); bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT); bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig); break; default: bfa_sm_fault(fcport->bfa, event); } } static void bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event) { bfa_trc(fcport->bfa, event); switch (event) { case BFA_FCPORT_SM_QRESUME: bfa_sm_set_state(fcport, bfa_fcport_sm_disabling); bfa_fcport_send_disable(fcport); if (bfa_fcport_send_enable(fcport)) bfa_sm_set_state(fcport, bfa_fcport_sm_enabling); else bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait); break; case BFA_FCPORT_SM_STOP: bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); bfa_reqq_wcancel(&fcport->reqq_wait); break; case BFA_FCPORT_SM_ENABLE: break; case BFA_FCPORT_SM_DISABLE: bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait); break; case BFA_FCPORT_SM_LINKUP: case BFA_FCPORT_SM_LINKDOWN: /* * Possible to get link events when doing back-to-back * enable/disables. */ break; case BFA_FCPORT_SM_HWFAIL: bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail); bfa_reqq_wcancel(&fcport->reqq_wait); break; default: bfa_sm_fault(fcport->bfa, event); } } static void bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event) { char pwwn_buf[BFA_STRING_32]; struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad; bfa_trc(fcport->bfa, event); switch (event) { case BFA_FCPORT_SM_FWRSP: bfa_sm_set_state(fcport, bfa_fcport_sm_disabled); break; case BFA_FCPORT_SM_DISABLE: /* * Already being disabled. */ break; case BFA_FCPORT_SM_ENABLE: if (bfa_fcport_send_enable(fcport)) bfa_sm_set_state(fcport, bfa_fcport_sm_enabling); else bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait); bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_PORT_ENABLE, 0, "Port Enable"); wwn2str(pwwn_buf, fcport->pwwn); BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Base port enabled: WWN = %s\n", pwwn_buf); bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE); break; case BFA_FCPORT_SM_STOP: bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); break; case BFA_FCPORT_SM_LINKUP: case BFA_FCPORT_SM_LINKDOWN: /* * Possible to get link events when doing back-to-back * enable/disables. 
*/ break; case BFA_FCPORT_SM_HWFAIL: bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail); break; default: bfa_sm_fault(fcport->bfa, event); } } static void bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event) { char pwwn_buf[BFA_STRING_32]; struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad; bfa_trc(fcport->bfa, event); switch (event) { case BFA_FCPORT_SM_START: /* * Ignore start event for a port that is disabled. */ break; case BFA_FCPORT_SM_STOP: bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); break; case BFA_FCPORT_SM_ENABLE: if (bfa_fcport_send_enable(fcport)) bfa_sm_set_state(fcport, bfa_fcport_sm_enabling); else bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait); bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_PORT_ENABLE, 0, "Port Enable"); wwn2str(pwwn_buf, fcport->pwwn); BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Base port enabled: WWN = %s\n", pwwn_buf); bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE); break; case BFA_FCPORT_SM_DISABLE: /* * Already disabled. */ break; case BFA_FCPORT_SM_HWFAIL: bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail); break; case BFA_FCPORT_SM_DPORTENABLE: bfa_sm_set_state(fcport, bfa_fcport_sm_dport); break; case BFA_FCPORT_SM_DDPORTENABLE: bfa_sm_set_state(fcport, bfa_fcport_sm_ddport); break; default: bfa_sm_fault(fcport->bfa, event); } } static void bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event) { bfa_trc(fcport->bfa, event); switch (event) { case BFA_FCPORT_SM_START: if (bfa_fcport_send_enable(fcport)) bfa_sm_set_state(fcport, bfa_fcport_sm_enabling); else bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait); break; default: /* * Ignore all other events. */ ; } } /* * Port is enabled. IOC is down/failed. */ static void bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event) { bfa_trc(fcport->bfa, event); switch (event) { case BFA_FCPORT_SM_START: if (bfa_fcport_send_enable(fcport)) bfa_sm_set_state(fcport, bfa_fcport_sm_enabling); else bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait); break; default: /* * Ignore all events. */ ; } } /* * Port is disabled. IOC is down/failed. */ static void bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event) { bfa_trc(fcport->bfa, event); switch (event) { case BFA_FCPORT_SM_START: bfa_sm_set_state(fcport, bfa_fcport_sm_disabled); break; case BFA_FCPORT_SM_ENABLE: bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); break; default: /* * Ignore all events. 
*/ ; } } static void bfa_fcport_sm_dport(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event) { bfa_trc(fcport->bfa, event); switch (event) { case BFA_FCPORT_SM_DPORTENABLE: case BFA_FCPORT_SM_DISABLE: case BFA_FCPORT_SM_ENABLE: case BFA_FCPORT_SM_START: /* * Ignore event for a port that is dport */ break; case BFA_FCPORT_SM_STOP: bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); break; case BFA_FCPORT_SM_HWFAIL: bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail); break; case BFA_FCPORT_SM_DPORTDISABLE: bfa_sm_set_state(fcport, bfa_fcport_sm_disabled); break; default: bfa_sm_fault(fcport->bfa, event); } } static void bfa_fcport_sm_ddport(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event) { bfa_trc(fcport->bfa, event); switch (event) { case BFA_FCPORT_SM_DISABLE: case BFA_FCPORT_SM_DDPORTDISABLE: bfa_sm_set_state(fcport, bfa_fcport_sm_disabled); break; case BFA_FCPORT_SM_DPORTENABLE: case BFA_FCPORT_SM_DPORTDISABLE: case BFA_FCPORT_SM_ENABLE: case BFA_FCPORT_SM_START: /* * Ignore event for a port that is ddport */ break; case BFA_FCPORT_SM_STOP: bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); break; case BFA_FCPORT_SM_HWFAIL: bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail); break; default: bfa_sm_fault(fcport->bfa, event); } } static void bfa_fcport_sm_faa_misconfig(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event) { bfa_trc(fcport->bfa, event); switch (event) { case BFA_FCPORT_SM_DPORTENABLE: case BFA_FCPORT_SM_ENABLE: case BFA_FCPORT_SM_START: /* * Ignore event for a port as there is FAA misconfig */ break; case BFA_FCPORT_SM_DISABLE: if (bfa_fcport_send_disable(fcport)) bfa_sm_set_state(fcport, bfa_fcport_sm_disabling); else bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait); bfa_fcport_reset_linkinfo(fcport); bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE); bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_PORT_DISABLE, 0, "Port Disable"); bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE); break; case BFA_FCPORT_SM_STOP: bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); break; case BFA_FCPORT_SM_HWFAIL: bfa_fcport_reset_linkinfo(fcport); bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE); bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); break; default: bfa_sm_fault(fcport->bfa, event); } } /* * Link state is down */ static void bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln, enum bfa_fcport_ln_sm_event event) { bfa_trc(ln->fcport->bfa, event); switch (event) { case BFA_FCPORT_LN_SM_LINKUP: bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf); bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP); break; default: bfa_sm_fault(ln->fcport->bfa, event); } } /* * Link state is waiting for down notification */ static void bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln, enum bfa_fcport_ln_sm_event event) { bfa_trc(ln->fcport->bfa, event); switch (event) { case BFA_FCPORT_LN_SM_LINKUP: bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf); break; case BFA_FCPORT_LN_SM_NOTIFICATION: bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn); break; default: bfa_sm_fault(ln->fcport->bfa, event); } } /* * Link state is waiting for down notification and there is a pending up */ static void bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln, enum bfa_fcport_ln_sm_event event) { bfa_trc(ln->fcport->bfa, event); switch (event) { case BFA_FCPORT_LN_SM_LINKDOWN: bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf); break; case BFA_FCPORT_LN_SM_NOTIFICATION: bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf); bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP); break; default: bfa_sm_fault(ln->fcport->bfa, event); } } /* 
* Link state is up */ static void bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln, enum bfa_fcport_ln_sm_event event) { bfa_trc(ln->fcport->bfa, event); switch (event) { case BFA_FCPORT_LN_SM_LINKDOWN: bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf); bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN); break; default: bfa_sm_fault(ln->fcport->bfa, event); } } /* * Link state is waiting for up notification */ static void bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln, enum bfa_fcport_ln_sm_event event) { bfa_trc(ln->fcport->bfa, event); switch (event) { case BFA_FCPORT_LN_SM_LINKDOWN: bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf); break; case BFA_FCPORT_LN_SM_NOTIFICATION: bfa_sm_set_state(ln, bfa_fcport_ln_sm_up); break; default: bfa_sm_fault(ln->fcport->bfa, event); } } /* * Link state is waiting for up notification and there is a pending down */ static void bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln, enum bfa_fcport_ln_sm_event event) { bfa_trc(ln->fcport->bfa, event); switch (event) { case BFA_FCPORT_LN_SM_LINKUP: bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_up_nf); break; case BFA_FCPORT_LN_SM_NOTIFICATION: bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf); bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN); break; default: bfa_sm_fault(ln->fcport->bfa, event); } } /* * Link state is waiting for up notification and there are pending down and up */ static void bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln, enum bfa_fcport_ln_sm_event event) { bfa_trc(ln->fcport->bfa, event); switch (event) { case BFA_FCPORT_LN_SM_LINKDOWN: bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf); break; case BFA_FCPORT_LN_SM_NOTIFICATION: bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf); bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN); break; default: bfa_sm_fault(ln->fcport->bfa, event); } } static void __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete) { struct bfa_fcport_ln_s *ln = cbarg; if (complete) ln->fcport->event_cbfn(ln->fcport->event_cbarg, ln->ln_event); else bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION); } /* * Send SCN notification to upper layers. 
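* The per-port ln state machine serializes these notifications, so a
* pending linkdown callback always completes before the next linkup is
* delivered.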
* trunk - false if caller is fcport to ignore fcport event in trunked mode */ static void bfa_fcport_scn(struct bfa_fcport_s *fcport, enum bfa_port_linkstate event, bfa_boolean_t trunk) { if (fcport->cfg.trunked && !trunk) return; switch (event) { case BFA_PORT_LINKUP: bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKUP); break; case BFA_PORT_LINKDOWN: bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKDOWN); break; default: WARN_ON(1); } } static void bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_port_linkstate event) { struct bfa_fcport_s *fcport = ln->fcport; if (fcport->bfa->fcs) { fcport->event_cbfn(fcport->event_cbarg, event); bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION); } else { ln->ln_event = event; bfa_cb_queue(fcport->bfa, &ln->ln_qe, __bfa_cb_fcport_event, ln); } } #define FCPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_fcport_stats_u), \ BFA_CACHELINE_SZ)) void bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo, struct bfa_s *bfa) { struct bfa_mem_dma_s *fcport_dma = BFA_MEM_FCPORT_DMA(bfa); bfa_mem_dma_setup(minfo, fcport_dma, FCPORT_STATS_DMA_SZ); } static void bfa_fcport_qresume(void *cbarg) { struct bfa_fcport_s *fcport = cbarg; bfa_sm_send_event(fcport, BFA_FCPORT_SM_QRESUME); } static void bfa_fcport_mem_claim(struct bfa_fcport_s *fcport) { struct bfa_mem_dma_s *fcport_dma = &fcport->fcport_dma; fcport->stats_kva = bfa_mem_dma_virt(fcport_dma); fcport->stats_pa = bfa_mem_dma_phys(fcport_dma); fcport->stats = (union bfa_fcport_stats_u *) bfa_mem_dma_virt(fcport_dma); } /* * Memory initialization. */ void bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, struct bfa_pcidev_s *pcidev) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); struct bfa_port_cfg_s *port_cfg = &fcport->cfg; struct bfa_fcport_ln_s *ln = &fcport->ln; fcport->bfa = bfa; ln->fcport = fcport; bfa_fcport_mem_claim(fcport); bfa_sm_set_state(fcport, bfa_fcport_sm_uninit); bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn); /* * initialize time stamp for stats reset */ fcport->stats_reset_time = ktime_get_seconds(); fcport->stats_dma_ready = BFA_FALSE; /* * initialize and set default configuration */ port_cfg->topology = BFA_PORT_TOPOLOGY_P2P; port_cfg->speed = BFA_PORT_SPEED_AUTO; port_cfg->trunked = BFA_FALSE; port_cfg->maxfrsize = 0; port_cfg->trl_def_speed = BFA_PORT_SPEED_1GBPS; port_cfg->qos_bw.high = BFA_QOS_BW_HIGH; port_cfg->qos_bw.med = BFA_QOS_BW_MED; port_cfg->qos_bw.low = BFA_QOS_BW_LOW; fcport->fec_state = BFA_FEC_OFFLINE; INIT_LIST_HEAD(&fcport->stats_pending_q); INIT_LIST_HEAD(&fcport->statsclr_pending_q); bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport); } void bfa_fcport_start(struct bfa_s *bfa) { bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START); } /* * Called when IOC failure is detected. 
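* Sends BFA_FCPORT_SM_HWFAIL to the port state machine and, for a trunked
* port, marks the trunk offline via bfa_trunk_iocdisable().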
*/ void bfa_fcport_iocdisable(struct bfa_s *bfa) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); bfa_sm_send_event(fcport, BFA_FCPORT_SM_HWFAIL); bfa_trunk_iocdisable(bfa); } /* * Update loop info in fcport for SCN online */ static void bfa_fcport_update_loop_info(struct bfa_fcport_s *fcport, struct bfa_fcport_loop_info_s *loop_info) { fcport->myalpa = loop_info->myalpa; fcport->alpabm_valid = loop_info->alpabm_val; memcpy(fcport->alpabm.alpa_bm, loop_info->alpabm.alpa_bm, sizeof(struct fc_alpabm_s)); } static void bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport) { struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event; struct bfa_fcport_trunk_s *trunk = &fcport->trunk; fcport->speed = pevent->link_state.speed; fcport->topology = pevent->link_state.topology; if (fcport->topology == BFA_PORT_TOPOLOGY_LOOP) { bfa_fcport_update_loop_info(fcport, &pevent->link_state.attr.loop_info); return; } /* QoS Details */ fcport->qos_attr = pevent->link_state.qos_attr; fcport->qos_vc_attr = pevent->link_state.attr.vc_fcf.qos_vc_attr; if (fcport->cfg.bb_cr_enabled) fcport->bbcr_attr = pevent->link_state.attr.bbcr_attr; fcport->fec_state = pevent->link_state.fec_state; /* * update trunk state if applicable */ if (!fcport->cfg.trunked) trunk->attr.state = BFA_TRUNK_DISABLED; /* update FCoE specific */ fcport->fcoe_vlan = be16_to_cpu(pevent->link_state.attr.vc_fcf.fcf.vlan); bfa_trc(fcport->bfa, fcport->speed); bfa_trc(fcport->bfa, fcport->topology); } static void bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport) { fcport->speed = BFA_PORT_SPEED_UNKNOWN; fcport->topology = BFA_PORT_TOPOLOGY_NONE; fcport->fec_state = BFA_FEC_OFFLINE; } /* * Send port enable message to firmware. */ static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport) { struct bfi_fcport_enable_req_s *m; /* * Increment message tag before queue check, so that responses to old * requests are discarded. */ fcport->msgtag++; /* * check for room in queue to send request now */ m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT); if (!m) { bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT, &fcport->reqq_wait); return BFA_FALSE; } bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_ENABLE_REQ, bfa_fn_lpu(fcport->bfa)); m->nwwn = fcport->nwwn; m->pwwn = fcport->pwwn; m->port_cfg = fcport->cfg; m->msgtag = fcport->msgtag; m->port_cfg.maxfrsize = cpu_to_be16(fcport->cfg.maxfrsize); m->use_flash_cfg = fcport->use_flash_cfg; bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa); bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo); bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi); /* * queue I/O message to firmware */ bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh); return BFA_TRUE; } /* * Send port disable message to firmware. */ static bfa_boolean_t bfa_fcport_send_disable(struct bfa_fcport_s *fcport) { struct bfi_fcport_req_s *m; /* * Increment message tag before queue check, so that responses to old * requests are discarded. 
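* (bfa_fcport_isr() only accepts ENABLE/DISABLE responses whose msgtag
* matches the current fcport->msgtag)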
*/ fcport->msgtag++; /* * check for room in queue to send request now */ m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT); if (!m) { bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT, &fcport->reqq_wait); return BFA_FALSE; } bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_DISABLE_REQ, bfa_fn_lpu(fcport->bfa)); m->msgtag = fcport->msgtag; /* * queue I/O message to firmware */ bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh); return BFA_TRUE; } static void bfa_fcport_set_wwns(struct bfa_fcport_s *fcport) { fcport->pwwn = fcport->bfa->ioc.attr->pwwn; fcport->nwwn = fcport->bfa->ioc.attr->nwwn; bfa_trc(fcport->bfa, fcport->pwwn); bfa_trc(fcport->bfa, fcport->nwwn); } static void bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d, struct bfa_qos_stats_s *s) { u32 *dip = (u32 *) d; __be32 *sip = (__be32 *) s; int i; /* Now swap the 32 bit fields */ for (i = 0; i < (sizeof(struct bfa_qos_stats_s)/sizeof(u32)); ++i) dip[i] = be32_to_cpu(sip[i]); } static void bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d, struct bfa_fcoe_stats_s *s) { u32 *dip = (u32 *) d; __be32 *sip = (__be32 *) s; int i; for (i = 0; i < ((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32)); i = i + 2) { #ifdef __BIG_ENDIAN dip[i] = be32_to_cpu(sip[i]); dip[i + 1] = be32_to_cpu(sip[i + 1]); #else dip[i] = be32_to_cpu(sip[i + 1]); dip[i + 1] = be32_to_cpu(sip[i]); #endif } } static void __bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete) { struct bfa_fcport_s *fcport = (struct bfa_fcport_s *)cbarg; struct bfa_cb_pending_q_s *cb; struct list_head *qe, *qen; union bfa_fcport_stats_u *ret; if (complete) { time64_t time = ktime_get_seconds(); list_for_each_safe(qe, qen, &fcport->stats_pending_q) { bfa_q_deq(&fcport->stats_pending_q, &qe); cb = (struct bfa_cb_pending_q_s *)qe; if (fcport->stats_status == BFA_STATUS_OK) { ret = (union bfa_fcport_stats_u *)cb->data; /* Swap FC QoS or FCoE stats */ if (bfa_ioc_get_fcmode(&fcport->bfa->ioc)) bfa_fcport_qos_stats_swap(&ret->fcqos, &fcport->stats->fcqos); else { bfa_fcport_fcoe_stats_swap(&ret->fcoe, &fcport->stats->fcoe); ret->fcoe.secs_reset = time - fcport->stats_reset_time; } } bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe, fcport->stats_status); } fcport->stats_status = BFA_STATUS_OK; } else { INIT_LIST_HEAD(&fcport->stats_pending_q); fcport->stats_status = BFA_STATUS_OK; } } static void bfa_fcport_stats_get_timeout(void *cbarg) { struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg; bfa_trc(fcport->bfa, fcport->stats_qfull); if (fcport->stats_qfull) { bfa_reqq_wcancel(&fcport->stats_reqq_wait); fcport->stats_qfull = BFA_FALSE; } fcport->stats_status = BFA_STATUS_ETIMER; __bfa_cb_fcport_stats_get(fcport, BFA_TRUE); } static void bfa_fcport_send_stats_get(void *cbarg) { struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg; struct bfi_fcport_req_s *msg; msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT); if (!msg) { fcport->stats_qfull = BFA_TRUE; bfa_reqq_winit(&fcport->stats_reqq_wait, bfa_fcport_send_stats_get, fcport); bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT, &fcport->stats_reqq_wait); return; } fcport->stats_qfull = BFA_FALSE; memset(msg, 0, sizeof(struct bfi_fcport_req_s)); bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ, bfa_fn_lpu(fcport->bfa)); bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, msg->mh); } static void __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete) { struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg; struct bfa_cb_pending_q_s *cb; struct list_head *qe, *qen; if (complete) { /* * re-initialize time stamp for 
stats reset */ fcport->stats_reset_time = ktime_get_seconds(); list_for_each_safe(qe, qen, &fcport->statsclr_pending_q) { bfa_q_deq(&fcport->statsclr_pending_q, &qe); cb = (struct bfa_cb_pending_q_s *)qe; bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe, fcport->stats_status); } fcport->stats_status = BFA_STATUS_OK; } else { INIT_LIST_HEAD(&fcport->statsclr_pending_q); fcport->stats_status = BFA_STATUS_OK; } } static void bfa_fcport_stats_clr_timeout(void *cbarg) { struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg; bfa_trc(fcport->bfa, fcport->stats_qfull); if (fcport->stats_qfull) { bfa_reqq_wcancel(&fcport->stats_reqq_wait); fcport->stats_qfull = BFA_FALSE; } fcport->stats_status = BFA_STATUS_ETIMER; __bfa_cb_fcport_stats_clr(fcport, BFA_TRUE); } static void bfa_fcport_send_stats_clear(void *cbarg) { struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg; struct bfi_fcport_req_s *msg; msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT); if (!msg) { fcport->stats_qfull = BFA_TRUE; bfa_reqq_winit(&fcport->stats_reqq_wait, bfa_fcport_send_stats_clear, fcport); bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT, &fcport->stats_reqq_wait); return; } fcport->stats_qfull = BFA_FALSE; memset(msg, 0, sizeof(struct bfi_fcport_req_s)); bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ, bfa_fn_lpu(fcport->bfa)); bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, msg->mh); } /* * Handle trunk SCN event from firmware. */ static void bfa_trunk_scn(struct bfa_fcport_s *fcport, struct bfi_fcport_trunk_scn_s *scn) { struct bfa_fcport_trunk_s *trunk = &fcport->trunk; struct bfi_fcport_trunk_link_s *tlink; struct bfa_trunk_link_attr_s *lattr; enum bfa_trunk_state state_prev; int i; int link_bm = 0; bfa_trc(fcport->bfa, fcport->cfg.trunked); WARN_ON(scn->trunk_state != BFA_TRUNK_ONLINE && scn->trunk_state != BFA_TRUNK_OFFLINE); bfa_trc(fcport->bfa, trunk->attr.state); bfa_trc(fcport->bfa, scn->trunk_state); bfa_trc(fcport->bfa, scn->trunk_speed); /* * Save off new state for trunk attribute query */ state_prev = trunk->attr.state; if (fcport->cfg.trunked && (trunk->attr.state != BFA_TRUNK_DISABLED)) trunk->attr.state = scn->trunk_state; trunk->attr.speed = scn->trunk_speed; for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) { lattr = &trunk->attr.link_attr[i]; tlink = &scn->tlink[i]; lattr->link_state = tlink->state; lattr->trunk_wwn = tlink->trunk_wwn; lattr->fctl = tlink->fctl; lattr->speed = tlink->speed; lattr->deskew = be32_to_cpu(tlink->deskew); if (tlink->state == BFA_TRUNK_LINK_STATE_UP) { fcport->speed = tlink->speed; fcport->topology = BFA_PORT_TOPOLOGY_P2P; link_bm |= 1 << i; } bfa_trc(fcport->bfa, lattr->link_state); bfa_trc(fcport->bfa, lattr->trunk_wwn); bfa_trc(fcport->bfa, lattr->fctl); bfa_trc(fcport->bfa, lattr->speed); bfa_trc(fcport->bfa, lattr->deskew); } switch (link_bm) { case 3: bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,1)"); break; case 2: bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(-,1)"); break; case 1: bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,-)"); break; default: bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_TRUNK_SCN, 0, "Trunk down"); } /* * Notify upper layers if trunk state changed. */ if ((state_prev != trunk->attr.state) || (scn->trunk_state == BFA_TRUNK_OFFLINE)) { bfa_fcport_scn(fcport, (scn->trunk_state == BFA_TRUNK_ONLINE) ? 
BFA_PORT_LINKUP : BFA_PORT_LINKDOWN, BFA_TRUE); } } static void bfa_trunk_iocdisable(struct bfa_s *bfa) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); int i = 0; /* * In trunked mode, notify upper layers that link is down */ if (fcport->cfg.trunked) { if (fcport->trunk.attr.state == BFA_TRUNK_ONLINE) bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_TRUE); fcport->trunk.attr.state = BFA_TRUNK_OFFLINE; fcport->trunk.attr.speed = BFA_PORT_SPEED_UNKNOWN; for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) { fcport->trunk.attr.link_attr[i].trunk_wwn = 0; fcport->trunk.attr.link_attr[i].fctl = BFA_TRUNK_LINK_FCTL_NORMAL; fcport->trunk.attr.link_attr[i].link_state = BFA_TRUNK_LINK_STATE_DN_LINKDN; fcport->trunk.attr.link_attr[i].speed = BFA_PORT_SPEED_UNKNOWN; fcport->trunk.attr.link_attr[i].deskew = 0; } } } /* * Called to initialize port attributes */ void bfa_fcport_init(struct bfa_s *bfa) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); /* * Initialize port attributes from IOC hardware data. */ bfa_fcport_set_wwns(fcport); if (fcport->cfg.maxfrsize == 0) fcport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc); fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc); fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc); if (bfa_fcport_is_pbcdisabled(bfa)) bfa->modules.port.pbc_disabled = BFA_TRUE; WARN_ON(!fcport->cfg.maxfrsize); WARN_ON(!fcport->cfg.rx_bbcredit); WARN_ON(!fcport->speed_sup); } /* * Firmware message handler. */ void bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); union bfi_fcport_i2h_msg_u i2hmsg; i2hmsg.msg = msg; fcport->event_arg.i2hmsg = i2hmsg; bfa_trc(bfa, msg->mhdr.msg_id); bfa_trc(bfa, bfa_sm_to_state(hal_port_sm_table, fcport->sm)); switch (msg->mhdr.msg_id) { case BFI_FCPORT_I2H_ENABLE_RSP: if (fcport->msgtag == i2hmsg.penable_rsp->msgtag) { fcport->stats_dma_ready = BFA_TRUE; if (fcport->use_flash_cfg) { fcport->cfg = i2hmsg.penable_rsp->port_cfg; fcport->cfg.maxfrsize = cpu_to_be16(fcport->cfg.maxfrsize); fcport->cfg.path_tov = cpu_to_be16(fcport->cfg.path_tov); fcport->cfg.q_depth = cpu_to_be16(fcport->cfg.q_depth); if (fcport->cfg.trunked) fcport->trunk.attr.state = BFA_TRUNK_OFFLINE; else fcport->trunk.attr.state = BFA_TRUNK_DISABLED; fcport->qos_attr.qos_bw = i2hmsg.penable_rsp->port_cfg.qos_bw; fcport->use_flash_cfg = BFA_FALSE; } if (fcport->cfg.qos_enabled) fcport->qos_attr.state = BFA_QOS_OFFLINE; else fcport->qos_attr.state = BFA_QOS_DISABLED; fcport->qos_attr.qos_bw_op = i2hmsg.penable_rsp->port_cfg.qos_bw; if (fcport->cfg.bb_cr_enabled) fcport->bbcr_attr.state = BFA_BBCR_OFFLINE; else fcport->bbcr_attr.state = BFA_BBCR_DISABLED; bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP); } break; case BFI_FCPORT_I2H_DISABLE_RSP: if (fcport->msgtag == i2hmsg.penable_rsp->msgtag) bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP); break; case BFI_FCPORT_I2H_EVENT: if (fcport->cfg.bb_cr_enabled) fcport->bbcr_attr.state = BFA_BBCR_OFFLINE; else fcport->bbcr_attr.state = BFA_BBCR_DISABLED; if (i2hmsg.event->link_state.linkstate == BFA_PORT_LINKUP) bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP); else { if (i2hmsg.event->link_state.linkstate_rsn == BFA_PORT_LINKSTATE_RSN_FAA_MISCONFIG) bfa_sm_send_event(fcport, BFA_FCPORT_SM_FAA_MISCONFIG); else bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKDOWN); } fcport->qos_attr.qos_bw_op = i2hmsg.event->link_state.qos_attr.qos_bw_op; break; case BFI_FCPORT_I2H_TRUNK_SCN: bfa_trunk_scn(fcport, i2hmsg.trunk_scn); break; case BFI_FCPORT_I2H_STATS_GET_RSP: /* * check for timer pop before 
processing the rsp */ if (list_empty(&fcport->stats_pending_q) || (fcport->stats_status == BFA_STATUS_ETIMER)) break; bfa_timer_stop(&fcport->timer); fcport->stats_status = i2hmsg.pstatsget_rsp->status; __bfa_cb_fcport_stats_get(fcport, BFA_TRUE); break; case BFI_FCPORT_I2H_STATS_CLEAR_RSP: /* * check for timer pop before processing the rsp */ if (list_empty(&fcport->statsclr_pending_q) || (fcport->stats_status == BFA_STATUS_ETIMER)) break; bfa_timer_stop(&fcport->timer); fcport->stats_status = BFA_STATUS_OK; __bfa_cb_fcport_stats_clr(fcport, BFA_TRUE); break; case BFI_FCPORT_I2H_ENABLE_AEN: bfa_sm_send_event(fcport, BFA_FCPORT_SM_ENABLE); break; case BFI_FCPORT_I2H_DISABLE_AEN: bfa_sm_send_event(fcport, BFA_FCPORT_SM_DISABLE); break; default: WARN_ON(1); break; } } /* * Registered callback for port events. */ void bfa_fcport_event_register(struct bfa_s *bfa, void (*cbfn) (void *cbarg, enum bfa_port_linkstate event), void *cbarg) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); fcport->event_cbfn = cbfn; fcport->event_cbarg = cbarg; } bfa_status_t bfa_fcport_enable(struct bfa_s *bfa) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); if (bfa_fcport_is_pbcdisabled(bfa)) return BFA_STATUS_PBC; if (bfa_ioc_is_disabled(&bfa->ioc)) return BFA_STATUS_IOC_DISABLED; if (fcport->diag_busy) return BFA_STATUS_DIAG_BUSY; bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_ENABLE); return BFA_STATUS_OK; } bfa_status_t bfa_fcport_disable(struct bfa_s *bfa) { if (bfa_fcport_is_pbcdisabled(bfa)) return BFA_STATUS_PBC; if (bfa_ioc_is_disabled(&bfa->ioc)) return BFA_STATUS_IOC_DISABLED; bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DISABLE); return BFA_STATUS_OK; } /* If PBC is disabled on port, return error */ bfa_status_t bfa_fcport_is_pbcdisabled(struct bfa_s *bfa) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); struct bfa_iocfc_s *iocfc = &bfa->iocfc; struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp; if (cfgrsp->pbc_cfg.port_enabled == BFI_PBC_PORT_DISABLED) { bfa_trc(bfa, fcport->pwwn); return BFA_STATUS_PBC; } return BFA_STATUS_OK; } /* * Configure port speed. */ bfa_status_t bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); bfa_trc(bfa, speed); if (fcport->cfg.trunked == BFA_TRUE) return BFA_STATUS_TRUNK_ENABLED; if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) && (speed == BFA_PORT_SPEED_16GBPS)) return BFA_STATUS_UNSUPP_SPEED; if ((speed != BFA_PORT_SPEED_AUTO) && (speed > fcport->speed_sup)) { bfa_trc(bfa, fcport->speed_sup); return BFA_STATUS_UNSUPP_SPEED; } /* Port speed entered needs to be checked */ if (bfa_ioc_get_type(&fcport->bfa->ioc) == BFA_IOC_TYPE_FC) { /* For CT2, 1G is not supported */ if ((speed == BFA_PORT_SPEED_1GBPS) && (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id))) return BFA_STATUS_UNSUPP_SPEED; /* Already checked for Auto Speed and Max Speed supp */ if (!(speed == BFA_PORT_SPEED_1GBPS || speed == BFA_PORT_SPEED_2GBPS || speed == BFA_PORT_SPEED_4GBPS || speed == BFA_PORT_SPEED_8GBPS || speed == BFA_PORT_SPEED_16GBPS || speed == BFA_PORT_SPEED_AUTO)) return BFA_STATUS_UNSUPP_SPEED; } else { if (speed != BFA_PORT_SPEED_10GBPS) return BFA_STATUS_UNSUPP_SPEED; } fcport->cfg.speed = speed; return BFA_STATUS_OK; } /* * Get current speed. */ enum bfa_port_speed bfa_fcport_get_speed(struct bfa_s *bfa) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); return fcport->speed; } /* * Configure port topology. 
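* Loop topology is rejected while QoS, rate limiting, trunking or d-port/ddport mode is active, at 16Gbps, or on mezzanine cards; see the checks below.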
*/ bfa_status_t bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_port_topology topology) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); bfa_trc(bfa, topology); bfa_trc(bfa, fcport->cfg.topology); switch (topology) { case BFA_PORT_TOPOLOGY_P2P: break; case BFA_PORT_TOPOLOGY_LOOP: if ((bfa_fcport_is_qos_enabled(bfa) != BFA_FALSE) || (fcport->qos_attr.state != BFA_QOS_DISABLED)) return BFA_STATUS_ERROR_QOS_ENABLED; if (fcport->cfg.ratelimit != BFA_FALSE) return BFA_STATUS_ERROR_TRL_ENABLED; if ((bfa_fcport_is_trunk_enabled(bfa) != BFA_FALSE) || (fcport->trunk.attr.state != BFA_TRUNK_DISABLED)) return BFA_STATUS_ERROR_TRUNK_ENABLED; if ((bfa_fcport_get_speed(bfa) == BFA_PORT_SPEED_16GBPS) || (fcport->cfg.speed == BFA_PORT_SPEED_16GBPS)) return BFA_STATUS_UNSUPP_SPEED; if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type)) return BFA_STATUS_LOOP_UNSUPP_MEZZ; if (bfa_fcport_is_dport(bfa) != BFA_FALSE) return BFA_STATUS_DPORT_ERR; if (bfa_fcport_is_ddport(bfa) != BFA_FALSE) return BFA_STATUS_DPORT_ERR; break; case BFA_PORT_TOPOLOGY_AUTO: break; default: return BFA_STATUS_EINVAL; } fcport->cfg.topology = topology; return BFA_STATUS_OK; } /* * Get current topology. */ enum bfa_port_topology bfa_fcport_get_topology(struct bfa_s *bfa) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); return fcport->topology; } /* * Get config topology. */ enum bfa_port_topology bfa_fcport_get_cfg_topology(struct bfa_s *bfa) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); return fcport->cfg.topology; } bfa_status_t bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); bfa_trc(bfa, alpa); bfa_trc(bfa, fcport->cfg.cfg_hardalpa); bfa_trc(bfa, fcport->cfg.hardalpa); fcport->cfg.cfg_hardalpa = BFA_TRUE; fcport->cfg.hardalpa = alpa; return BFA_STATUS_OK; } bfa_status_t bfa_fcport_clr_hardalpa(struct bfa_s *bfa) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); bfa_trc(bfa, fcport->cfg.cfg_hardalpa); bfa_trc(bfa, fcport->cfg.hardalpa); fcport->cfg.cfg_hardalpa = BFA_FALSE; return BFA_STATUS_OK; } bfa_boolean_t bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); *alpa = fcport->cfg.hardalpa; return fcport->cfg.cfg_hardalpa; } u8 bfa_fcport_get_myalpa(struct bfa_s *bfa) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); return fcport->myalpa; } bfa_status_t bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); bfa_trc(bfa, maxfrsize); bfa_trc(bfa, fcport->cfg.maxfrsize); /* with in range */ if ((maxfrsize > FC_MAX_PDUSZ) || (maxfrsize < FC_MIN_PDUSZ)) return BFA_STATUS_INVLD_DFSZ; /* power of 2, if not the max frame size of 2112 */ if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1))) return BFA_STATUS_INVLD_DFSZ; fcport->cfg.maxfrsize = maxfrsize; return BFA_STATUS_OK; } u16 bfa_fcport_get_maxfrsize(struct bfa_s *bfa) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); return fcport->cfg.maxfrsize; } u8 bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa) { if (bfa_fcport_get_topology(bfa) != BFA_PORT_TOPOLOGY_LOOP) return (BFA_FCPORT_MOD(bfa))->cfg.rx_bbcredit; else return 0; } void bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); fcport->cfg.tx_bbcredit = (u8)tx_bbcredit; } /* * Get port attributes. 
*/ wwn_t bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); if (node) return fcport->nwwn; else return fcport->pwwn; } void bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); memset(attr, 0, sizeof(struct bfa_port_attr_s)); attr->nwwn = fcport->nwwn; attr->pwwn = fcport->pwwn; attr->factorypwwn = bfa->ioc.attr->mfg_pwwn; attr->factorynwwn = bfa->ioc.attr->mfg_nwwn; memcpy(&attr->pport_cfg, &fcport->cfg, sizeof(struct bfa_port_cfg_s)); /* speed attributes */ attr->pport_cfg.speed = fcport->cfg.speed; attr->speed_supported = fcport->speed_sup; attr->speed = fcport->speed; attr->cos_supported = FC_CLASS_3; /* topology attributes */ attr->pport_cfg.topology = fcport->cfg.topology; attr->topology = fcport->topology; attr->pport_cfg.trunked = fcport->cfg.trunked; /* beacon attributes */ attr->beacon = fcport->beacon; attr->link_e2e_beacon = fcport->link_e2e_beacon; attr->pport_cfg.path_tov = bfa_fcpim_path_tov_get(bfa); attr->pport_cfg.q_depth = bfa_fcpim_qdepth_get(bfa); attr->port_state = bfa_sm_to_state(hal_port_sm_table, fcport->sm); attr->fec_state = fcport->fec_state; /* PBC Disabled State */ if (bfa_fcport_is_pbcdisabled(bfa)) attr->port_state = BFA_PORT_ST_PREBOOT_DISABLED; else { if (bfa_ioc_is_disabled(&fcport->bfa->ioc)) attr->port_state = BFA_PORT_ST_IOCDIS; else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc)) attr->port_state = BFA_PORT_ST_FWMISMATCH; } /* FCoE vlan */ attr->fcoe_vlan = fcport->fcoe_vlan; } #define BFA_FCPORT_STATS_TOV 1000 /* * Fetch port statistics (FCQoS or FCoE). */ bfa_status_t bfa_fcport_get_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); if (!bfa_iocfc_is_operational(bfa) || !fcport->stats_dma_ready) return BFA_STATUS_IOC_NON_OP; if (!list_empty(&fcport->statsclr_pending_q)) return BFA_STATUS_DEVBUSY; if (list_empty(&fcport->stats_pending_q)) { list_add_tail(&cb->hcb_qe.qe, &fcport->stats_pending_q); bfa_fcport_send_stats_get(fcport); bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_get_timeout, fcport, BFA_FCPORT_STATS_TOV); } else list_add_tail(&cb->hcb_qe.qe, &fcport->stats_pending_q); return BFA_STATUS_OK; } /* * Reset port statistics (FCQoS or FCoE). */ bfa_status_t bfa_fcport_clear_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); if (!bfa_iocfc_is_operational(bfa) || !fcport->stats_dma_ready) return BFA_STATUS_IOC_NON_OP; if (!list_empty(&fcport->stats_pending_q)) return BFA_STATUS_DEVBUSY; if (list_empty(&fcport->statsclr_pending_q)) { list_add_tail(&cb->hcb_qe.qe, &fcport->statsclr_pending_q); bfa_fcport_send_stats_clear(fcport); bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_clr_timeout, fcport, BFA_FCPORT_STATS_TOV); } else list_add_tail(&cb->hcb_qe.qe, &fcport->statsclr_pending_q); return BFA_STATUS_OK; } /* * Fetch port attributes. 
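* (The routines below query and tune port state: disabled/d-port/link-up checks, QoS bandwidth, FAA, rate limit, beacon and BB credit recovery settings.)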
*/ bfa_boolean_t bfa_fcport_is_disabled(struct bfa_s *bfa) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); return bfa_sm_to_state(hal_port_sm_table, fcport->sm) == BFA_PORT_ST_DISABLED; } bfa_boolean_t bfa_fcport_is_dport(struct bfa_s *bfa) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); return (bfa_sm_to_state(hal_port_sm_table, fcport->sm) == BFA_PORT_ST_DPORT); } bfa_boolean_t bfa_fcport_is_ddport(struct bfa_s *bfa) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); return (bfa_sm_to_state(hal_port_sm_table, fcport->sm) == BFA_PORT_ST_DDPORT); } bfa_status_t bfa_fcport_set_qos_bw(struct bfa_s *bfa, struct bfa_qos_bw_s *qos_bw) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); enum bfa_ioc_type_e ioc_type = bfa_get_type(bfa); bfa_trc(bfa, ioc_type); if ((qos_bw->high == 0) || (qos_bw->med == 0) || (qos_bw->low == 0)) return BFA_STATUS_QOS_BW_INVALID; if ((qos_bw->high + qos_bw->med + qos_bw->low) != 100) return BFA_STATUS_QOS_BW_INVALID; if ((qos_bw->med > qos_bw->high) || (qos_bw->low > qos_bw->med) || (qos_bw->low > qos_bw->high)) return BFA_STATUS_QOS_BW_INVALID; if ((ioc_type == BFA_IOC_TYPE_FC) && (fcport->cfg.topology != BFA_PORT_TOPOLOGY_LOOP)) fcport->cfg.qos_bw = *qos_bw; return BFA_STATUS_OK; } bfa_boolean_t bfa_fcport_is_ratelim(struct bfa_s *bfa) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); return fcport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE; } /* * Enable/Disable FAA feature in port config */ void bfa_fcport_cfg_faa(struct bfa_s *bfa, u8 state) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); bfa_trc(bfa, state); fcport->cfg.faa_state = state; } /* * Get default minimum ratelim speed */ enum bfa_port_speed bfa_fcport_get_ratelim_speed(struct bfa_s *bfa) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); bfa_trc(bfa, fcport->cfg.trl_def_speed); return fcport->cfg.trl_def_speed; } void bfa_fcport_beacon(void *dev, bfa_boolean_t beacon, bfa_boolean_t link_e2e_beacon) { struct bfa_s *bfa = dev; struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); bfa_trc(bfa, beacon); bfa_trc(bfa, link_e2e_beacon); bfa_trc(bfa, fcport->beacon); bfa_trc(bfa, fcport->link_e2e_beacon); fcport->beacon = beacon; fcport->link_e2e_beacon = link_e2e_beacon; } bfa_boolean_t bfa_fcport_is_linkup(struct bfa_s *bfa) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); return (!fcport->cfg.trunked && bfa_sm_cmp_state(fcport, bfa_fcport_sm_linkup)) || (fcport->cfg.trunked && fcport->trunk.attr.state == BFA_TRUNK_ONLINE); } bfa_boolean_t bfa_fcport_is_qos_enabled(struct bfa_s *bfa) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); return fcport->cfg.qos_enabled; } bfa_boolean_t bfa_fcport_is_trunk_enabled(struct bfa_s *bfa) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); return fcport->cfg.trunked; } bfa_status_t bfa_fcport_cfg_bbcr(struct bfa_s *bfa, bfa_boolean_t on_off, u8 bb_scn) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); bfa_trc(bfa, on_off); if (bfa_ioc_get_type(&fcport->bfa->ioc) != BFA_IOC_TYPE_FC) return BFA_STATUS_BBCR_FC_ONLY; if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type) && (bfa->ioc.attr->card_type != BFA_MFG_TYPE_CHINOOK)) return BFA_STATUS_CMD_NOTSUPP_MEZZ; if (on_off) { if (fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) return BFA_STATUS_TOPOLOGY_LOOP; if (fcport->cfg.qos_enabled) return BFA_STATUS_ERROR_QOS_ENABLED; if (fcport->cfg.trunked) return BFA_STATUS_TRUNK_ENABLED; if ((fcport->cfg.speed != BFA_PORT_SPEED_AUTO) && (fcport->cfg.speed < bfa_ioc_speed_sup(&bfa->ioc))) return BFA_STATUS_ERR_BBCR_SPEED_UNSUPPORT; if (bfa_ioc_speed_sup(&bfa->ioc) 
< BFA_PORT_SPEED_8GBPS) return BFA_STATUS_FEATURE_NOT_SUPPORTED; if (fcport->cfg.bb_cr_enabled) { if (bb_scn != fcport->cfg.bb_scn) return BFA_STATUS_BBCR_CFG_NO_CHANGE; else return BFA_STATUS_NO_CHANGE; } if ((bb_scn == 0) || (bb_scn > BFA_BB_SCN_MAX)) bb_scn = BFA_BB_SCN_DEF; fcport->cfg.bb_cr_enabled = on_off; fcport->cfg.bb_scn = bb_scn; } else { if (!fcport->cfg.bb_cr_enabled) return BFA_STATUS_NO_CHANGE; fcport->cfg.bb_cr_enabled = on_off; fcport->cfg.bb_scn = 0; } return BFA_STATUS_OK; } bfa_status_t bfa_fcport_get_bbcr_attr(struct bfa_s *bfa, struct bfa_bbcr_attr_s *bbcr_attr) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); if (bfa_ioc_get_type(&fcport->bfa->ioc) != BFA_IOC_TYPE_FC) return BFA_STATUS_BBCR_FC_ONLY; if (fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) return BFA_STATUS_TOPOLOGY_LOOP; *bbcr_attr = fcport->bbcr_attr; return BFA_STATUS_OK; } void bfa_fcport_dportenable(struct bfa_s *bfa) { /* * Assume the caller has verified that the port is in the disabled state */ bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DPORTENABLE); bfa_port_set_dportenabled(&bfa->modules.port, BFA_TRUE); } void bfa_fcport_dportdisable(struct bfa_s *bfa) { /* * Assume the caller has verified that the port is in the disabled state */ bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DPORTDISABLE); bfa_port_set_dportenabled(&bfa->modules.port, BFA_FALSE); } static void bfa_fcport_ddportenable(struct bfa_s *bfa) { /* * Assume the caller has verified that the port is in the disabled state */ bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DDPORTENABLE); } static void bfa_fcport_ddportdisable(struct bfa_s *bfa) { /* * Assume the caller has verified that the port is in the disabled state */ bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DDPORTDISABLE); } /* * Rport State machine functions */ /* * Beginning state, only create event expected. */ static void bfa_rport_sm_uninit(struct bfa_rport_s *rp, enum bfa_rport_event event) { bfa_trc(rp->bfa, rp->rport_tag); bfa_trc(rp->bfa, event); switch (event) { case BFA_RPORT_SM_CREATE: bfa_stats(rp, sm_un_cr); bfa_sm_set_state(rp, bfa_rport_sm_created); break; default: bfa_stats(rp, sm_un_unexp); bfa_sm_fault(rp->bfa, event); } } static void bfa_rport_sm_created(struct bfa_rport_s *rp, enum bfa_rport_event event) { bfa_trc(rp->bfa, rp->rport_tag); bfa_trc(rp->bfa, event); switch (event) { case BFA_RPORT_SM_ONLINE: bfa_stats(rp, sm_cr_on); if (bfa_rport_send_fwcreate(rp)) bfa_sm_set_state(rp, bfa_rport_sm_fwcreate); else bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull); break; case BFA_RPORT_SM_DELETE: bfa_stats(rp, sm_cr_del); bfa_sm_set_state(rp, bfa_rport_sm_uninit); bfa_rport_free(rp); break; case BFA_RPORT_SM_HWFAIL: bfa_stats(rp, sm_cr_hwf); bfa_sm_set_state(rp, bfa_rport_sm_iocdisable); break; default: bfa_stats(rp, sm_cr_unexp); bfa_sm_fault(rp->bfa, event); } } /* * Waiting for rport create response from firmware.
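* A FWRSP completes the create and moves the rport online; DELETE and OFFLINE received here are remembered as pending states; HWFAIL drops to iocdisable.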
*/ static void bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, enum bfa_rport_event event) { bfa_trc(rp->bfa, rp->rport_tag); bfa_trc(rp->bfa, event); switch (event) { case BFA_RPORT_SM_FWRSP: bfa_stats(rp, sm_fwc_rsp); bfa_sm_set_state(rp, bfa_rport_sm_online); bfa_rport_online_cb(rp); break; case BFA_RPORT_SM_DELETE: bfa_stats(rp, sm_fwc_del); bfa_sm_set_state(rp, bfa_rport_sm_delete_pending); break; case BFA_RPORT_SM_OFFLINE: bfa_stats(rp, sm_fwc_off); bfa_sm_set_state(rp, bfa_rport_sm_offline_pending); break; case BFA_RPORT_SM_HWFAIL: bfa_stats(rp, sm_fwc_hwf); bfa_sm_set_state(rp, bfa_rport_sm_iocdisable); break; default: bfa_stats(rp, sm_fwc_unexp); bfa_sm_fault(rp->bfa, event); } } /* * Request queue is full, awaiting queue resume to send create request. */ static void bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event) { bfa_trc(rp->bfa, rp->rport_tag); bfa_trc(rp->bfa, event); switch (event) { case BFA_RPORT_SM_QRESUME: bfa_sm_set_state(rp, bfa_rport_sm_fwcreate); bfa_rport_send_fwcreate(rp); break; case BFA_RPORT_SM_DELETE: bfa_stats(rp, sm_fwc_del); bfa_sm_set_state(rp, bfa_rport_sm_uninit); bfa_reqq_wcancel(&rp->reqq_wait); bfa_rport_free(rp); break; case BFA_RPORT_SM_OFFLINE: bfa_stats(rp, sm_fwc_off); bfa_sm_set_state(rp, bfa_rport_sm_offline); bfa_reqq_wcancel(&rp->reqq_wait); bfa_rport_offline_cb(rp); break; case BFA_RPORT_SM_HWFAIL: bfa_stats(rp, sm_fwc_hwf); bfa_sm_set_state(rp, bfa_rport_sm_iocdisable); bfa_reqq_wcancel(&rp->reqq_wait); break; default: bfa_stats(rp, sm_fwc_unexp); bfa_sm_fault(rp->bfa, event); } } /* * Online state - normal parking state. */ static void bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event) { struct bfi_rport_qos_scn_s *qos_scn; bfa_trc(rp->bfa, rp->rport_tag); bfa_trc(rp->bfa, event); switch (event) { case BFA_RPORT_SM_OFFLINE: bfa_stats(rp, sm_on_off); if (bfa_rport_send_fwdelete(rp)) bfa_sm_set_state(rp, bfa_rport_sm_fwdelete); else bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull); break; case BFA_RPORT_SM_DELETE: bfa_stats(rp, sm_on_del); if (bfa_rport_send_fwdelete(rp)) bfa_sm_set_state(rp, bfa_rport_sm_deleting); else bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull); break; case BFA_RPORT_SM_HWFAIL: bfa_stats(rp, sm_on_hwf); bfa_sm_set_state(rp, bfa_rport_sm_iocdisable); break; case BFA_RPORT_SM_SET_SPEED: bfa_rport_send_fwspeed(rp); break; case BFA_RPORT_SM_QOS_SCN: qos_scn = (struct bfi_rport_qos_scn_s *) rp->event_arg.fw_msg; rp->qos_attr = qos_scn->new_qos_attr; bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_flow_id); bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_flow_id); bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_priority); bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_priority); qos_scn->old_qos_attr.qos_flow_id = be32_to_cpu(qos_scn->old_qos_attr.qos_flow_id); qos_scn->new_qos_attr.qos_flow_id = be32_to_cpu(qos_scn->new_qos_attr.qos_flow_id); if (qos_scn->old_qos_attr.qos_flow_id != qos_scn->new_qos_attr.qos_flow_id) bfa_cb_rport_qos_scn_flowid(rp->rport_drv, qos_scn->old_qos_attr, qos_scn->new_qos_attr); if (qos_scn->old_qos_attr.qos_priority != qos_scn->new_qos_attr.qos_priority) bfa_cb_rport_qos_scn_prio(rp->rport_drv, qos_scn->old_qos_attr, qos_scn->new_qos_attr); break; default: bfa_stats(rp, sm_on_unexp); bfa_sm_fault(rp->bfa, event); } } /* * Firmware rport is being deleted - awaiting f/w response. 
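* FWRSP moves the rport to offline; a DELETE received here escalates to the deleting state; HWFAIL drops to iocdisable and reports offline.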
*/ static void bfa_rport_sm_fwdelete(struct bfa_rport_s *rp, enum bfa_rport_event event) { bfa_trc(rp->bfa, rp->rport_tag); bfa_trc(rp->bfa, event); switch (event) { case BFA_RPORT_SM_FWRSP: bfa_stats(rp, sm_fwd_rsp); bfa_sm_set_state(rp, bfa_rport_sm_offline); bfa_rport_offline_cb(rp); break; case BFA_RPORT_SM_DELETE: bfa_stats(rp, sm_fwd_del); bfa_sm_set_state(rp, bfa_rport_sm_deleting); break; case BFA_RPORT_SM_HWFAIL: bfa_stats(rp, sm_fwd_hwf); bfa_sm_set_state(rp, bfa_rport_sm_iocdisable); bfa_rport_offline_cb(rp); break; default: bfa_stats(rp, sm_fwd_unexp); bfa_sm_fault(rp->bfa, event); } } static void bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event) { bfa_trc(rp->bfa, rp->rport_tag); bfa_trc(rp->bfa, event); switch (event) { case BFA_RPORT_SM_QRESUME: bfa_sm_set_state(rp, bfa_rport_sm_fwdelete); bfa_rport_send_fwdelete(rp); break; case BFA_RPORT_SM_DELETE: bfa_stats(rp, sm_fwd_del); bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull); break; case BFA_RPORT_SM_HWFAIL: bfa_stats(rp, sm_fwd_hwf); bfa_sm_set_state(rp, bfa_rport_sm_iocdisable); bfa_reqq_wcancel(&rp->reqq_wait); bfa_rport_offline_cb(rp); break; default: bfa_stats(rp, sm_fwd_unexp); bfa_sm_fault(rp->bfa, event); } } /* * Offline state. */ static void bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event) { bfa_trc(rp->bfa, rp->rport_tag); bfa_trc(rp->bfa, event); switch (event) { case BFA_RPORT_SM_DELETE: bfa_stats(rp, sm_off_del); bfa_sm_set_state(rp, bfa_rport_sm_uninit); bfa_rport_free(rp); break; case BFA_RPORT_SM_ONLINE: bfa_stats(rp, sm_off_on); if (bfa_rport_send_fwcreate(rp)) bfa_sm_set_state(rp, bfa_rport_sm_fwcreate); else bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull); break; case BFA_RPORT_SM_HWFAIL: bfa_stats(rp, sm_off_hwf); bfa_sm_set_state(rp, bfa_rport_sm_iocdisable); break; case BFA_RPORT_SM_OFFLINE: bfa_rport_offline_cb(rp); break; default: bfa_stats(rp, sm_off_unexp); bfa_sm_fault(rp->bfa, event); } } /* * Rport is deleted, waiting for firmware response to delete. */ static void bfa_rport_sm_deleting(struct bfa_rport_s *rp, enum bfa_rport_event event) { bfa_trc(rp->bfa, rp->rport_tag); bfa_trc(rp->bfa, event); switch (event) { case BFA_RPORT_SM_FWRSP: bfa_stats(rp, sm_del_fwrsp); bfa_sm_set_state(rp, bfa_rport_sm_uninit); bfa_rport_free(rp); break; case BFA_RPORT_SM_HWFAIL: bfa_stats(rp, sm_del_hwf); bfa_sm_set_state(rp, bfa_rport_sm_uninit); bfa_rport_free(rp); break; default: bfa_sm_fault(rp->bfa, event); } } static void bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event) { bfa_trc(rp->bfa, rp->rport_tag); bfa_trc(rp->bfa, event); switch (event) { case BFA_RPORT_SM_QRESUME: bfa_stats(rp, sm_del_fwrsp); bfa_sm_set_state(rp, bfa_rport_sm_deleting); bfa_rport_send_fwdelete(rp); break; case BFA_RPORT_SM_HWFAIL: bfa_stats(rp, sm_del_hwf); bfa_sm_set_state(rp, bfa_rport_sm_uninit); bfa_reqq_wcancel(&rp->reqq_wait); bfa_rport_free(rp); break; default: bfa_sm_fault(rp->bfa, event); } } /* * Waiting for rport create response from firmware. A delete is pending. 
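* Once the create completes (FWRSP), a firmware delete is issued immediately; HWFAIL frees the rport.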
*/ static void bfa_rport_sm_delete_pending(struct bfa_rport_s *rp, enum bfa_rport_event event) { bfa_trc(rp->bfa, rp->rport_tag); bfa_trc(rp->bfa, event); switch (event) { case BFA_RPORT_SM_FWRSP: bfa_stats(rp, sm_delp_fwrsp); if (bfa_rport_send_fwdelete(rp)) bfa_sm_set_state(rp, bfa_rport_sm_deleting); else bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull); break; case BFA_RPORT_SM_HWFAIL: bfa_stats(rp, sm_delp_hwf); bfa_sm_set_state(rp, bfa_rport_sm_uninit); bfa_rport_free(rp); break; default: bfa_stats(rp, sm_delp_unexp); bfa_sm_fault(rp->bfa, event); } } /* * Waiting for rport create response from firmware. Rport offline is pending. */ static void bfa_rport_sm_offline_pending(struct bfa_rport_s *rp, enum bfa_rport_event event) { bfa_trc(rp->bfa, rp->rport_tag); bfa_trc(rp->bfa, event); switch (event) { case BFA_RPORT_SM_FWRSP: bfa_stats(rp, sm_offp_fwrsp); if (bfa_rport_send_fwdelete(rp)) bfa_sm_set_state(rp, bfa_rport_sm_fwdelete); else bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull); break; case BFA_RPORT_SM_DELETE: bfa_stats(rp, sm_offp_del); bfa_sm_set_state(rp, bfa_rport_sm_delete_pending); break; case BFA_RPORT_SM_HWFAIL: bfa_stats(rp, sm_offp_hwf); bfa_sm_set_state(rp, bfa_rport_sm_iocdisable); bfa_rport_offline_cb(rp); break; default: bfa_stats(rp, sm_offp_unexp); bfa_sm_fault(rp->bfa, event); } } /* * IOC h/w failed. */ static void bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, enum bfa_rport_event event) { bfa_trc(rp->bfa, rp->rport_tag); bfa_trc(rp->bfa, event); switch (event) { case BFA_RPORT_SM_OFFLINE: bfa_stats(rp, sm_iocd_off); bfa_rport_offline_cb(rp); break; case BFA_RPORT_SM_DELETE: bfa_stats(rp, sm_iocd_del); bfa_sm_set_state(rp, bfa_rport_sm_uninit); bfa_rport_free(rp); break; case BFA_RPORT_SM_ONLINE: bfa_stats(rp, sm_iocd_on); if (bfa_rport_send_fwcreate(rp)) bfa_sm_set_state(rp, bfa_rport_sm_fwcreate); else bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull); break; case BFA_RPORT_SM_HWFAIL: break; default: bfa_stats(rp, sm_iocd_unexp); bfa_sm_fault(rp->bfa, event); } } /* * bfa_rport_private BFA rport private functions */ static void __bfa_cb_rport_online(void *cbarg, bfa_boolean_t complete) { struct bfa_rport_s *rp = cbarg; if (complete) bfa_cb_rport_online(rp->rport_drv); } static void __bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete) { struct bfa_rport_s *rp = cbarg; if (complete) bfa_cb_rport_offline(rp->rport_drv); } static void bfa_rport_qresume(void *cbarg) { struct bfa_rport_s *rp = cbarg; bfa_sm_send_event(rp, BFA_RPORT_SM_QRESUME); } void bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo, struct bfa_s *bfa) { struct bfa_mem_kva_s *rport_kva = BFA_MEM_RPORT_KVA(bfa); if (cfg->fwcfg.num_rports < BFA_RPORT_MIN) cfg->fwcfg.num_rports = BFA_RPORT_MIN; /* kva memory */ bfa_mem_kva_setup(minfo, rport_kva, cfg->fwcfg.num_rports * sizeof(struct bfa_rport_s)); } void bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, struct bfa_pcidev_s *pcidev) { struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa); struct bfa_rport_s *rp; u16 i; INIT_LIST_HEAD(&mod->rp_free_q); INIT_LIST_HEAD(&mod->rp_active_q); INIT_LIST_HEAD(&mod->rp_unused_q); rp = (struct bfa_rport_s *) bfa_mem_kva_curp(mod); mod->rps_list = rp; mod->num_rports = cfg->fwcfg.num_rports; WARN_ON(!mod->num_rports || (mod->num_rports & (mod->num_rports - 1))); for (i = 0; i < mod->num_rports; i++, rp++) { memset(rp, 0, sizeof(struct bfa_rport_s)); rp->bfa = bfa; rp->rport_tag = i; bfa_sm_set_state(rp, bfa_rport_sm_uninit); /* * - is unused */ if (i) 
list_add_tail(&rp->qe, &mod->rp_free_q); bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp); } /* * consume memory */ bfa_mem_kva_curp(mod) = (u8 *) rp; } void bfa_rport_iocdisable(struct bfa_s *bfa) { struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa); struct bfa_rport_s *rport; struct list_head *qe, *qen; /* Enqueue unused rport resources to free_q */ list_splice_tail_init(&mod->rp_unused_q, &mod->rp_free_q); list_for_each_safe(qe, qen, &mod->rp_active_q) { rport = (struct bfa_rport_s *) qe; bfa_sm_send_event(rport, BFA_RPORT_SM_HWFAIL); } } static struct bfa_rport_s * bfa_rport_alloc(struct bfa_rport_mod_s *mod) { struct bfa_rport_s *rport; bfa_q_deq(&mod->rp_free_q, &rport); if (rport) list_add_tail(&rport->qe, &mod->rp_active_q); return rport; } static void bfa_rport_free(struct bfa_rport_s *rport) { struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(rport->bfa); WARN_ON(!bfa_q_is_on_q(&mod->rp_active_q, rport)); list_del(&rport->qe); list_add_tail(&rport->qe, &mod->rp_free_q); } static bfa_boolean_t bfa_rport_send_fwcreate(struct bfa_rport_s *rp) { struct bfi_rport_create_req_s *m; /* * check for room in queue to send request now */ m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT); if (!m) { bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait); return BFA_FALSE; } bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ, bfa_fn_lpu(rp->bfa)); m->bfa_handle = rp->rport_tag; m->max_frmsz = cpu_to_be16(rp->rport_info.max_frmsz); m->pid = rp->rport_info.pid; m->lp_fwtag = bfa_lps_get_fwtag(rp->bfa, (u8)rp->rport_info.lp_tag); m->local_pid = rp->rport_info.local_pid; m->fc_class = rp->rport_info.fc_class; m->vf_en = rp->rport_info.vf_en; m->vf_id = rp->rport_info.vf_id; m->cisc = rp->rport_info.cisc; /* * queue I/O message to firmware */ bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh); return BFA_TRUE; } static bfa_boolean_t bfa_rport_send_fwdelete(struct bfa_rport_s *rp) { struct bfi_rport_delete_req_s *m; /* * check for room in queue to send request now */ m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT); if (!m) { bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait); return BFA_FALSE; } bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_DELETE_REQ, bfa_fn_lpu(rp->bfa)); m->fw_handle = rp->fw_handle; /* * queue I/O message to firmware */ bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh); return BFA_TRUE; } static bfa_boolean_t bfa_rport_send_fwspeed(struct bfa_rport_s *rp) { struct bfa_rport_speed_req_s *m; /* * check for room in queue to send request now */ m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT); if (!m) { bfa_trc(rp->bfa, rp->rport_info.speed); return BFA_FALSE; } bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_SET_SPEED_REQ, bfa_fn_lpu(rp->bfa)); m->fw_handle = rp->fw_handle; m->speed = (u8)rp->rport_info.speed; /* * queue I/O message to firmware */ bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh); return BFA_TRUE; } /* * bfa_rport_public */ /* * Rport interrupt processing. 
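* Dispatches firmware create/delete responses and QoS SCNs to the rport state machine, and forwards LIP SCN events to the FCS callbacks.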
*/ void bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m) { union bfi_rport_i2h_msg_u msg; struct bfa_rport_s *rp; bfa_trc(bfa, m->mhdr.msg_id); msg.msg = m; switch (m->mhdr.msg_id) { case BFI_RPORT_I2H_CREATE_RSP: rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle); rp->fw_handle = msg.create_rsp->fw_handle; rp->qos_attr = msg.create_rsp->qos_attr; bfa_rport_set_lunmask(bfa, rp); WARN_ON(msg.create_rsp->status != BFA_STATUS_OK); bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP); break; case BFI_RPORT_I2H_DELETE_RSP: rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle); WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK); bfa_rport_unset_lunmask(bfa, rp); bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP); break; case BFI_RPORT_I2H_QOS_SCN: rp = BFA_RPORT_FROM_TAG(bfa, msg.qos_scn_evt->bfa_handle); rp->event_arg.fw_msg = msg.qos_scn_evt; bfa_sm_send_event(rp, BFA_RPORT_SM_QOS_SCN); break; case BFI_RPORT_I2H_LIP_SCN_ONLINE: bfa_fcport_update_loop_info(BFA_FCPORT_MOD(bfa), &msg.lip_scn->loop_info); bfa_cb_rport_scn_online(bfa); break; case BFI_RPORT_I2H_LIP_SCN_OFFLINE: bfa_cb_rport_scn_offline(bfa); break; case BFI_RPORT_I2H_NO_DEV: rp = BFA_RPORT_FROM_TAG(bfa, msg.lip_scn->bfa_handle); bfa_cb_rport_scn_no_dev(rp->rport_drv); break; default: bfa_trc(bfa, m->mhdr.msg_id); WARN_ON(1); } } void bfa_rport_res_recfg(struct bfa_s *bfa, u16 num_rport_fw) { struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa); struct list_head *qe; int i; for (i = 0; i < (mod->num_rports - num_rport_fw); i++) { bfa_q_deq_tail(&mod->rp_free_q, &qe); list_add_tail(qe, &mod->rp_unused_q); } } /* * bfa_rport_api */ struct bfa_rport_s * bfa_rport_create(struct bfa_s *bfa, void *rport_drv) { struct bfa_rport_s *rp; rp = bfa_rport_alloc(BFA_RPORT_MOD(bfa)); if (rp == NULL) return NULL; rp->bfa = bfa; rp->rport_drv = rport_drv; memset(&rp->stats, 0, sizeof(rp->stats)); WARN_ON(!bfa_sm_cmp_state(rp, bfa_rport_sm_uninit)); bfa_sm_send_event(rp, BFA_RPORT_SM_CREATE); return rp; } void bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info) { WARN_ON(rport_info->max_frmsz == 0); /* * Some JBODs are seen to be not setting PDU size correctly in PLOGI * responses. Default to minimum size. 
*/ if (rport_info->max_frmsz == 0) { bfa_trc(rport->bfa, rport->rport_tag); rport_info->max_frmsz = FC_MIN_PDUSZ; } rport->rport_info = *rport_info; bfa_sm_send_event(rport, BFA_RPORT_SM_ONLINE); } void bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed) { WARN_ON(speed == 0); WARN_ON(speed == BFA_PORT_SPEED_AUTO); if (rport) { rport->rport_info.speed = speed; bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED); } } /* Set Rport LUN Mask */ void bfa_rport_set_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp) { struct bfa_lps_mod_s *lps_mod = BFA_LPS_MOD(bfa); wwn_t lp_wwn, rp_wwn; u8 lp_tag = (u8)rp->rport_info.lp_tag; rp_wwn = ((struct bfa_fcs_rport_s *)rp->rport_drv)->pwwn; lp_wwn = (BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag))->pwwn; BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag)->lun_mask = rp->lun_mask = BFA_TRUE; bfa_fcpim_lunmask_rp_update(bfa, lp_wwn, rp_wwn, rp->rport_tag, lp_tag); } /* Unset Rport LUN mask */ void bfa_rport_unset_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp) { struct bfa_lps_mod_s *lps_mod = BFA_LPS_MOD(bfa); wwn_t lp_wwn, rp_wwn; rp_wwn = ((struct bfa_fcs_rport_s *)rp->rport_drv)->pwwn; lp_wwn = (BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag))->pwwn; BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag)->lun_mask = rp->lun_mask = BFA_FALSE; bfa_fcpim_lunmask_rp_update(bfa, lp_wwn, rp_wwn, BFA_RPORT_TAG_INVALID, BFA_LP_TAG_INVALID); } /* * SGPG related functions */ /* * Compute and return memory needed by FCP(im) module. */ void bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo, struct bfa_s *bfa) { struct bfa_sgpg_mod_s *sgpg_mod = BFA_SGPG_MOD(bfa); struct bfa_mem_kva_s *sgpg_kva = BFA_MEM_SGPG_KVA(bfa); struct bfa_mem_dma_s *seg_ptr; u16 nsegs, idx, per_seg_sgpg, num_sgpg; u32 sgpg_sz = sizeof(struct bfi_sgpg_s); if (cfg->drvcfg.num_sgpgs < BFA_SGPG_MIN) cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN; else if (cfg->drvcfg.num_sgpgs > BFA_SGPG_MAX) cfg->drvcfg.num_sgpgs = BFA_SGPG_MAX; num_sgpg = cfg->drvcfg.num_sgpgs; nsegs = BFI_MEM_DMA_NSEGS(num_sgpg, sgpg_sz); per_seg_sgpg = BFI_MEM_NREQS_SEG(sgpg_sz); bfa_mem_dma_seg_iter(sgpg_mod, seg_ptr, nsegs, idx) { if (num_sgpg >= per_seg_sgpg) { num_sgpg -= per_seg_sgpg; bfa_mem_dma_setup(minfo, seg_ptr, per_seg_sgpg * sgpg_sz); } else bfa_mem_dma_setup(minfo, seg_ptr, num_sgpg * sgpg_sz); } /* kva memory */ bfa_mem_kva_setup(minfo, sgpg_kva, cfg->drvcfg.num_sgpgs * sizeof(struct bfa_sgpg_s)); } void bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, struct bfa_pcidev_s *pcidev) { struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa); struct bfa_sgpg_s *hsgpg; struct bfi_sgpg_s *sgpg; u64 align_len; struct bfa_mem_dma_s *seg_ptr; u32 sgpg_sz = sizeof(struct bfi_sgpg_s); u16 i, idx, nsegs, per_seg_sgpg, num_sgpg; union { u64 pa; union bfi_addr_u addr; } sgpg_pa, sgpg_pa_tmp; INIT_LIST_HEAD(&mod->sgpg_q); INIT_LIST_HEAD(&mod->sgpg_wait_q); bfa_trc(bfa, cfg->drvcfg.num_sgpgs); mod->free_sgpgs = mod->num_sgpgs = cfg->drvcfg.num_sgpgs; num_sgpg = cfg->drvcfg.num_sgpgs; nsegs = BFI_MEM_DMA_NSEGS(num_sgpg, sgpg_sz); /* dma/kva mem claim */ hsgpg = (struct bfa_sgpg_s *) bfa_mem_kva_curp(mod); bfa_mem_dma_seg_iter(mod, seg_ptr, nsegs, idx) { if (!bfa_mem_dma_virt(seg_ptr)) break; align_len = BFA_SGPG_ROUNDUP(bfa_mem_dma_phys(seg_ptr)) - bfa_mem_dma_phys(seg_ptr); sgpg = (struct bfi_sgpg_s *) (((u8 *) bfa_mem_dma_virt(seg_ptr)) + align_len); sgpg_pa.pa = bfa_mem_dma_phys(seg_ptr) + align_len; WARN_ON(sgpg_pa.pa & (sgpg_sz - 1)); per_seg_sgpg = (seg_ptr->mem_len - 
(u32)align_len) / sgpg_sz; for (i = 0; num_sgpg > 0 && i < per_seg_sgpg; i++, num_sgpg--) { memset(hsgpg, 0, sizeof(*hsgpg)); memset(sgpg, 0, sizeof(*sgpg)); hsgpg->sgpg = sgpg; sgpg_pa_tmp.pa = bfa_sgaddr_le(sgpg_pa.pa); hsgpg->sgpg_pa = sgpg_pa_tmp.addr; list_add_tail(&hsgpg->qe, &mod->sgpg_q); sgpg++; hsgpg++; sgpg_pa.pa += sgpg_sz; } } bfa_mem_kva_curp(mod) = (u8 *) hsgpg; } bfa_status_t bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs) { struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa); struct bfa_sgpg_s *hsgpg; int i; if (mod->free_sgpgs < nsgpgs) return BFA_STATUS_ENOMEM; for (i = 0; i < nsgpgs; i++) { bfa_q_deq(&mod->sgpg_q, &hsgpg); WARN_ON(!hsgpg); list_add_tail(&hsgpg->qe, sgpg_q); } mod->free_sgpgs -= nsgpgs; return BFA_STATUS_OK; } void bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpg) { struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa); struct bfa_sgpg_wqe_s *wqe; mod->free_sgpgs += nsgpg; WARN_ON(mod->free_sgpgs > mod->num_sgpgs); list_splice_tail_init(sgpg_q, &mod->sgpg_q); if (list_empty(&mod->sgpg_wait_q)) return; /* * satisfy as many waiting requests as possible */ do { wqe = bfa_q_first(&mod->sgpg_wait_q); if (mod->free_sgpgs < wqe->nsgpg) nsgpg = mod->free_sgpgs; else nsgpg = wqe->nsgpg; bfa_sgpg_malloc(bfa, &wqe->sgpg_q, nsgpg); wqe->nsgpg -= nsgpg; if (wqe->nsgpg == 0) { list_del(&wqe->qe); wqe->cbfn(wqe->cbarg); } } while (mod->free_sgpgs && !list_empty(&mod->sgpg_wait_q)); } void bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpg) { struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa); WARN_ON(nsgpg <= 0); WARN_ON(nsgpg <= mod->free_sgpgs); wqe->nsgpg_total = wqe->nsgpg = nsgpg; /* * allocate any left to this one first */ if (mod->free_sgpgs) { /* * no one else is waiting for SGPG */ WARN_ON(!list_empty(&mod->sgpg_wait_q)); list_splice_tail_init(&mod->sgpg_q, &wqe->sgpg_q); wqe->nsgpg -= mod->free_sgpgs; mod->free_sgpgs = 0; } list_add_tail(&wqe->qe, &mod->sgpg_wait_q); } void bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe) { struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa); WARN_ON(!bfa_q_is_on_q(&mod->sgpg_wait_q, wqe)); list_del(&wqe->qe); if (wqe->nsgpg_total != wqe->nsgpg) bfa_sgpg_mfree(bfa, &wqe->sgpg_q, wqe->nsgpg_total - wqe->nsgpg); } void bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe, void (*cbfn) (void *cbarg), void *cbarg) { INIT_LIST_HEAD(&wqe->sgpg_q); wqe->cbfn = cbfn; wqe->cbarg = cbarg; } /* * UF related functions */ /* ***************************************************************************** * Internal functions ***************************************************************************** */ static void __bfa_cb_uf_recv(void *cbarg, bfa_boolean_t complete) { struct bfa_uf_s *uf = cbarg; struct bfa_uf_mod_s *ufm = BFA_UF_MOD(uf->bfa); if (complete) ufm->ufrecv(ufm->cbarg, uf); } static void claim_uf_post_msgs(struct bfa_uf_mod_s *ufm) { struct bfi_uf_buf_post_s *uf_bp_msg; u16 i; u16 buf_len; ufm->uf_buf_posts = (struct bfi_uf_buf_post_s *) bfa_mem_kva_curp(ufm); uf_bp_msg = ufm->uf_buf_posts; for (i = 0, uf_bp_msg = ufm->uf_buf_posts; i < ufm->num_ufs; i++, uf_bp_msg++) { memset(uf_bp_msg, 0, sizeof(struct bfi_uf_buf_post_s)); uf_bp_msg->buf_tag = i; buf_len = sizeof(struct bfa_uf_buf_s); uf_bp_msg->buf_len = cpu_to_be16(buf_len); bfi_h2i_set(uf_bp_msg->mh, BFI_MC_UF, BFI_UF_H2I_BUF_POST, bfa_fn_lpu(ufm->bfa)); bfa_alen_set(&uf_bp_msg->alen, buf_len, ufm_pbs_pa(ufm, i)); } /* * advance pointer beyond consumed memory */ bfa_mem_kva_curp(ufm) = (u8 *) uf_bp_msg; } static void 
claim_ufs(struct bfa_uf_mod_s *ufm) { u16 i; struct bfa_uf_s *uf; /* * Claim block of memory for UF list */ ufm->uf_list = (struct bfa_uf_s *) bfa_mem_kva_curp(ufm); /* * Initialize UFs and queue it in UF free queue */ for (i = 0, uf = ufm->uf_list; i < ufm->num_ufs; i++, uf++) { memset(uf, 0, sizeof(struct bfa_uf_s)); uf->bfa = ufm->bfa; uf->uf_tag = i; uf->pb_len = BFA_PER_UF_DMA_SZ; uf->buf_kva = bfa_mem_get_dmabuf_kva(ufm, i, BFA_PER_UF_DMA_SZ); uf->buf_pa = ufm_pbs_pa(ufm, i); list_add_tail(&uf->qe, &ufm->uf_free_q); } /* * advance memory pointer */ bfa_mem_kva_curp(ufm) = (u8 *) uf; } static void uf_mem_claim(struct bfa_uf_mod_s *ufm) { claim_ufs(ufm); claim_uf_post_msgs(ufm); } void bfa_uf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo, struct bfa_s *bfa) { struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa); struct bfa_mem_kva_s *uf_kva = BFA_MEM_UF_KVA(bfa); u32 num_ufs = cfg->fwcfg.num_uf_bufs; struct bfa_mem_dma_s *seg_ptr; u16 nsegs, idx, per_seg_uf = 0; nsegs = BFI_MEM_DMA_NSEGS(num_ufs, BFA_PER_UF_DMA_SZ); per_seg_uf = BFI_MEM_NREQS_SEG(BFA_PER_UF_DMA_SZ); bfa_mem_dma_seg_iter(ufm, seg_ptr, nsegs, idx) { if (num_ufs >= per_seg_uf) { num_ufs -= per_seg_uf; bfa_mem_dma_setup(minfo, seg_ptr, per_seg_uf * BFA_PER_UF_DMA_SZ); } else bfa_mem_dma_setup(minfo, seg_ptr, num_ufs * BFA_PER_UF_DMA_SZ); } /* kva memory */ bfa_mem_kva_setup(minfo, uf_kva, cfg->fwcfg.num_uf_bufs * (sizeof(struct bfa_uf_s) + sizeof(struct bfi_uf_buf_post_s))); } void bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, struct bfa_pcidev_s *pcidev) { struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa); ufm->bfa = bfa; ufm->num_ufs = cfg->fwcfg.num_uf_bufs; INIT_LIST_HEAD(&ufm->uf_free_q); INIT_LIST_HEAD(&ufm->uf_posted_q); INIT_LIST_HEAD(&ufm->uf_unused_q); uf_mem_claim(ufm); } static struct bfa_uf_s * bfa_uf_get(struct bfa_uf_mod_s *uf_mod) { struct bfa_uf_s *uf; bfa_q_deq(&uf_mod->uf_free_q, &uf); return uf; } static void bfa_uf_put(struct bfa_uf_mod_s *uf_mod, struct bfa_uf_s *uf) { list_add_tail(&uf->qe, &uf_mod->uf_free_q); } static bfa_status_t bfa_uf_post(struct bfa_uf_mod_s *ufm, struct bfa_uf_s *uf) { struct bfi_uf_buf_post_s *uf_post_msg; uf_post_msg = bfa_reqq_next(ufm->bfa, BFA_REQQ_FCXP); if (!uf_post_msg) return BFA_STATUS_FAILED; memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag], sizeof(struct bfi_uf_buf_post_s)); bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP, uf_post_msg->mh); bfa_trc(ufm->bfa, uf->uf_tag); list_add_tail(&uf->qe, &ufm->uf_posted_q); return BFA_STATUS_OK; } static void bfa_uf_post_all(struct bfa_uf_mod_s *uf_mod) { struct bfa_uf_s *uf; while ((uf = bfa_uf_get(uf_mod)) != NULL) { if (bfa_uf_post(uf_mod, uf) != BFA_STATUS_OK) break; } } static void uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m) { struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa); u16 uf_tag = m->buf_tag; struct bfa_uf_s *uf = &ufm->uf_list[uf_tag]; struct bfa_uf_buf_s *uf_buf; uint8_t *buf; uf_buf = (struct bfa_uf_buf_s *) bfa_mem_get_dmabuf_kva(ufm, uf_tag, uf->pb_len); buf = &uf_buf->d[0]; m->frm_len = be16_to_cpu(m->frm_len); m->xfr_len = be16_to_cpu(m->xfr_len); list_del(&uf->qe); /* dequeue from posted queue */ uf->data_ptr = buf; uf->data_len = m->xfr_len; WARN_ON(uf->data_len < sizeof(struct fchs_s)); if (uf->data_len == sizeof(struct fchs_s)) { bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_UF, BFA_PL_EID_RX, uf->data_len, (struct fchs_s *)buf); } else { u32 pld_w0 = *((u32 *) (buf + sizeof(struct fchs_s))); bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_UF, BFA_PL_EID_RX, uf->data_len, 
(struct fchs_s *)buf, pld_w0); } if (bfa->fcs) __bfa_cb_uf_recv(uf, BFA_TRUE); else bfa_cb_queue(bfa, &uf->hcb_qe, __bfa_cb_uf_recv, uf); } void bfa_uf_iocdisable(struct bfa_s *bfa) { struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa); struct bfa_uf_s *uf; struct list_head *qe, *qen; /* Enqueue unused uf resources to free_q */ list_splice_tail_init(&ufm->uf_unused_q, &ufm->uf_free_q); list_for_each_safe(qe, qen, &ufm->uf_posted_q) { uf = (struct bfa_uf_s *) qe; list_del(&uf->qe); bfa_uf_put(ufm, uf); } } void bfa_uf_start(struct bfa_s *bfa) { bfa_uf_post_all(BFA_UF_MOD(bfa)); } /* * Register handler for all unsolicited receive frames. * * @param[in] bfa BFA instance * @param[in] ufrecv receive handler function * @param[in] cbarg receive handler arg */ void bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv, void *cbarg) { struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa); ufm->ufrecv = ufrecv; ufm->cbarg = cbarg; } /* * Free an unsolicited frame back to BFA. * * @param[in] uf unsolicited frame to be freed * * @return None */ void bfa_uf_free(struct bfa_uf_s *uf) { bfa_uf_put(BFA_UF_MOD(uf->bfa), uf); bfa_uf_post_all(BFA_UF_MOD(uf->bfa)); } /* * uf_pub BFA uf module public functions */ void bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg) { bfa_trc(bfa, msg->mhdr.msg_id); switch (msg->mhdr.msg_id) { case BFI_UF_I2H_FRM_RCVD: uf_recv(bfa, (struct bfi_uf_frm_rcvd_s *) msg); break; default: bfa_trc(bfa, msg->mhdr.msg_id); WARN_ON(1); } } void bfa_uf_res_recfg(struct bfa_s *bfa, u16 num_uf_fw) { struct bfa_uf_mod_s *mod = BFA_UF_MOD(bfa); struct list_head *qe; int i; for (i = 0; i < (mod->num_ufs - num_uf_fw); i++) { bfa_q_deq_tail(&mod->uf_free_q, &qe); list_add_tail(qe, &mod->uf_unused_q); } } /* * Dport forward declaration */ enum bfa_dport_test_state_e { BFA_DPORT_ST_DISABLED = 0, /*!< dport is disabled */ BFA_DPORT_ST_INP = 1, /*!< test in progress */ BFA_DPORT_ST_COMP = 2, /*!< test complete successfully */ BFA_DPORT_ST_NO_SFP = 3, /*!< sfp is not present */ BFA_DPORT_ST_NOTSTART = 4, /*!< test not started, dport is enabled */ }; /* * BFA DPORT state machine events */ enum bfa_dport_sm_event { BFA_DPORT_SM_ENABLE = 1, /* dport enable event */ BFA_DPORT_SM_DISABLE = 2, /* dport disable event */ BFA_DPORT_SM_FWRSP = 3, /* fw enable/disable rsp */ BFA_DPORT_SM_QRESUME = 4, /* CQ space available */ BFA_DPORT_SM_HWFAIL = 5, /* IOC h/w failure */ BFA_DPORT_SM_START = 6, /* re-start dport test */ BFA_DPORT_SM_REQFAIL = 7, /* request failure */ BFA_DPORT_SM_SCN = 8, /* state change notify from fw */ }; static void bfa_dport_sm_disabled(struct bfa_dport_s *dport, enum bfa_dport_sm_event event); static void bfa_dport_sm_enabling_qwait(struct bfa_dport_s *dport, enum bfa_dport_sm_event event); static void bfa_dport_sm_enabling(struct bfa_dport_s *dport, enum bfa_dport_sm_event event); static void bfa_dport_sm_enabled(struct bfa_dport_s *dport, enum bfa_dport_sm_event event); static void bfa_dport_sm_disabling_qwait(struct bfa_dport_s *dport, enum bfa_dport_sm_event event); static void bfa_dport_sm_disabling(struct bfa_dport_s *dport, enum bfa_dport_sm_event event); static void bfa_dport_sm_starting_qwait(struct bfa_dport_s *dport, enum bfa_dport_sm_event event); static void bfa_dport_sm_starting(struct bfa_dport_s *dport, enum bfa_dport_sm_event event); static void bfa_dport_sm_dynamic_disabling(struct bfa_dport_s *dport, enum bfa_dport_sm_event event); static void bfa_dport_sm_dynamic_disabling_qwait(struct bfa_dport_s *dport, enum bfa_dport_sm_event event); static void bfa_dport_qresume(void *cbarg);
static void bfa_dport_req_comp(struct bfa_dport_s *dport, struct bfi_diag_dport_rsp_s *msg); static void bfa_dport_scn(struct bfa_dport_s *dport, struct bfi_diag_dport_scn_s *msg); /* * BFA fcdiag module */ #define BFA_DIAG_QTEST_TOV 1000 /* msec */ /* * Set port status to busy */ static void bfa_fcdiag_set_busy_status(struct bfa_fcdiag_s *fcdiag) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(fcdiag->bfa); if (fcdiag->lb.lock) fcport->diag_busy = BFA_TRUE; else fcport->diag_busy = BFA_FALSE; } void bfa_fcdiag_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, struct bfa_pcidev_s *pcidev) { struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa); struct bfa_dport_s *dport = &fcdiag->dport; fcdiag->bfa = bfa; fcdiag->trcmod = bfa->trcmod; /* The common DIAG attach bfa_diag_attach() will do all memory claim */ dport->bfa = bfa; bfa_sm_set_state(dport, bfa_dport_sm_disabled); bfa_reqq_winit(&dport->reqq_wait, bfa_dport_qresume, dport); dport->cbfn = NULL; dport->cbarg = NULL; dport->test_state = BFA_DPORT_ST_DISABLED; memset(&dport->result, 0, sizeof(struct bfa_diag_dport_result_s)); } void bfa_fcdiag_iocdisable(struct bfa_s *bfa) { struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa); struct bfa_dport_s *dport = &fcdiag->dport; bfa_trc(fcdiag, fcdiag->lb.lock); if (fcdiag->lb.lock) { fcdiag->lb.status = BFA_STATUS_IOC_FAILURE; fcdiag->lb.cbfn(fcdiag->lb.cbarg, fcdiag->lb.status); fcdiag->lb.lock = 0; bfa_fcdiag_set_busy_status(fcdiag); } bfa_sm_send_event(dport, BFA_DPORT_SM_HWFAIL); } static void bfa_fcdiag_queuetest_timeout(void *cbarg) { struct bfa_fcdiag_s *fcdiag = cbarg; struct bfa_diag_qtest_result_s *res = fcdiag->qtest.result; bfa_trc(fcdiag, fcdiag->qtest.all); bfa_trc(fcdiag, fcdiag->qtest.count); fcdiag->qtest.timer_active = 0; res->status = BFA_STATUS_ETIMER; res->count = QTEST_CNT_DEFAULT - fcdiag->qtest.count; if (fcdiag->qtest.all) res->queue = fcdiag->qtest.all; bfa_trc(fcdiag, BFA_STATUS_ETIMER); fcdiag->qtest.status = BFA_STATUS_ETIMER; fcdiag->qtest.cbfn(fcdiag->qtest.cbarg, fcdiag->qtest.status); fcdiag->qtest.lock = 0; } static bfa_status_t bfa_fcdiag_queuetest_send(struct bfa_fcdiag_s *fcdiag) { u32 i; struct bfi_diag_qtest_req_s *req; req = bfa_reqq_next(fcdiag->bfa, fcdiag->qtest.queue); if (!req) return BFA_STATUS_DEVBUSY; /* build host command */ bfi_h2i_set(req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_QTEST, bfa_fn_lpu(fcdiag->bfa)); for (i = 0; i < BFI_LMSG_PL_WSZ; i++) req->data[i] = QTEST_PAT_DEFAULT; bfa_trc(fcdiag, fcdiag->qtest.queue); /* ring door bell */ bfa_reqq_produce(fcdiag->bfa, fcdiag->qtest.queue, req->mh); return BFA_STATUS_OK; } static void bfa_fcdiag_queuetest_comp(struct bfa_fcdiag_s *fcdiag, bfi_diag_qtest_rsp_t *rsp) { struct bfa_diag_qtest_result_s *res = fcdiag->qtest.result; bfa_status_t status = BFA_STATUS_OK; int i; /* Check timer, should still be active */ if (!fcdiag->qtest.timer_active) { bfa_trc(fcdiag, fcdiag->qtest.timer_active); return; } /* update count */ fcdiag->qtest.count--; /* Check result */ for (i = 0; i < BFI_LMSG_PL_WSZ; i++) { if (rsp->data[i] != ~(QTEST_PAT_DEFAULT)) { res->status = BFA_STATUS_DATACORRUPTED; break; } } if (res->status == BFA_STATUS_OK) { if (fcdiag->qtest.count > 0) { status = bfa_fcdiag_queuetest_send(fcdiag); if (status == BFA_STATUS_OK) return; else res->status = status; } else if (fcdiag->qtest.all > 0 && fcdiag->qtest.queue < (BFI_IOC_MAX_CQS - 1)) { fcdiag->qtest.count = QTEST_CNT_DEFAULT; fcdiag->qtest.queue++; status = bfa_fcdiag_queuetest_send(fcdiag); if (status == BFA_STATUS_OK) return; else 
res->status = status; } } /* Stop timer when we comp all queue */ if (fcdiag->qtest.timer_active) { bfa_timer_stop(&fcdiag->qtest.timer); fcdiag->qtest.timer_active = 0; } res->queue = fcdiag->qtest.queue; res->count = QTEST_CNT_DEFAULT - fcdiag->qtest.count; bfa_trc(fcdiag, res->count); bfa_trc(fcdiag, res->status); fcdiag->qtest.status = res->status; fcdiag->qtest.cbfn(fcdiag->qtest.cbarg, fcdiag->qtest.status); fcdiag->qtest.lock = 0; } static void bfa_fcdiag_loopback_comp(struct bfa_fcdiag_s *fcdiag, struct bfi_diag_lb_rsp_s *rsp) { struct bfa_diag_loopback_result_s *res = fcdiag->lb.result; res->numtxmfrm = be32_to_cpu(rsp->res.numtxmfrm); res->numosffrm = be32_to_cpu(rsp->res.numosffrm); res->numrcvfrm = be32_to_cpu(rsp->res.numrcvfrm); res->badfrminf = be32_to_cpu(rsp->res.badfrminf); res->badfrmnum = be32_to_cpu(rsp->res.badfrmnum); res->status = rsp->res.status; fcdiag->lb.status = rsp->res.status; bfa_trc(fcdiag, fcdiag->lb.status); fcdiag->lb.cbfn(fcdiag->lb.cbarg, fcdiag->lb.status); fcdiag->lb.lock = 0; bfa_fcdiag_set_busy_status(fcdiag); } static bfa_status_t bfa_fcdiag_loopback_send(struct bfa_fcdiag_s *fcdiag, struct bfa_diag_loopback_s *loopback) { struct bfi_diag_lb_req_s *lb_req; lb_req = bfa_reqq_next(fcdiag->bfa, BFA_REQQ_DIAG); if (!lb_req) return BFA_STATUS_DEVBUSY; /* build host command */ bfi_h2i_set(lb_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LOOPBACK, bfa_fn_lpu(fcdiag->bfa)); lb_req->lb_mode = loopback->lb_mode; lb_req->speed = loopback->speed; lb_req->loopcnt = loopback->loopcnt; lb_req->pattern = loopback->pattern; /* ring door bell */ bfa_reqq_produce(fcdiag->bfa, BFA_REQQ_DIAG, lb_req->mh); bfa_trc(fcdiag, loopback->lb_mode); bfa_trc(fcdiag, loopback->speed); bfa_trc(fcdiag, loopback->loopcnt); bfa_trc(fcdiag, loopback->pattern); return BFA_STATUS_OK; } /* * cpe/rme intr handler */ void bfa_fcdiag_intr(struct bfa_s *bfa, struct bfi_msg_s *msg) { struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa); switch (msg->mhdr.msg_id) { case BFI_DIAG_I2H_LOOPBACK: bfa_fcdiag_loopback_comp(fcdiag, (struct bfi_diag_lb_rsp_s *) msg); break; case BFI_DIAG_I2H_QTEST: bfa_fcdiag_queuetest_comp(fcdiag, (bfi_diag_qtest_rsp_t *)msg); break; case BFI_DIAG_I2H_DPORT: bfa_dport_req_comp(&fcdiag->dport, (struct bfi_diag_dport_rsp_s *)msg); break; case BFI_DIAG_I2H_DPORT_SCN: bfa_dport_scn(&fcdiag->dport, (struct bfi_diag_dport_scn_s *)msg); break; default: bfa_trc(fcdiag, msg->mhdr.msg_id); WARN_ON(1); } } /* * Loopback test * * @param[in] *bfa - bfa data struct * @param[in] opmode - port operation mode * @param[in] speed - port speed * @param[in] lpcnt - loop count * @param[in] pat - pattern to build packet * @param[in] *result - pt to bfa_diag_loopback_result_t data struct * @param[in] cbfn - callback function * @param[in] cbarg - callback functioin arg * * @param[out] */ bfa_status_t bfa_fcdiag_loopback(struct bfa_s *bfa, enum bfa_port_opmode opmode, enum bfa_port_speed speed, u32 lpcnt, u32 pat, struct bfa_diag_loopback_result_s *result, bfa_cb_diag_t cbfn, void *cbarg) { struct bfa_diag_loopback_s loopback; struct bfa_port_attr_s attr; bfa_status_t status; struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa); if (!bfa_iocfc_is_operational(bfa)) return BFA_STATUS_IOC_NON_OP; /* if port is PBC disabled, return error */ if (bfa_fcport_is_pbcdisabled(bfa)) { bfa_trc(fcdiag, BFA_STATUS_PBC); return BFA_STATUS_PBC; } if (bfa_fcport_is_disabled(bfa) == BFA_FALSE) { bfa_trc(fcdiag, opmode); return BFA_STATUS_PORT_NOT_DISABLED; } /* * Check if input speed is supported by the port mode */ if 
(bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) { if (!(speed == BFA_PORT_SPEED_1GBPS || speed == BFA_PORT_SPEED_2GBPS || speed == BFA_PORT_SPEED_4GBPS || speed == BFA_PORT_SPEED_8GBPS || speed == BFA_PORT_SPEED_16GBPS || speed == BFA_PORT_SPEED_AUTO)) { bfa_trc(fcdiag, speed); return BFA_STATUS_UNSUPP_SPEED; } bfa_fcport_get_attr(bfa, &attr); bfa_trc(fcdiag, attr.speed_supported); if (speed > attr.speed_supported) return BFA_STATUS_UNSUPP_SPEED; } else { if (speed != BFA_PORT_SPEED_10GBPS) { bfa_trc(fcdiag, speed); return BFA_STATUS_UNSUPP_SPEED; } } /* * For CT2, 1G is not supported */ if ((speed == BFA_PORT_SPEED_1GBPS) && (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id))) { bfa_trc(fcdiag, speed); return BFA_STATUS_UNSUPP_SPEED; } /* For Mezz card, port speed entered needs to be checked */ if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type)) { if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) { if (!(speed == BFA_PORT_SPEED_1GBPS || speed == BFA_PORT_SPEED_2GBPS || speed == BFA_PORT_SPEED_4GBPS || speed == BFA_PORT_SPEED_8GBPS || speed == BFA_PORT_SPEED_16GBPS || speed == BFA_PORT_SPEED_AUTO)) return BFA_STATUS_UNSUPP_SPEED; } else { if (speed != BFA_PORT_SPEED_10GBPS) return BFA_STATUS_UNSUPP_SPEED; } } /* check to see if fcport is dport */ if (bfa_fcport_is_dport(bfa)) { bfa_trc(fcdiag, fcdiag->lb.lock); return BFA_STATUS_DPORT_ENABLED; } /* check to see if there is another destructive diag cmd running */ if (fcdiag->lb.lock) { bfa_trc(fcdiag, fcdiag->lb.lock); return BFA_STATUS_DEVBUSY; } fcdiag->lb.lock = 1; loopback.lb_mode = opmode; loopback.speed = speed; loopback.loopcnt = lpcnt; loopback.pattern = pat; fcdiag->lb.result = result; fcdiag->lb.cbfn = cbfn; fcdiag->lb.cbarg = cbarg; memset(result, 0, sizeof(struct bfa_diag_loopback_result_s)); bfa_fcdiag_set_busy_status(fcdiag); /* Send msg to fw */ status = bfa_fcdiag_loopback_send(fcdiag, &loopback); return status; } /* * DIAG queue test command * * @param[in] *bfa - bfa data struct * @param[in] force - 1: don't do ioc op checking * @param[in] queue - queue no. 
to test * @param[in] *result - pointer to bfa_diag_qtest_result_t data struct * @param[in] cbfn - callback function * @param[in] *cbarg - callback function arg * * @param[out] */ bfa_status_t bfa_fcdiag_queuetest(struct bfa_s *bfa, u32 force, u32 queue, struct bfa_diag_qtest_result_s *result, bfa_cb_diag_t cbfn, void *cbarg) { struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa); bfa_status_t status; bfa_trc(fcdiag, force); bfa_trc(fcdiag, queue); if (!force && !bfa_iocfc_is_operational(bfa)) return BFA_STATUS_IOC_NON_OP; /* check to see if there is another destructive diag cmd running */ if (fcdiag->qtest.lock) { bfa_trc(fcdiag, fcdiag->qtest.lock); return BFA_STATUS_DEVBUSY; } /* Initialization */ fcdiag->qtest.lock = 1; fcdiag->qtest.cbfn = cbfn; fcdiag->qtest.cbarg = cbarg; fcdiag->qtest.result = result; fcdiag->qtest.count = QTEST_CNT_DEFAULT; /* Init test results */ fcdiag->qtest.result->status = BFA_STATUS_OK; fcdiag->qtest.result->count = 0; /* send */ if (queue < BFI_IOC_MAX_CQS) { fcdiag->qtest.result->queue = (u8)queue; fcdiag->qtest.queue = (u8)queue; fcdiag->qtest.all = 0; } else { fcdiag->qtest.result->queue = 0; fcdiag->qtest.queue = 0; fcdiag->qtest.all = 1; } status = bfa_fcdiag_queuetest_send(fcdiag); /* Start a timer */ if (status == BFA_STATUS_OK) { bfa_timer_start(bfa, &fcdiag->qtest.timer, bfa_fcdiag_queuetest_timeout, fcdiag, BFA_DIAG_QTEST_TOV); fcdiag->qtest.timer_active = 1; } return status; } /* * DIAG PLB is running * * @param[in] *bfa - bfa data struct * * @param[out] */ bfa_status_t bfa_fcdiag_lb_is_running(struct bfa_s *bfa) { struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa); return fcdiag->lb.lock ? BFA_STATUS_DIAG_BUSY : BFA_STATUS_OK; } /* * D-port */ #define bfa_dport_result_start(__dport, __mode) do { \ (__dport)->result.start_time = ktime_get_real_seconds(); \ (__dport)->result.status = DPORT_TEST_ST_INPRG; \ (__dport)->result.mode = (__mode); \ (__dport)->result.rp_pwwn = (__dport)->rp_pwwn; \ (__dport)->result.rp_nwwn = (__dport)->rp_nwwn; \ (__dport)->result.lpcnt = (__dport)->lpcnt; \ } while (0) static bfa_boolean_t bfa_dport_send_req(struct bfa_dport_s *dport, enum bfi_dport_req req); static void bfa_cb_fcdiag_dport(struct bfa_dport_s *dport, bfa_status_t bfa_status) { if (dport->cbfn != NULL) { dport->cbfn(dport->cbarg, bfa_status); dport->cbfn = NULL; dport->cbarg = NULL; } } static void bfa_dport_sm_disabled(struct bfa_dport_s *dport, enum bfa_dport_sm_event event) { bfa_trc(dport->bfa, event); switch (event) { case BFA_DPORT_SM_ENABLE: bfa_fcport_dportenable(dport->bfa); if (bfa_dport_send_req(dport, BFI_DPORT_ENABLE)) bfa_sm_set_state(dport, bfa_dport_sm_enabling); else bfa_sm_set_state(dport, bfa_dport_sm_enabling_qwait); break; case BFA_DPORT_SM_DISABLE: /* Already disabled */ break; case BFA_DPORT_SM_HWFAIL: /* ignore */ break; case BFA_DPORT_SM_SCN: if (dport->i2hmsg.scn.state == BFI_DPORT_SCN_DDPORT_ENABLE) { bfa_fcport_ddportenable(dport->bfa); dport->dynamic = BFA_TRUE; dport->test_state = BFA_DPORT_ST_NOTSTART; bfa_sm_set_state(dport, bfa_dport_sm_enabled); } else { bfa_trc(dport->bfa, dport->i2hmsg.scn.state); WARN_ON(1); } break; default: bfa_sm_fault(dport->bfa, event); } } static void bfa_dport_sm_enabling_qwait(struct bfa_dport_s *dport, enum bfa_dport_sm_event event) { bfa_trc(dport->bfa, event); switch (event) { case BFA_DPORT_SM_QRESUME: bfa_sm_set_state(dport, bfa_dport_sm_enabling); bfa_dport_send_req(dport, BFI_DPORT_ENABLE); break; case BFA_DPORT_SM_HWFAIL: bfa_reqq_wcancel(&dport->reqq_wait); bfa_sm_set_state(dport,
bfa_dport_sm_disabled); bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED); break; default: bfa_sm_fault(dport->bfa, event); } } static void bfa_dport_sm_enabling(struct bfa_dport_s *dport, enum bfa_dport_sm_event event) { bfa_trc(dport->bfa, event); switch (event) { case BFA_DPORT_SM_FWRSP: memset(&dport->result, 0, sizeof(struct bfa_diag_dport_result_s)); if (dport->i2hmsg.rsp.status == BFA_STATUS_DPORT_INV_SFP) { dport->test_state = BFA_DPORT_ST_NO_SFP; } else { dport->test_state = BFA_DPORT_ST_INP; bfa_dport_result_start(dport, BFA_DPORT_OPMODE_AUTO); } bfa_sm_set_state(dport, bfa_dport_sm_enabled); break; case BFA_DPORT_SM_REQFAIL: dport->test_state = BFA_DPORT_ST_DISABLED; bfa_fcport_dportdisable(dport->bfa); bfa_sm_set_state(dport, bfa_dport_sm_disabled); break; case BFA_DPORT_SM_HWFAIL: bfa_sm_set_state(dport, bfa_dport_sm_disabled); bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED); break; default: bfa_sm_fault(dport->bfa, event); } } static void bfa_dport_sm_enabled(struct bfa_dport_s *dport, enum bfa_dport_sm_event event) { bfa_trc(dport->bfa, event); switch (event) { case BFA_DPORT_SM_START: if (bfa_dport_send_req(dport, BFI_DPORT_START)) bfa_sm_set_state(dport, bfa_dport_sm_starting); else bfa_sm_set_state(dport, bfa_dport_sm_starting_qwait); break; case BFA_DPORT_SM_DISABLE: bfa_fcport_dportdisable(dport->bfa); if (bfa_dport_send_req(dport, BFI_DPORT_DISABLE)) bfa_sm_set_state(dport, bfa_dport_sm_disabling); else bfa_sm_set_state(dport, bfa_dport_sm_disabling_qwait); break; case BFA_DPORT_SM_HWFAIL: bfa_sm_set_state(dport, bfa_dport_sm_disabled); break; case BFA_DPORT_SM_SCN: switch (dport->i2hmsg.scn.state) { case BFI_DPORT_SCN_TESTCOMP: dport->test_state = BFA_DPORT_ST_COMP; break; case BFI_DPORT_SCN_TESTSTART: dport->test_state = BFA_DPORT_ST_INP; break; case BFI_DPORT_SCN_TESTSKIP: case BFI_DPORT_SCN_SUBTESTSTART: /* no state change */ break; case BFI_DPORT_SCN_SFP_REMOVED: dport->test_state = BFA_DPORT_ST_NO_SFP; break; case BFI_DPORT_SCN_DDPORT_DISABLE: bfa_fcport_ddportdisable(dport->bfa); if (bfa_dport_send_req(dport, BFI_DPORT_DYN_DISABLE)) bfa_sm_set_state(dport, bfa_dport_sm_dynamic_disabling); else bfa_sm_set_state(dport, bfa_dport_sm_dynamic_disabling_qwait); break; case BFI_DPORT_SCN_FCPORT_DISABLE: bfa_fcport_ddportdisable(dport->bfa); bfa_sm_set_state(dport, bfa_dport_sm_disabled); dport->dynamic = BFA_FALSE; break; default: bfa_trc(dport->bfa, dport->i2hmsg.scn.state); bfa_sm_fault(dport->bfa, event); } break; default: bfa_sm_fault(dport->bfa, event); } } static void bfa_dport_sm_disabling_qwait(struct bfa_dport_s *dport, enum bfa_dport_sm_event event) { bfa_trc(dport->bfa, event); switch (event) { case BFA_DPORT_SM_QRESUME: bfa_sm_set_state(dport, bfa_dport_sm_disabling); bfa_dport_send_req(dport, BFI_DPORT_DISABLE); break; case BFA_DPORT_SM_HWFAIL: bfa_sm_set_state(dport, bfa_dport_sm_disabled); bfa_reqq_wcancel(&dport->reqq_wait); bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK); break; case BFA_DPORT_SM_SCN: /* ignore */ break; default: bfa_sm_fault(dport->bfa, event); } } static void bfa_dport_sm_disabling(struct bfa_dport_s *dport, enum bfa_dport_sm_event event) { bfa_trc(dport->bfa, event); switch (event) { case BFA_DPORT_SM_FWRSP: dport->test_state = BFA_DPORT_ST_DISABLED; bfa_sm_set_state(dport, bfa_dport_sm_disabled); break; case BFA_DPORT_SM_HWFAIL: bfa_sm_set_state(dport, bfa_dport_sm_disabled); bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK); break; case BFA_DPORT_SM_SCN: /* no state change */ break; default: bfa_sm_fault(dport->bfa, event); } } static void 
bfa_dport_sm_starting_qwait(struct bfa_dport_s *dport, enum bfa_dport_sm_event event) { bfa_trc(dport->bfa, event); switch (event) { case BFA_DPORT_SM_QRESUME: bfa_sm_set_state(dport, bfa_dport_sm_starting); bfa_dport_send_req(dport, BFI_DPORT_START); break; case BFA_DPORT_SM_HWFAIL: bfa_reqq_wcancel(&dport->reqq_wait); bfa_sm_set_state(dport, bfa_dport_sm_disabled); bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED); break; default: bfa_sm_fault(dport->bfa, event); } } static void bfa_dport_sm_starting(struct bfa_dport_s *dport, enum bfa_dport_sm_event event) { bfa_trc(dport->bfa, event); switch (event) { case BFA_DPORT_SM_FWRSP: memset(&dport->result, 0, sizeof(struct bfa_diag_dport_result_s)); if (dport->i2hmsg.rsp.status == BFA_STATUS_DPORT_INV_SFP) { dport->test_state = BFA_DPORT_ST_NO_SFP; } else { dport->test_state = BFA_DPORT_ST_INP; bfa_dport_result_start(dport, BFA_DPORT_OPMODE_MANU); } fallthrough; case BFA_DPORT_SM_REQFAIL: bfa_sm_set_state(dport, bfa_dport_sm_enabled); break; case BFA_DPORT_SM_HWFAIL: bfa_sm_set_state(dport, bfa_dport_sm_disabled); bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED); break; default: bfa_sm_fault(dport->bfa, event); } } static void bfa_dport_sm_dynamic_disabling(struct bfa_dport_s *dport, enum bfa_dport_sm_event event) { bfa_trc(dport->bfa, event); switch (event) { case BFA_DPORT_SM_SCN: switch (dport->i2hmsg.scn.state) { case BFI_DPORT_SCN_DDPORT_DISABLED: bfa_sm_set_state(dport, bfa_dport_sm_disabled); dport->dynamic = BFA_FALSE; bfa_fcport_enable(dport->bfa); break; default: bfa_trc(dport->bfa, dport->i2hmsg.scn.state); bfa_sm_fault(dport->bfa, event); } break; case BFA_DPORT_SM_HWFAIL: bfa_sm_set_state(dport, bfa_dport_sm_disabled); bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK); break; default: bfa_sm_fault(dport->bfa, event); } } static void bfa_dport_sm_dynamic_disabling_qwait(struct bfa_dport_s *dport, enum bfa_dport_sm_event event) { bfa_trc(dport->bfa, event); switch (event) { case BFA_DPORT_SM_QRESUME: bfa_sm_set_state(dport, bfa_dport_sm_dynamic_disabling); bfa_dport_send_req(dport, BFI_DPORT_DYN_DISABLE); break; case BFA_DPORT_SM_HWFAIL: bfa_sm_set_state(dport, bfa_dport_sm_disabled); bfa_reqq_wcancel(&dport->reqq_wait); bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK); break; case BFA_DPORT_SM_SCN: /* ignore */ break; default: bfa_sm_fault(dport->bfa, event); } } static bfa_boolean_t bfa_dport_send_req(struct bfa_dport_s *dport, enum bfi_dport_req req) { struct bfi_diag_dport_req_s *m; /* * check for room in queue to send request now */ m = bfa_reqq_next(dport->bfa, BFA_REQQ_DIAG); if (!m) { bfa_reqq_wait(dport->bfa, BFA_REQQ_PORT, &dport->reqq_wait); return BFA_FALSE; } bfi_h2i_set(m->mh, BFI_MC_DIAG, BFI_DIAG_H2I_DPORT, bfa_fn_lpu(dport->bfa)); m->req = req; if ((req == BFI_DPORT_ENABLE) || (req == BFI_DPORT_START)) { m->lpcnt = cpu_to_be32(dport->lpcnt); m->payload = cpu_to_be32(dport->payload); } /* * queue I/O message to firmware */ bfa_reqq_produce(dport->bfa, BFA_REQQ_DIAG, m->mh); return BFA_TRUE; } static void bfa_dport_qresume(void *cbarg) { struct bfa_dport_s *dport = cbarg; bfa_sm_send_event(dport, BFA_DPORT_SM_QRESUME); } static void bfa_dport_req_comp(struct bfa_dport_s *dport, struct bfi_diag_dport_rsp_s *msg) { msg->status = cpu_to_be32(msg->status); dport->i2hmsg.rsp.status = msg->status; dport->rp_pwwn = msg->pwwn; dport->rp_nwwn = msg->nwwn; if ((msg->status == BFA_STATUS_OK) || (msg->status == BFA_STATUS_DPORT_NO_SFP)) { bfa_trc(dport->bfa, msg->status); bfa_trc(dport->bfa, dport->rp_pwwn); bfa_trc(dport->bfa, dport->rp_nwwn); 
bfa_sm_send_event(dport, BFA_DPORT_SM_FWRSP); } else { bfa_trc(dport->bfa, msg->status); bfa_sm_send_event(dport, BFA_DPORT_SM_REQFAIL); } bfa_cb_fcdiag_dport(dport, msg->status); } static bfa_boolean_t bfa_dport_is_sending_req(struct bfa_dport_s *dport) { if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabling) || bfa_sm_cmp_state(dport, bfa_dport_sm_enabling_qwait) || bfa_sm_cmp_state(dport, bfa_dport_sm_disabling) || bfa_sm_cmp_state(dport, bfa_dport_sm_disabling_qwait) || bfa_sm_cmp_state(dport, bfa_dport_sm_starting) || bfa_sm_cmp_state(dport, bfa_dport_sm_starting_qwait)) { return BFA_TRUE; } else { return BFA_FALSE; } } static void bfa_dport_scn(struct bfa_dport_s *dport, struct bfi_diag_dport_scn_s *msg) { int i; uint8_t subtesttype; bfa_trc(dport->bfa, msg->state); dport->i2hmsg.scn.state = msg->state; switch (dport->i2hmsg.scn.state) { case BFI_DPORT_SCN_TESTCOMP: dport->result.end_time = ktime_get_real_seconds(); bfa_trc(dport->bfa, dport->result.end_time); dport->result.status = msg->info.testcomp.status; bfa_trc(dport->bfa, dport->result.status); dport->result.roundtrip_latency = cpu_to_be32(msg->info.testcomp.latency); dport->result.est_cable_distance = cpu_to_be32(msg->info.testcomp.distance); dport->result.buffer_required = be16_to_cpu(msg->info.testcomp.numbuffer); dport->result.frmsz = be16_to_cpu(msg->info.testcomp.frm_sz); dport->result.speed = msg->info.testcomp.speed; bfa_trc(dport->bfa, dport->result.roundtrip_latency); bfa_trc(dport->bfa, dport->result.est_cable_distance); bfa_trc(dport->bfa, dport->result.buffer_required); bfa_trc(dport->bfa, dport->result.frmsz); bfa_trc(dport->bfa, dport->result.speed); for (i = DPORT_TEST_ELOOP; i < DPORT_TEST_MAX; i++) { dport->result.subtest[i].status = msg->info.testcomp.subtest_status[i]; bfa_trc(dport->bfa, dport->result.subtest[i].status); } break; case BFI_DPORT_SCN_TESTSKIP: case BFI_DPORT_SCN_DDPORT_ENABLE: memset(&dport->result, 0, sizeof(struct bfa_diag_dport_result_s)); break; case BFI_DPORT_SCN_TESTSTART: memset(&dport->result, 0, sizeof(struct bfa_diag_dport_result_s)); dport->rp_pwwn = msg->info.teststart.pwwn; dport->rp_nwwn = msg->info.teststart.nwwn; dport->lpcnt = cpu_to_be32(msg->info.teststart.numfrm); bfa_dport_result_start(dport, msg->info.teststart.mode); break; case BFI_DPORT_SCN_SUBTESTSTART: subtesttype = msg->info.teststart.type; dport->result.subtest[subtesttype].start_time = ktime_get_real_seconds(); dport->result.subtest[subtesttype].status = DPORT_TEST_ST_INPRG; bfa_trc(dport->bfa, subtesttype); bfa_trc(dport->bfa, dport->result.subtest[subtesttype].start_time); break; case BFI_DPORT_SCN_SFP_REMOVED: case BFI_DPORT_SCN_DDPORT_DISABLED: case BFI_DPORT_SCN_DDPORT_DISABLE: case BFI_DPORT_SCN_FCPORT_DISABLE: dport->result.status = DPORT_TEST_ST_IDLE; break; default: bfa_sm_fault(dport->bfa, msg->state); } bfa_sm_send_event(dport, BFA_DPORT_SM_SCN); } /* * Dport enable * * @param[in] *bfa - bfa data struct */ bfa_status_t bfa_dport_enable(struct bfa_s *bfa, u32 lpcnt, u32 pat, bfa_cb_diag_t cbfn, void *cbarg) { struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa); struct bfa_dport_s *dport = &fcdiag->dport; /* * Dport is not supported on MEZZ cards */ if (bfa_mfg_is_mezz(dport->bfa->ioc.attr->card_type)) { bfa_trc(dport->bfa, BFA_STATUS_PBC); return BFA_STATUS_CMD_NOTSUPP_MEZZ; } /* * Dport is supported in CT2 or above */ if (!(bfa_asic_id_ct2(dport->bfa->ioc.pcidev.device_id))) { bfa_trc(dport->bfa, dport->bfa->ioc.pcidev.device_id); return BFA_STATUS_FEATURE_NOT_SUPPORTED; } /* * Check to see if IOC is down */
if (!bfa_iocfc_is_operational(bfa)) return BFA_STATUS_IOC_NON_OP; /* if port is PBC disabled, return error */ if (bfa_fcport_is_pbcdisabled(bfa)) { bfa_trc(dport->bfa, BFA_STATUS_PBC); return BFA_STATUS_PBC; } /* * Check if port mode is FC port */ if (bfa_ioc_get_type(&bfa->ioc) != BFA_IOC_TYPE_FC) { bfa_trc(dport->bfa, bfa_ioc_get_type(&bfa->ioc)); return BFA_STATUS_CMD_NOTSUPP_CNA; } /* * Check if port is in LOOP mode */ if ((bfa_fcport_get_cfg_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP) || (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP)) { bfa_trc(dport->bfa, 0); return BFA_STATUS_TOPOLOGY_LOOP; } /* * Check if port is in TRUNK mode */ if (bfa_fcport_is_trunk_enabled(bfa)) { bfa_trc(dport->bfa, 0); return BFA_STATUS_ERROR_TRUNK_ENABLED; } /* * Check if diag loopback is running */ if (bfa_fcdiag_lb_is_running(bfa)) { bfa_trc(dport->bfa, 0); return BFA_STATUS_DIAG_BUSY; } /* * Check to see if port is disabled or in dport state */ if ((bfa_fcport_is_disabled(bfa) == BFA_FALSE) && (bfa_fcport_is_dport(bfa) == BFA_FALSE)) { bfa_trc(dport->bfa, 0); return BFA_STATUS_PORT_NOT_DISABLED; } /* * Check if dport is in dynamic mode */ if (dport->dynamic) return BFA_STATUS_DDPORT_ERR; /* * Check if dport is busy */ if (bfa_dport_is_sending_req(dport)) return BFA_STATUS_DEVBUSY; /* * Check if dport is already enabled */ if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabled)) { bfa_trc(dport->bfa, 0); return BFA_STATUS_DPORT_ENABLED; } bfa_trc(dport->bfa, lpcnt); bfa_trc(dport->bfa, pat); dport->lpcnt = (lpcnt) ? lpcnt : DPORT_ENABLE_LOOPCNT_DEFAULT; dport->payload = (pat) ? pat : LB_PATTERN_DEFAULT; dport->cbfn = cbfn; dport->cbarg = cbarg; bfa_sm_send_event(dport, BFA_DPORT_SM_ENABLE); return BFA_STATUS_OK; } /* * Dport disable * * @param[in] *bfa - bfa data struct */ bfa_status_t bfa_dport_disable(struct bfa_s *bfa, bfa_cb_diag_t cbfn, void *cbarg) { struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa); struct bfa_dport_s *dport = &fcdiag->dport; if (bfa_ioc_is_disabled(&bfa->ioc)) return BFA_STATUS_IOC_DISABLED; /* if port is PBC disabled, return error */ if (bfa_fcport_is_pbcdisabled(bfa)) { bfa_trc(dport->bfa, BFA_STATUS_PBC); return BFA_STATUS_PBC; } /* * Check if dport is in dynamic mode */ if (dport->dynamic) { return BFA_STATUS_DDPORT_ERR; } /* * Check to see if port is disabled or in dport state */ if ((bfa_fcport_is_disabled(bfa) == BFA_FALSE) && (bfa_fcport_is_dport(bfa) == BFA_FALSE)) { bfa_trc(dport->bfa, 0); return BFA_STATUS_PORT_NOT_DISABLED; } /* * Check if dport is busy */ if (bfa_dport_is_sending_req(dport)) return BFA_STATUS_DEVBUSY; /* * Check if dport is already disabled */ if (bfa_sm_cmp_state(dport, bfa_dport_sm_disabled)) { bfa_trc(dport->bfa, 0); return BFA_STATUS_DPORT_DISABLED; } dport->cbfn = cbfn; dport->cbarg = cbarg; bfa_sm_send_event(dport, BFA_DPORT_SM_DISABLE); return BFA_STATUS_OK; } /* * Dport start -- restart dport test * * @param[in] *bfa - bfa data struct */ bfa_status_t bfa_dport_start(struct bfa_s *bfa, u32 lpcnt, u32 pat, bfa_cb_diag_t cbfn, void *cbarg) { struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa); struct bfa_dport_s *dport = &fcdiag->dport; /* * Check to see if IOC is down */ if (!bfa_iocfc_is_operational(bfa)) return BFA_STATUS_IOC_NON_OP; /* * Check if dport is in dynamic mode */ if (dport->dynamic) return BFA_STATUS_DDPORT_ERR; /* * Check if dport is busy */ if (bfa_dport_is_sending_req(dport)) return BFA_STATUS_DEVBUSY; /* * Check if dport is in enabled state.
* Test can only be restarted when the previous test has completed */ if (!bfa_sm_cmp_state(dport, bfa_dport_sm_enabled)) { bfa_trc(dport->bfa, 0); return BFA_STATUS_DPORT_DISABLED; } else { if (dport->test_state == BFA_DPORT_ST_NO_SFP) return BFA_STATUS_DPORT_INV_SFP; if (dport->test_state == BFA_DPORT_ST_INP) return BFA_STATUS_DEVBUSY; WARN_ON(dport->test_state != BFA_DPORT_ST_COMP); } bfa_trc(dport->bfa, lpcnt); bfa_trc(dport->bfa, pat); dport->lpcnt = (lpcnt) ? lpcnt : DPORT_ENABLE_LOOPCNT_DEFAULT; dport->payload = (pat) ? pat : LB_PATTERN_DEFAULT; dport->cbfn = cbfn; dport->cbarg = cbarg; bfa_sm_send_event(dport, BFA_DPORT_SM_START); return BFA_STATUS_OK; } /* * Dport show -- return dport test result * * @param[in] *bfa - bfa data struct */ bfa_status_t bfa_dport_show(struct bfa_s *bfa, struct bfa_diag_dport_result_s *result) { struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa); struct bfa_dport_s *dport = &fcdiag->dport; /* * Check to see if IOC is down */ if (!bfa_iocfc_is_operational(bfa)) return BFA_STATUS_IOC_NON_OP; /* * Check if dport is busy */ if (bfa_dport_is_sending_req(dport)) return BFA_STATUS_DEVBUSY; /* * Check if dport is in enabled state. */ if (!bfa_sm_cmp_state(dport, bfa_dport_sm_enabled)) { bfa_trc(dport->bfa, 0); return BFA_STATUS_DPORT_DISABLED; } /* * Check if there is SFP */ if (dport->test_state == BFA_DPORT_ST_NO_SFP) return BFA_STATUS_DPORT_INV_SFP; memcpy(result, &dport->result, sizeof(struct bfa_diag_dport_result_s)); return BFA_STATUS_OK; }
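/*
 * Illustrative sketch (editor's addition, not part of the driver): one way a
 * caller could drive the D-port diagnostic API implemented above, assuming a
 * valid struct bfa_s instance whose fcport is already disabled. The
 * sample_dport_*() names below are hypothetical; only the bfa_dport_*()
 * entry points and their return codes come from this file, and the
 * bfa_cb_diag_t signature is inferred from how dport->cbfn is invoked above.
 */
static void
sample_dport_done(void *cbarg, bfa_status_t status)
{
	/* hypothetical completion handler: stash the status for the caller */
	*(bfa_status_t *)cbarg = status;
}

static bfa_status_t
sample_dport_test(struct bfa_s *bfa)
{
	struct bfa_diag_dport_result_s res;
	bfa_status_t done = BFA_STATUS_OK;
	bfa_status_t rc;

	/* 0/0 fall back to DPORT_ENABLE_LOOPCNT_DEFAULT / LB_PATTERN_DEFAULT */
	rc = bfa_dport_enable(bfa, 0, 0, sample_dport_done, &done);
	if (rc != BFA_STATUS_OK)
		return rc;

	/* ... caller waits for sample_dport_done() and the TESTCOMP SCN ... */

	/* copy out the accumulated test result */
	rc = bfa_dport_show(bfa, &res);
	if (rc == BFA_STATUS_OK && res.status != DPORT_TEST_ST_INPRG)
		bfa_trc(bfa, res.status);	/* test finished; result is valid */

	/* leave D-port mode once the result has been consumed */
	return bfa_dport_disable(bfa, sample_dport_done, &done);
}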
linux-master
drivers/scsi/bfa/bfa_svc.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. * Copyright (c) 2014- QLogic Corporation. * All rights reserved * www.qlogic.com * * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. */ #include "bfad_drv.h" #include "bfa_modules.h" #include "bfi_reg.h" BFA_TRC_FILE(HAL, CORE); /* * Message handlers for various modules. */ static bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = { bfa_isr_unhandled, /* NONE */ bfa_isr_unhandled, /* BFI_MC_IOC */ bfa_fcdiag_intr, /* BFI_MC_DIAG */ bfa_isr_unhandled, /* BFI_MC_FLASH */ bfa_isr_unhandled, /* BFI_MC_CEE */ bfa_fcport_isr, /* BFI_MC_FCPORT */ bfa_isr_unhandled, /* BFI_MC_IOCFC */ bfa_isr_unhandled, /* BFI_MC_LL */ bfa_uf_isr, /* BFI_MC_UF */ bfa_fcxp_isr, /* BFI_MC_FCXP */ bfa_lps_isr, /* BFI_MC_LPS */ bfa_rport_isr, /* BFI_MC_RPORT */ bfa_itn_isr, /* BFI_MC_ITN */ bfa_isr_unhandled, /* BFI_MC_IOIM_READ */ bfa_isr_unhandled, /* BFI_MC_IOIM_WRITE */ bfa_isr_unhandled, /* BFI_MC_IOIM_IO */ bfa_ioim_isr, /* BFI_MC_IOIM */ bfa_ioim_good_comp_isr, /* BFI_MC_IOIM_IOCOM */ bfa_tskim_isr, /* BFI_MC_TSKIM */ bfa_isr_unhandled, /* BFI_MC_SBOOT */ bfa_isr_unhandled, /* BFI_MC_IPFC */ bfa_isr_unhandled, /* BFI_MC_PORT */ bfa_isr_unhandled, /* --------- */ bfa_isr_unhandled, /* --------- */ bfa_isr_unhandled, /* --------- */ bfa_isr_unhandled, /* --------- */ bfa_isr_unhandled, /* --------- */ bfa_isr_unhandled, /* --------- */ bfa_isr_unhandled, /* --------- */ bfa_isr_unhandled, /* --------- */ bfa_isr_unhandled, /* --------- */ bfa_isr_unhandled, /* --------- */ }; /* * Message handlers for mailbox command classes */ static bfa_ioc_mbox_mcfunc_t bfa_mbox_isrs[BFI_MC_MAX] = { NULL, NULL, /* BFI_MC_IOC */ NULL, /* BFI_MC_DIAG */ NULL, /* BFI_MC_FLASH */ NULL, /* BFI_MC_CEE */ NULL, /* BFI_MC_PORT */ bfa_iocfc_isr, /* BFI_MC_IOCFC */ NULL, }; void __bfa_trc(struct bfa_trc_mod_s *trcm, int fileno, int line, u64 data) { int tail = trcm->tail; struct bfa_trc_s *trc = &trcm->trc[tail]; if (trcm->stopped) return; trc->fileno = (u16) fileno; trc->line = (u16) line; trc->data.u64 = data; trc->timestamp = BFA_TRC_TS(trcm); trcm->tail = (trcm->tail + 1) & (BFA_TRC_MAX - 1); if (trcm->tail == trcm->head) trcm->head = (trcm->head + 1) & (BFA_TRC_MAX - 1); } static void bfa_com_port_attach(struct bfa_s *bfa) { struct bfa_port_s *port = &bfa->modules.port; struct bfa_mem_dma_s *port_dma = BFA_MEM_PORT_DMA(bfa); bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod); bfa_port_mem_claim(port, port_dma->kva_curp, port_dma->dma_curp); } /* * ablk module attach */ static void bfa_com_ablk_attach(struct bfa_s *bfa) { struct bfa_ablk_s *ablk = &bfa->modules.ablk; struct bfa_mem_dma_s *ablk_dma = BFA_MEM_ABLK_DMA(bfa); bfa_ablk_attach(ablk, &bfa->ioc); bfa_ablk_memclaim(ablk, ablk_dma->kva_curp, ablk_dma->dma_curp); } static void bfa_com_cee_attach(struct bfa_s *bfa) { struct bfa_cee_s *cee = &bfa->modules.cee; struct bfa_mem_dma_s *cee_dma = BFA_MEM_CEE_DMA(bfa); cee->trcmod = bfa->trcmod; bfa_cee_attach(cee, &bfa->ioc, bfa); bfa_cee_mem_claim(cee, cee_dma->kva_curp, cee_dma->dma_curp); } static void bfa_com_sfp_attach(struct bfa_s *bfa) { struct bfa_sfp_s *sfp = BFA_SFP_MOD(bfa); struct bfa_mem_dma_s *sfp_dma = BFA_MEM_SFP_DMA(bfa); bfa_sfp_attach(sfp, &bfa->ioc, bfa, bfa->trcmod); bfa_sfp_memclaim(sfp, sfp_dma->kva_curp, sfp_dma->dma_curp); } static void bfa_com_flash_attach(struct bfa_s *bfa, bfa_boolean_t mincfg) { struct bfa_flash_s *flash = BFA_FLASH(bfa); struct bfa_mem_dma_s *flash_dma = BFA_MEM_FLASH_DMA(bfa); 
bfa_flash_attach(flash, &bfa->ioc, bfa, bfa->trcmod, mincfg); bfa_flash_memclaim(flash, flash_dma->kva_curp, flash_dma->dma_curp, mincfg); } static void bfa_com_diag_attach(struct bfa_s *bfa) { struct bfa_diag_s *diag = BFA_DIAG_MOD(bfa); struct bfa_mem_dma_s *diag_dma = BFA_MEM_DIAG_DMA(bfa); bfa_diag_attach(diag, &bfa->ioc, bfa, bfa_fcport_beacon, bfa->trcmod); bfa_diag_memclaim(diag, diag_dma->kva_curp, diag_dma->dma_curp); } static void bfa_com_phy_attach(struct bfa_s *bfa, bfa_boolean_t mincfg) { struct bfa_phy_s *phy = BFA_PHY(bfa); struct bfa_mem_dma_s *phy_dma = BFA_MEM_PHY_DMA(bfa); bfa_phy_attach(phy, &bfa->ioc, bfa, bfa->trcmod, mincfg); bfa_phy_memclaim(phy, phy_dma->kva_curp, phy_dma->dma_curp, mincfg); } static void bfa_com_fru_attach(struct bfa_s *bfa, bfa_boolean_t mincfg) { struct bfa_fru_s *fru = BFA_FRU(bfa); struct bfa_mem_dma_s *fru_dma = BFA_MEM_FRU_DMA(bfa); bfa_fru_attach(fru, &bfa->ioc, bfa, bfa->trcmod, mincfg); bfa_fru_memclaim(fru, fru_dma->kva_curp, fru_dma->dma_curp, mincfg); } /* * BFA IOC FC related definitions */ /* * IOC local definitions */ #define BFA_IOCFC_TOV 5000 /* msecs */ enum { BFA_IOCFC_ACT_NONE = 0, BFA_IOCFC_ACT_INIT = 1, BFA_IOCFC_ACT_STOP = 2, BFA_IOCFC_ACT_DISABLE = 3, BFA_IOCFC_ACT_ENABLE = 4, }; #define DEF_CFG_NUM_FABRICS 1 #define DEF_CFG_NUM_LPORTS 256 #define DEF_CFG_NUM_CQS 4 #define DEF_CFG_NUM_IOIM_REQS (BFA_IOIM_MAX) #define DEF_CFG_NUM_TSKIM_REQS 128 #define DEF_CFG_NUM_FCXP_REQS 64 #define DEF_CFG_NUM_UF_BUFS 64 #define DEF_CFG_NUM_RPORTS 1024 #define DEF_CFG_NUM_ITNIMS (DEF_CFG_NUM_RPORTS) #define DEF_CFG_NUM_TINS 256 #define DEF_CFG_NUM_SGPGS 2048 #define DEF_CFG_NUM_REQQ_ELEMS 256 #define DEF_CFG_NUM_RSPQ_ELEMS 64 #define DEF_CFG_NUM_SBOOT_TGTS 16 #define DEF_CFG_NUM_SBOOT_LUNS 16 /* * IOCFC state machine definitions/declarations */ bfa_fsm_state_decl(bfa_iocfc, stopped, struct bfa_iocfc_s, enum iocfc_event); bfa_fsm_state_decl(bfa_iocfc, initing, struct bfa_iocfc_s, enum iocfc_event); bfa_fsm_state_decl(bfa_iocfc, dconf_read, struct bfa_iocfc_s, enum iocfc_event); bfa_fsm_state_decl(bfa_iocfc, init_cfg_wait, struct bfa_iocfc_s, enum iocfc_event); bfa_fsm_state_decl(bfa_iocfc, init_cfg_done, struct bfa_iocfc_s, enum iocfc_event); bfa_fsm_state_decl(bfa_iocfc, operational, struct bfa_iocfc_s, enum iocfc_event); bfa_fsm_state_decl(bfa_iocfc, dconf_write, struct bfa_iocfc_s, enum iocfc_event); bfa_fsm_state_decl(bfa_iocfc, stopping, struct bfa_iocfc_s, enum iocfc_event); bfa_fsm_state_decl(bfa_iocfc, enabling, struct bfa_iocfc_s, enum iocfc_event); bfa_fsm_state_decl(bfa_iocfc, cfg_wait, struct bfa_iocfc_s, enum iocfc_event); bfa_fsm_state_decl(bfa_iocfc, disabling, struct bfa_iocfc_s, enum iocfc_event); bfa_fsm_state_decl(bfa_iocfc, disabled, struct bfa_iocfc_s, enum iocfc_event); bfa_fsm_state_decl(bfa_iocfc, failed, struct bfa_iocfc_s, enum iocfc_event); bfa_fsm_state_decl(bfa_iocfc, init_failed, struct bfa_iocfc_s, enum iocfc_event); /* * forward declaration for IOC FC functions */ static void bfa_iocfc_start_submod(struct bfa_s *bfa); static void bfa_iocfc_disable_submod(struct bfa_s *bfa); static void bfa_iocfc_send_cfg(void *bfa_arg); static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status); static void bfa_iocfc_disable_cbfn(void *bfa_arg); static void bfa_iocfc_hbfail_cbfn(void *bfa_arg); static void bfa_iocfc_reset_cbfn(void *bfa_arg); static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn; static void bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete); static void bfa_iocfc_stop_cb(void *bfa_arg, 
bfa_boolean_t compl); static void bfa_iocfc_enable_cb(void *bfa_arg, bfa_boolean_t compl); static void bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl); static void bfa_iocfc_sm_stopped_entry(struct bfa_iocfc_s *iocfc) { } static void bfa_iocfc_sm_stopped(struct bfa_iocfc_s *iocfc, enum iocfc_event event) { bfa_trc(iocfc->bfa, event); switch (event) { case IOCFC_E_INIT: case IOCFC_E_ENABLE: bfa_fsm_set_state(iocfc, bfa_iocfc_sm_initing); break; default: bfa_sm_fault(iocfc->bfa, event); break; } } static void bfa_iocfc_sm_initing_entry(struct bfa_iocfc_s *iocfc) { bfa_ioc_enable(&iocfc->bfa->ioc); } static void bfa_iocfc_sm_initing(struct bfa_iocfc_s *iocfc, enum iocfc_event event) { bfa_trc(iocfc->bfa, event); switch (event) { case IOCFC_E_IOC_ENABLED: bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_read); break; case IOCFC_E_DISABLE: bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling); break; case IOCFC_E_STOP: bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping); break; case IOCFC_E_IOC_FAILED: bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed); break; default: bfa_sm_fault(iocfc->bfa, event); break; } } static void bfa_iocfc_sm_dconf_read_entry(struct bfa_iocfc_s *iocfc) { bfa_dconf_modinit(iocfc->bfa); } static void bfa_iocfc_sm_dconf_read(struct bfa_iocfc_s *iocfc, enum iocfc_event event) { bfa_trc(iocfc->bfa, event); switch (event) { case IOCFC_E_DCONF_DONE: bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_cfg_wait); break; case IOCFC_E_DISABLE: bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling); break; case IOCFC_E_STOP: bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping); break; case IOCFC_E_IOC_FAILED: bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed); break; default: bfa_sm_fault(iocfc->bfa, event); break; } } static void bfa_iocfc_sm_init_cfg_wait_entry(struct bfa_iocfc_s *iocfc) { bfa_iocfc_send_cfg(iocfc->bfa); } static void bfa_iocfc_sm_init_cfg_wait(struct bfa_iocfc_s *iocfc, enum iocfc_event event) { bfa_trc(iocfc->bfa, event); switch (event) { case IOCFC_E_CFG_DONE: bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_cfg_done); break; case IOCFC_E_DISABLE: bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling); break; case IOCFC_E_STOP: bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping); break; case IOCFC_E_IOC_FAILED: bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed); break; default: bfa_sm_fault(iocfc->bfa, event); break; } } static void bfa_iocfc_sm_init_cfg_done_entry(struct bfa_iocfc_s *iocfc) { iocfc->bfa->iocfc.op_status = BFA_STATUS_OK; bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.init_hcb_qe, bfa_iocfc_init_cb, iocfc->bfa); } static void bfa_iocfc_sm_init_cfg_done(struct bfa_iocfc_s *iocfc, enum iocfc_event event) { bfa_trc(iocfc->bfa, event); switch (event) { case IOCFC_E_START: bfa_fsm_set_state(iocfc, bfa_iocfc_sm_operational); break; case IOCFC_E_STOP: bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping); break; case IOCFC_E_DISABLE: bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling); break; case IOCFC_E_IOC_FAILED: bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed); break; default: bfa_sm_fault(iocfc->bfa, event); break; } } static void bfa_iocfc_sm_operational_entry(struct bfa_iocfc_s *iocfc) { bfa_fcport_init(iocfc->bfa); bfa_iocfc_start_submod(iocfc->bfa); } static void bfa_iocfc_sm_operational(struct bfa_iocfc_s *iocfc, enum iocfc_event event) { bfa_trc(iocfc->bfa, event); switch (event) { case IOCFC_E_STOP: bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write); break; case IOCFC_E_DISABLE: bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling); break; case IOCFC_E_IOC_FAILED: bfa_fsm_set_state(iocfc, 
bfa_iocfc_sm_failed); break; default: bfa_sm_fault(iocfc->bfa, event); break; } } static void bfa_iocfc_sm_dconf_write_entry(struct bfa_iocfc_s *iocfc) { bfa_dconf_modexit(iocfc->bfa); } static void bfa_iocfc_sm_dconf_write(struct bfa_iocfc_s *iocfc, enum iocfc_event event) { bfa_trc(iocfc->bfa, event); switch (event) { case IOCFC_E_DCONF_DONE: case IOCFC_E_IOC_FAILED: bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping); break; default: bfa_sm_fault(iocfc->bfa, event); break; } } static void bfa_iocfc_sm_stopping_entry(struct bfa_iocfc_s *iocfc) { bfa_ioc_disable(&iocfc->bfa->ioc); } static void bfa_iocfc_sm_stopping(struct bfa_iocfc_s *iocfc, enum iocfc_event event) { bfa_trc(iocfc->bfa, event); switch (event) { case IOCFC_E_IOC_DISABLED: bfa_isr_disable(iocfc->bfa); bfa_iocfc_disable_submod(iocfc->bfa); bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopped); iocfc->bfa->iocfc.op_status = BFA_STATUS_OK; bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.stop_hcb_qe, bfa_iocfc_stop_cb, iocfc->bfa); break; case IOCFC_E_IOC_ENABLED: case IOCFC_E_DCONF_DONE: case IOCFC_E_CFG_DONE: break; default: bfa_sm_fault(iocfc->bfa, event); break; } } static void bfa_iocfc_sm_enabling_entry(struct bfa_iocfc_s *iocfc) { bfa_ioc_enable(&iocfc->bfa->ioc); } static void bfa_iocfc_sm_enabling(struct bfa_iocfc_s *iocfc, enum iocfc_event event) { bfa_trc(iocfc->bfa, event); switch (event) { case IOCFC_E_IOC_ENABLED: bfa_fsm_set_state(iocfc, bfa_iocfc_sm_cfg_wait); break; case IOCFC_E_DISABLE: bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling); break; case IOCFC_E_STOP: bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write); break; case IOCFC_E_IOC_FAILED: bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed); if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE) break; iocfc->bfa->iocfc.op_status = BFA_STATUS_FAILED; bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.en_hcb_qe, bfa_iocfc_enable_cb, iocfc->bfa); iocfc->bfa->iocfc.cb_reqd = BFA_FALSE; break; default: bfa_sm_fault(iocfc->bfa, event); break; } } static void bfa_iocfc_sm_cfg_wait_entry(struct bfa_iocfc_s *iocfc) { bfa_iocfc_send_cfg(iocfc->bfa); } static void bfa_iocfc_sm_cfg_wait(struct bfa_iocfc_s *iocfc, enum iocfc_event event) { bfa_trc(iocfc->bfa, event); switch (event) { case IOCFC_E_CFG_DONE: bfa_fsm_set_state(iocfc, bfa_iocfc_sm_operational); if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE) break; iocfc->bfa->iocfc.op_status = BFA_STATUS_OK; bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.en_hcb_qe, bfa_iocfc_enable_cb, iocfc->bfa); iocfc->bfa->iocfc.cb_reqd = BFA_FALSE; break; case IOCFC_E_DISABLE: bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling); break; case IOCFC_E_STOP: bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write); break; case IOCFC_E_IOC_FAILED: bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed); if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE) break; iocfc->bfa->iocfc.op_status = BFA_STATUS_FAILED; bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.en_hcb_qe, bfa_iocfc_enable_cb, iocfc->bfa); iocfc->bfa->iocfc.cb_reqd = BFA_FALSE; break; default: bfa_sm_fault(iocfc->bfa, event); break; } } static void bfa_iocfc_sm_disabling_entry(struct bfa_iocfc_s *iocfc) { bfa_ioc_disable(&iocfc->bfa->ioc); } static void bfa_iocfc_sm_disabling(struct bfa_iocfc_s *iocfc, enum iocfc_event event) { bfa_trc(iocfc->bfa, event); switch (event) { case IOCFC_E_IOC_DISABLED: bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabled); break; case IOCFC_E_IOC_ENABLED: case IOCFC_E_DCONF_DONE: case IOCFC_E_CFG_DONE: break; default: bfa_sm_fault(iocfc->bfa, event); break; } } static void bfa_iocfc_sm_disabled_entry(struct bfa_iocfc_s *iocfc) 
{ bfa_isr_disable(iocfc->bfa); bfa_iocfc_disable_submod(iocfc->bfa); iocfc->bfa->iocfc.op_status = BFA_STATUS_OK; bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.dis_hcb_qe, bfa_iocfc_disable_cb, iocfc->bfa); } static void bfa_iocfc_sm_disabled(struct bfa_iocfc_s *iocfc, enum iocfc_event event) { bfa_trc(iocfc->bfa, event); switch (event) { case IOCFC_E_STOP: bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write); break; case IOCFC_E_ENABLE: bfa_fsm_set_state(iocfc, bfa_iocfc_sm_enabling); break; default: bfa_sm_fault(iocfc->bfa, event); break; } } static void bfa_iocfc_sm_failed_entry(struct bfa_iocfc_s *iocfc) { bfa_isr_disable(iocfc->bfa); bfa_iocfc_disable_submod(iocfc->bfa); } static void bfa_iocfc_sm_failed(struct bfa_iocfc_s *iocfc, enum iocfc_event event) { bfa_trc(iocfc->bfa, event); switch (event) { case IOCFC_E_STOP: bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write); break; case IOCFC_E_DISABLE: bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling); break; case IOCFC_E_IOC_ENABLED: bfa_fsm_set_state(iocfc, bfa_iocfc_sm_cfg_wait); break; case IOCFC_E_IOC_FAILED: break; default: bfa_sm_fault(iocfc->bfa, event); break; } } static void bfa_iocfc_sm_init_failed_entry(struct bfa_iocfc_s *iocfc) { bfa_isr_disable(iocfc->bfa); iocfc->bfa->iocfc.op_status = BFA_STATUS_FAILED; bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.init_hcb_qe, bfa_iocfc_init_cb, iocfc->bfa); } static void bfa_iocfc_sm_init_failed(struct bfa_iocfc_s *iocfc, enum iocfc_event event) { bfa_trc(iocfc->bfa, event); switch (event) { case IOCFC_E_STOP: bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping); break; case IOCFC_E_DISABLE: bfa_ioc_disable(&iocfc->bfa->ioc); break; case IOCFC_E_IOC_ENABLED: bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_read); break; case IOCFC_E_IOC_DISABLED: bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopped); iocfc->bfa->iocfc.op_status = BFA_STATUS_OK; bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.dis_hcb_qe, bfa_iocfc_disable_cb, iocfc->bfa); break; case IOCFC_E_IOC_FAILED: break; default: bfa_sm_fault(iocfc->bfa, event); break; } } /* * BFA Interrupt handling functions */ static void bfa_reqq_resume(struct bfa_s *bfa, int qid) { struct list_head *waitq, *qe, *qen; struct bfa_reqq_wait_s *wqe; waitq = bfa_reqq(bfa, qid); list_for_each_safe(qe, qen, waitq) { /* * Callback only as long as there is room in request queue */ if (bfa_reqq_full(bfa, qid)) break; list_del(qe); wqe = (struct bfa_reqq_wait_s *) qe; wqe->qresume(wqe->cbarg); } } static bfa_boolean_t bfa_isr_rspq(struct bfa_s *bfa, int qid) { struct bfi_msg_s *m; u32 pi, ci; struct list_head *waitq; bfa_boolean_t ret; ci = bfa_rspq_ci(bfa, qid); pi = bfa_rspq_pi(bfa, qid); ret = (ci != pi); while (ci != pi) { m = bfa_rspq_elem(bfa, qid, ci); WARN_ON(m->mhdr.msg_class >= BFI_MC_MAX); bfa_isrs[m->mhdr.msg_class] (bfa, m); CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems); } /* * acknowledge RME completions and update CI */ bfa_isr_rspq_ack(bfa, qid, ci); /* * Resume any pending requests in the corresponding reqq. */ waitq = bfa_reqq(bfa, qid); if (!list_empty(waitq)) bfa_reqq_resume(bfa, qid); return ret; } static inline void bfa_isr_reqq(struct bfa_s *bfa, int qid) { struct list_head *waitq; bfa_isr_reqq_ack(bfa, qid); /* * Resume any pending requests in the corresponding reqq. 
*/ waitq = bfa_reqq(bfa, qid); if (!list_empty(waitq)) bfa_reqq_resume(bfa, qid); } void bfa_msix_all(struct bfa_s *bfa, int vec) { u32 intr, qintr; int queue; intr = readl(bfa->iocfc.bfa_regs.intr_status); if (!intr) return; /* * RME completion queue interrupt */ qintr = intr & __HFN_INT_RME_MASK; if (qintr && bfa->queue_process) { for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++) bfa_isr_rspq(bfa, queue); } intr &= ~qintr; if (!intr) return; /* * CPE completion queue interrupt */ qintr = intr & __HFN_INT_CPE_MASK; if (qintr && bfa->queue_process) { for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++) bfa_isr_reqq(bfa, queue); } intr &= ~qintr; if (!intr) return; bfa_msix_lpu_err(bfa, intr); } bfa_boolean_t bfa_intx(struct bfa_s *bfa) { u32 intr, qintr; int queue; bfa_boolean_t rspq_comp = BFA_FALSE; intr = readl(bfa->iocfc.bfa_regs.intr_status); qintr = intr & (__HFN_INT_RME_MASK | __HFN_INT_CPE_MASK); if (qintr) writel(qintr, bfa->iocfc.bfa_regs.intr_status); /* * Unconditional RME completion queue interrupt */ if (bfa->queue_process) { for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++) if (bfa_isr_rspq(bfa, queue)) rspq_comp = BFA_TRUE; } if (!intr) return (qintr | rspq_comp) ? BFA_TRUE : BFA_FALSE; /* * CPE completion queue interrupt */ qintr = intr & __HFN_INT_CPE_MASK; if (qintr && bfa->queue_process) { for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++) bfa_isr_reqq(bfa, queue); } intr &= ~qintr; if (!intr) return BFA_TRUE; if (bfa->intr_enabled) bfa_msix_lpu_err(bfa, intr); return BFA_TRUE; } void bfa_isr_enable(struct bfa_s *bfa) { u32 umsk; int port_id = bfa_ioc_portid(&bfa->ioc); bfa_trc(bfa, bfa_ioc_pcifn(&bfa->ioc)); bfa_trc(bfa, port_id); bfa_msix_ctrl_install(bfa); if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) { umsk = __HFN_INT_ERR_MASK_CT2; umsk |= port_id == 0 ? __HFN_INT_FN0_MASK_CT2 : __HFN_INT_FN1_MASK_CT2; } else { umsk = __HFN_INT_ERR_MASK; umsk |= port_id == 0 ? __HFN_INT_FN0_MASK : __HFN_INT_FN1_MASK; } writel(umsk, bfa->iocfc.bfa_regs.intr_status); writel(~umsk, bfa->iocfc.bfa_regs.intr_mask); bfa->iocfc.intr_mask = ~umsk; bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0); /* * Set the flag indicating successful enabling of interrupts */ bfa->intr_enabled = BFA_TRUE; } void bfa_isr_disable(struct bfa_s *bfa) { bfa->intr_enabled = BFA_FALSE; bfa_isr_mode_set(bfa, BFA_FALSE); writel(-1L, bfa->iocfc.bfa_regs.intr_mask); bfa_msix_uninstall(bfa); } void bfa_msix_reqq(struct bfa_s *bfa, int vec) { bfa_isr_reqq(bfa, vec - bfa->iocfc.hwif.cpe_vec_q0); } void bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m) { bfa_trc(bfa, m->mhdr.msg_class); bfa_trc(bfa, m->mhdr.msg_id); bfa_trc(bfa, m->mhdr.mtag.i2htok); WARN_ON(1); bfa_trc_stop(bfa->trcmod); } void bfa_msix_rspq(struct bfa_s *bfa, int vec) { bfa_isr_rspq(bfa, vec - bfa->iocfc.hwif.rme_vec_q0); } void bfa_msix_lpu_err(struct bfa_s *bfa, int vec) { u32 intr, curr_value; bfa_boolean_t lpu_isr, halt_isr, pss_isr; intr = readl(bfa->iocfc.bfa_regs.intr_status); if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) { halt_isr = intr & __HFN_INT_CPQ_HALT_CT2; pss_isr = intr & __HFN_INT_ERR_PSS_CT2; lpu_isr = intr & (__HFN_INT_MBOX_LPU0_CT2 | __HFN_INT_MBOX_LPU1_CT2); intr &= __HFN_INT_ERR_MASK_CT2; } else { halt_isr = bfa_asic_id_ct(bfa->ioc.pcidev.device_id) ? 
(intr & __HFN_INT_LL_HALT) : 0; pss_isr = intr & __HFN_INT_ERR_PSS; lpu_isr = intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1); intr &= __HFN_INT_ERR_MASK; } if (lpu_isr) bfa_ioc_mbox_isr(&bfa->ioc); if (intr) { if (halt_isr) { /* * If LL_HALT bit is set then FW Init Halt LL Port * Register needs to be cleared as well so Interrupt * Status Register will be cleared. */ curr_value = readl(bfa->ioc.ioc_regs.ll_halt); curr_value &= ~__FW_INIT_HALT_P; writel(curr_value, bfa->ioc.ioc_regs.ll_halt); } if (pss_isr) { /* * ERR_PSS bit needs to be cleared as well in case * interrupts are shared so driver's interrupt handler is * still called even though it is already masked out. */ curr_value = readl( bfa->ioc.ioc_regs.pss_err_status_reg); writel(curr_value, bfa->ioc.ioc_regs.pss_err_status_reg); } writel(intr, bfa->iocfc.bfa_regs.intr_status); bfa_ioc_error_isr(&bfa->ioc); } } /* * BFA IOC FC related functions */ /* * BFA IOC private functions */ /* * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ */ static void bfa_iocfc_send_cfg(void *bfa_arg) { struct bfa_s *bfa = bfa_arg; struct bfa_iocfc_s *iocfc = &bfa->iocfc; struct bfi_iocfc_cfg_req_s cfg_req; struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo; struct bfa_iocfc_cfg_s *cfg = &iocfc->cfg; int i; WARN_ON(cfg->fwcfg.num_cqs > BFI_IOC_MAX_CQS); bfa_trc(bfa, cfg->fwcfg.num_cqs); bfa_iocfc_reset_queues(bfa); /* * initialize IOC configuration info */ cfg_info->single_msix_vec = 0; if (bfa->msix.nvecs == 1) cfg_info->single_msix_vec = 1; cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG; cfg_info->num_cqs = cfg->fwcfg.num_cqs; cfg_info->num_ioim_reqs = cpu_to_be16(bfa_fcpim_get_throttle_cfg(bfa, cfg->fwcfg.num_ioim_reqs)); cfg_info->num_fwtio_reqs = cpu_to_be16(cfg->fwcfg.num_fwtio_reqs); bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa); /* * dma map REQ and RSP circular queues and shadow pointers */ for (i = 0; i < cfg->fwcfg.num_cqs; i++) { bfa_dma_be_addr_set(cfg_info->req_cq_ba[i], iocfc->req_cq_ba[i].pa); bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i], iocfc->req_cq_shadow_ci[i].pa); cfg_info->req_cq_elems[i] = cpu_to_be16(cfg->drvcfg.num_reqq_elems); bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i], iocfc->rsp_cq_ba[i].pa); bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i], iocfc->rsp_cq_shadow_pi[i].pa); cfg_info->rsp_cq_elems[i] = cpu_to_be16(cfg->drvcfg.num_rspq_elems); } /* * Enable interrupt coalescing if it is driver init path * and not ioc disable/enable path. */ if (bfa_fsm_cmp_state(iocfc, bfa_iocfc_sm_init_cfg_wait)) cfg_info->intr_attr.coalesce = BFA_TRUE; /* * dma map IOC configuration itself */ bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ, bfa_fn_lpu(bfa)); bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa); bfa_ioc_mbox_send(&bfa->ioc, &cfg_req, sizeof(struct bfi_iocfc_cfg_req_s)); } static void bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, struct bfa_pcidev_s *pcidev) { struct bfa_iocfc_s *iocfc = &bfa->iocfc; bfa->bfad = bfad; iocfc->bfa = bfa; iocfc->cfg = *cfg; /* * Initialize chip specific handlers.
*/ if (bfa_asic_id_ctc(bfa_ioc_devid(&bfa->ioc))) { iocfc->hwif.hw_reginit = bfa_hwct_reginit; iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack; iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack; iocfc->hwif.hw_msix_init = bfa_hwct_msix_init; iocfc->hwif.hw_msix_ctrl_install = bfa_hwct_msix_ctrl_install; iocfc->hwif.hw_msix_queue_install = bfa_hwct_msix_queue_install; iocfc->hwif.hw_msix_uninstall = bfa_hwct_msix_uninstall; iocfc->hwif.hw_isr_mode_set = bfa_hwct_isr_mode_set; iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs; iocfc->hwif.hw_msix_get_rme_range = bfa_hwct_msix_get_rme_range; iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CT; iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CT; } else { iocfc->hwif.hw_reginit = bfa_hwcb_reginit; iocfc->hwif.hw_reqq_ack = NULL; iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack; iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init; iocfc->hwif.hw_msix_ctrl_install = bfa_hwcb_msix_ctrl_install; iocfc->hwif.hw_msix_queue_install = bfa_hwcb_msix_queue_install; iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall; iocfc->hwif.hw_isr_mode_set = bfa_hwcb_isr_mode_set; iocfc->hwif.hw_msix_getvecs = bfa_hwcb_msix_getvecs; iocfc->hwif.hw_msix_get_rme_range = bfa_hwcb_msix_get_rme_range; iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CB + bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS; iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CB + bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS; } if (bfa_asic_id_ct2(bfa_ioc_devid(&bfa->ioc))) { iocfc->hwif.hw_reginit = bfa_hwct2_reginit; iocfc->hwif.hw_isr_mode_set = NULL; iocfc->hwif.hw_rspq_ack = bfa_hwct2_rspq_ack; } iocfc->hwif.hw_reginit(bfa); bfa->msix.nvecs = 0; } static void bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg) { u8 *dm_kva = NULL; u64 dm_pa = 0; int i, per_reqq_sz, per_rspq_sz; struct bfa_iocfc_s *iocfc = &bfa->iocfc; struct bfa_mem_dma_s *ioc_dma = BFA_MEM_IOC_DMA(bfa); struct bfa_mem_dma_s *iocfc_dma = BFA_MEM_IOCFC_DMA(bfa); struct bfa_mem_dma_s *reqq_dma, *rspq_dma; /* First allocate dma memory for IOC */ bfa_ioc_mem_claim(&bfa->ioc, bfa_mem_dma_virt(ioc_dma), bfa_mem_dma_phys(ioc_dma)); /* Claim DMA-able memory for the request/response queues */ per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ), BFA_DMA_ALIGN_SZ); per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ), BFA_DMA_ALIGN_SZ); for (i = 0; i < cfg->fwcfg.num_cqs; i++) { reqq_dma = BFA_MEM_REQQ_DMA(bfa, i); iocfc->req_cq_ba[i].kva = bfa_mem_dma_virt(reqq_dma); iocfc->req_cq_ba[i].pa = bfa_mem_dma_phys(reqq_dma); memset(iocfc->req_cq_ba[i].kva, 0, per_reqq_sz); rspq_dma = BFA_MEM_RSPQ_DMA(bfa, i); iocfc->rsp_cq_ba[i].kva = bfa_mem_dma_virt(rspq_dma); iocfc->rsp_cq_ba[i].pa = bfa_mem_dma_phys(rspq_dma); memset(iocfc->rsp_cq_ba[i].kva, 0, per_rspq_sz); } /* Claim IOCFC dma memory - for shadow CI/PI */ dm_kva = bfa_mem_dma_virt(iocfc_dma); dm_pa = bfa_mem_dma_phys(iocfc_dma); for (i = 0; i < cfg->fwcfg.num_cqs; i++) { iocfc->req_cq_shadow_ci[i].kva = dm_kva; iocfc->req_cq_shadow_ci[i].pa = dm_pa; dm_kva += BFA_CACHELINE_SZ; dm_pa += BFA_CACHELINE_SZ; iocfc->rsp_cq_shadow_pi[i].kva = dm_kva; iocfc->rsp_cq_shadow_pi[i].pa = dm_pa; dm_kva += BFA_CACHELINE_SZ; dm_pa += BFA_CACHELINE_SZ; } /* Claim IOCFC dma memory - for the config info page */ bfa->iocfc.cfg_info.kva = dm_kva; bfa->iocfc.cfg_info.pa = dm_pa; bfa->iocfc.cfginfo = (struct bfi_iocfc_cfg_s *) dm_kva; dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ); dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ); /* Claim IOCFC dma memory 
- for the config response */ bfa->iocfc.cfgrsp_dma.kva = dm_kva; bfa->iocfc.cfgrsp_dma.pa = dm_pa; bfa->iocfc.cfgrsp = (struct bfi_iocfc_cfgrsp_s *) dm_kva; dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s), BFA_CACHELINE_SZ); dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s), BFA_CACHELINE_SZ); /* Claim IOCFC kva memory */ bfa_ioc_debug_memclaim(&bfa->ioc, bfa_mem_kva_curp(iocfc)); bfa_mem_kva_curp(iocfc) += BFA_DBG_FWTRC_LEN; } /* * Start BFA submodules. */ static void bfa_iocfc_start_submod(struct bfa_s *bfa) { int i; bfa->queue_process = BFA_TRUE; for (i = 0; i < BFI_IOC_MAX_CQS; i++) bfa_isr_rspq_ack(bfa, i, bfa_rspq_ci(bfa, i)); bfa_fcport_start(bfa); bfa_uf_start(bfa); /* * bfa_init() with flash read is complete. now invalidate the stale * content of lun mask like unit attention, rp tag and lp tag. */ bfa_ioim_lm_init(BFA_FCP_MOD(bfa)->bfa); bfa->iocfc.submod_enabled = BFA_TRUE; } /* * Disable BFA submodules. */ static void bfa_iocfc_disable_submod(struct bfa_s *bfa) { if (bfa->iocfc.submod_enabled == BFA_FALSE) return; bfa_fcdiag_iocdisable(bfa); bfa_fcport_iocdisable(bfa); bfa_fcxp_iocdisable(bfa); bfa_lps_iocdisable(bfa); bfa_rport_iocdisable(bfa); bfa_fcp_iocdisable(bfa); bfa_dconf_iocdisable(bfa); bfa->iocfc.submod_enabled = BFA_FALSE; } static void bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete) { struct bfa_s *bfa = bfa_arg; if (complete) bfa_cb_init(bfa->bfad, bfa->iocfc.op_status); } static void bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl) { struct bfa_s *bfa = bfa_arg; struct bfad_s *bfad = bfa->bfad; if (compl) complete(&bfad->comp); } static void bfa_iocfc_enable_cb(void *bfa_arg, bfa_boolean_t compl) { struct bfa_s *bfa = bfa_arg; struct bfad_s *bfad = bfa->bfad; if (compl) complete(&bfad->enable_comp); } static void bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl) { struct bfa_s *bfa = bfa_arg; struct bfad_s *bfad = bfa->bfad; if (compl) complete(&bfad->disable_comp); } /* * configure queue registers from firmware response */ static void bfa_iocfc_qreg(struct bfa_s *bfa, struct bfi_iocfc_qreg_s *qreg) { int i; struct bfa_iocfc_regs_s *r = &bfa->iocfc.bfa_regs; void __iomem *kva = bfa_ioc_bar0(&bfa->ioc); for (i = 0; i < BFI_IOC_MAX_CQS; i++) { bfa->iocfc.hw_qid[i] = qreg->hw_qid[i]; r->cpe_q_ci[i] = kva + be32_to_cpu(qreg->cpe_q_ci_off[i]); r->cpe_q_pi[i] = kva + be32_to_cpu(qreg->cpe_q_pi_off[i]); r->cpe_q_ctrl[i] = kva + be32_to_cpu(qreg->cpe_qctl_off[i]); r->rme_q_ci[i] = kva + be32_to_cpu(qreg->rme_q_ci_off[i]); r->rme_q_pi[i] = kva + be32_to_cpu(qreg->rme_q_pi_off[i]); r->rme_q_ctrl[i] = kva + be32_to_cpu(qreg->rme_qctl_off[i]); } } static void bfa_iocfc_res_recfg(struct bfa_s *bfa, struct bfa_iocfc_fwcfg_s *fwcfg) { struct bfa_iocfc_s *iocfc = &bfa->iocfc; struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo; bfa_fcxp_res_recfg(bfa, fwcfg->num_fcxp_reqs); bfa_uf_res_recfg(bfa, fwcfg->num_uf_bufs); bfa_rport_res_recfg(bfa, fwcfg->num_rports); bfa_fcp_res_recfg(bfa, cpu_to_be16(cfg_info->num_ioim_reqs), fwcfg->num_ioim_reqs); bfa_tskim_res_recfg(bfa, fwcfg->num_tskim_reqs); } /* * Update BFA configuration from firmware configuration. 
*/ static void bfa_iocfc_cfgrsp(struct bfa_s *bfa) { struct bfa_iocfc_s *iocfc = &bfa->iocfc; struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp; struct bfa_iocfc_fwcfg_s *fwcfg = &cfgrsp->fwcfg; fwcfg->num_cqs = fwcfg->num_cqs; fwcfg->num_ioim_reqs = be16_to_cpu(fwcfg->num_ioim_reqs); fwcfg->num_fwtio_reqs = be16_to_cpu(fwcfg->num_fwtio_reqs); fwcfg->num_tskim_reqs = be16_to_cpu(fwcfg->num_tskim_reqs); fwcfg->num_fcxp_reqs = be16_to_cpu(fwcfg->num_fcxp_reqs); fwcfg->num_uf_bufs = be16_to_cpu(fwcfg->num_uf_bufs); fwcfg->num_rports = be16_to_cpu(fwcfg->num_rports); /* * configure queue register offsets as learnt from firmware */ bfa_iocfc_qreg(bfa, &cfgrsp->qreg); /* * Re-configure resources as learnt from Firmware */ bfa_iocfc_res_recfg(bfa, fwcfg); /* * Install MSIX queue handlers */ bfa_msix_queue_install(bfa); if (bfa->iocfc.cfgrsp->pbc_cfg.pbc_pwwn != 0) { bfa->ioc.attr->pwwn = bfa->iocfc.cfgrsp->pbc_cfg.pbc_pwwn; bfa->ioc.attr->nwwn = bfa->iocfc.cfgrsp->pbc_cfg.pbc_nwwn; bfa_fsm_send_event(iocfc, IOCFC_E_CFG_DONE); } } void bfa_iocfc_reset_queues(struct bfa_s *bfa) { int q; for (q = 0; q < BFI_IOC_MAX_CQS; q++) { bfa_reqq_ci(bfa, q) = 0; bfa_reqq_pi(bfa, q) = 0; bfa_rspq_ci(bfa, q) = 0; bfa_rspq_pi(bfa, q) = 0; } } /* * Process FAA pwwn msg from fw. */ static void bfa_iocfc_process_faa_addr(struct bfa_s *bfa, struct bfi_faa_addr_msg_s *msg) { struct bfa_iocfc_s *iocfc = &bfa->iocfc; struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp; cfgrsp->pbc_cfg.pbc_pwwn = msg->pwwn; cfgrsp->pbc_cfg.pbc_nwwn = msg->nwwn; bfa->ioc.attr->pwwn = msg->pwwn; bfa->ioc.attr->nwwn = msg->nwwn; bfa_fsm_send_event(iocfc, IOCFC_E_CFG_DONE); } /* Fabric Assigned Address specific functions */ /* * Check whether IOC is ready before sending command down */ static bfa_status_t bfa_faa_validate_request(struct bfa_s *bfa) { enum bfa_ioc_type_e ioc_type = bfa_get_type(bfa); u32 card_type = bfa->ioc.attr->card_type; if (bfa_ioc_is_operational(&bfa->ioc)) { if ((ioc_type != BFA_IOC_TYPE_FC) || bfa_mfg_is_mezz(card_type)) return BFA_STATUS_FEATURE_NOT_SUPPORTED; } else { return BFA_STATUS_IOC_NON_OP; } return BFA_STATUS_OK; } bfa_status_t bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr, bfa_cb_iocfc_t cbfn, void *cbarg) { struct bfi_faa_query_s faa_attr_req; struct bfa_iocfc_s *iocfc = &bfa->iocfc; bfa_status_t status; status = bfa_faa_validate_request(bfa); if (status != BFA_STATUS_OK) return status; if (iocfc->faa_args.busy == BFA_TRUE) return BFA_STATUS_DEVBUSY; iocfc->faa_args.faa_attr = attr; iocfc->faa_args.faa_cb.faa_cbfn = cbfn; iocfc->faa_args.faa_cb.faa_cbarg = cbarg; iocfc->faa_args.busy = BFA_TRUE; memset(&faa_attr_req, 0, sizeof(struct bfi_faa_query_s)); bfi_h2i_set(faa_attr_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_FAA_QUERY_REQ, bfa_fn_lpu(bfa)); bfa_ioc_mbox_send(&bfa->ioc, &faa_attr_req, sizeof(struct bfi_faa_query_s)); return BFA_STATUS_OK; } /* * FAA query response */ static void bfa_faa_query_reply(struct bfa_iocfc_s *iocfc, bfi_faa_query_rsp_t *rsp) { void *cbarg = iocfc->faa_args.faa_cb.faa_cbarg; if (iocfc->faa_args.faa_attr) { iocfc->faa_args.faa_attr->faa = rsp->faa; iocfc->faa_args.faa_attr->faa_state = rsp->faa_status; iocfc->faa_args.faa_attr->pwwn_source = rsp->addr_source; } WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn); iocfc->faa_args.faa_cb.faa_cbfn(cbarg, BFA_STATUS_OK); iocfc->faa_args.busy = BFA_FALSE; } /* * IOC enable request is complete */ static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status) { struct bfa_s *bfa = bfa_arg; if (status == BFA_STATUS_OK) 
bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_ENABLED); else bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_FAILED); } /* * IOC disable request is complete */ static void bfa_iocfc_disable_cbfn(void *bfa_arg) { struct bfa_s *bfa = bfa_arg; bfa->queue_process = BFA_FALSE; bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_DISABLED); } /* * Notify sub-modules of hardware failure. */ static void bfa_iocfc_hbfail_cbfn(void *bfa_arg) { struct bfa_s *bfa = bfa_arg; bfa->queue_process = BFA_FALSE; bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_FAILED); } /* * Actions on chip-reset completion. */ static void bfa_iocfc_reset_cbfn(void *bfa_arg) { struct bfa_s *bfa = bfa_arg; bfa_iocfc_reset_queues(bfa); bfa_isr_enable(bfa); } /* * Query IOC memory requirement information. */ void bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo, struct bfa_s *bfa) { int q, per_reqq_sz, per_rspq_sz; struct bfa_mem_dma_s *ioc_dma = BFA_MEM_IOC_DMA(bfa); struct bfa_mem_dma_s *iocfc_dma = BFA_MEM_IOCFC_DMA(bfa); struct bfa_mem_kva_s *iocfc_kva = BFA_MEM_IOCFC_KVA(bfa); u32 dm_len = 0; /* dma memory setup for IOC */ bfa_mem_dma_setup(meminfo, ioc_dma, BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ)); /* dma memory setup for REQ/RSP queues */ per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ), BFA_DMA_ALIGN_SZ); per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ), BFA_DMA_ALIGN_SZ); for (q = 0; q < cfg->fwcfg.num_cqs; q++) { bfa_mem_dma_setup(meminfo, BFA_MEM_REQQ_DMA(bfa, q), per_reqq_sz); bfa_mem_dma_setup(meminfo, BFA_MEM_RSPQ_DMA(bfa, q), per_rspq_sz); } /* IOCFC dma memory - calculate Shadow CI/PI size */ for (q = 0; q < cfg->fwcfg.num_cqs; q++) dm_len += (2 * BFA_CACHELINE_SZ); /* IOCFC dma memory - calculate config info / rsp size */ dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ); dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s), BFA_CACHELINE_SZ); /* dma memory setup for IOCFC */ bfa_mem_dma_setup(meminfo, iocfc_dma, dm_len); /* kva memory setup for IOCFC */ bfa_mem_kva_setup(meminfo, iocfc_kva, BFA_DBG_FWTRC_LEN); } /* * Query IOC memory requirement information. */ void bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, struct bfa_pcidev_s *pcidev) { int i; struct bfa_ioc_s *ioc = &bfa->ioc; bfa_iocfc_cbfn.enable_cbfn = bfa_iocfc_enable_cbfn; bfa_iocfc_cbfn.disable_cbfn = bfa_iocfc_disable_cbfn; bfa_iocfc_cbfn.hbfail_cbfn = bfa_iocfc_hbfail_cbfn; bfa_iocfc_cbfn.reset_cbfn = bfa_iocfc_reset_cbfn; ioc->trcmod = bfa->trcmod; bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod); bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_PCIFN_CLASS_FC); bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs); bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev); bfa_iocfc_mem_claim(bfa, cfg); INIT_LIST_HEAD(&bfa->timer_mod.timer_q); INIT_LIST_HEAD(&bfa->comp_q); for (i = 0; i < BFI_IOC_MAX_CQS; i++) INIT_LIST_HEAD(&bfa->reqq_waitq[i]); bfa->iocfc.cb_reqd = BFA_FALSE; bfa->iocfc.op_status = BFA_STATUS_OK; bfa->iocfc.submod_enabled = BFA_FALSE; bfa_fsm_set_state(&bfa->iocfc, bfa_iocfc_sm_stopped); } /* * Query IOC memory requirement information. */ void bfa_iocfc_init(struct bfa_s *bfa) { bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_INIT); } /* * IOC start called from bfa_start(). Called to start IOC operations * at driver instantiation for this instance. */ void bfa_iocfc_start(struct bfa_s *bfa) { bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_START); } /* * IOC stop called from bfa_stop(). 
Called only when driver is unloaded * for this instance. */ void bfa_iocfc_stop(struct bfa_s *bfa) { bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_STOP); } void bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m) { struct bfa_s *bfa = bfaarg; struct bfa_iocfc_s *iocfc = &bfa->iocfc; union bfi_iocfc_i2h_msg_u *msg; msg = (union bfi_iocfc_i2h_msg_u *) m; bfa_trc(bfa, msg->mh.msg_id); switch (msg->mh.msg_id) { case BFI_IOCFC_I2H_CFG_REPLY: bfa_iocfc_cfgrsp(bfa); break; case BFI_IOCFC_I2H_UPDATEQ_RSP: iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK); break; case BFI_IOCFC_I2H_ADDR_MSG: bfa_iocfc_process_faa_addr(bfa, (struct bfi_faa_addr_msg_s *)msg); break; case BFI_IOCFC_I2H_FAA_QUERY_RSP: bfa_faa_query_reply(iocfc, (bfi_faa_query_rsp_t *)msg); break; default: WARN_ON(1); } } void bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr) { struct bfa_iocfc_s *iocfc = &bfa->iocfc; attr->intr_attr.coalesce = iocfc->cfginfo->intr_attr.coalesce; attr->intr_attr.delay = iocfc->cfginfo->intr_attr.delay ? be16_to_cpu(iocfc->cfginfo->intr_attr.delay) : be16_to_cpu(iocfc->cfgrsp->intr_attr.delay); attr->intr_attr.latency = iocfc->cfginfo->intr_attr.latency ? be16_to_cpu(iocfc->cfginfo->intr_attr.latency) : be16_to_cpu(iocfc->cfgrsp->intr_attr.latency); attr->config = iocfc->cfg; } bfa_status_t bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr) { struct bfa_iocfc_s *iocfc = &bfa->iocfc; struct bfi_iocfc_set_intr_req_s *m; iocfc->cfginfo->intr_attr.coalesce = attr->coalesce; iocfc->cfginfo->intr_attr.delay = cpu_to_be16(attr->delay); iocfc->cfginfo->intr_attr.latency = cpu_to_be16(attr->latency); if (!bfa_iocfc_is_operational(bfa)) return BFA_STATUS_OK; m = bfa_reqq_next(bfa, BFA_REQQ_IOC); if (!m) return BFA_STATUS_DEVBUSY; bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ, bfa_fn_lpu(bfa)); m->coalesce = iocfc->cfginfo->intr_attr.coalesce; m->delay = iocfc->cfginfo->intr_attr.delay; m->latency = iocfc->cfginfo->intr_attr.latency; bfa_trc(bfa, attr->delay); bfa_trc(bfa, attr->latency); bfa_reqq_produce(bfa, BFA_REQQ_IOC, m->mh); return BFA_STATUS_OK; } void bfa_iocfc_set_snsbase(struct bfa_s *bfa, int seg_no, u64 snsbase_pa) { struct bfa_iocfc_s *iocfc = &bfa->iocfc; iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1); bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase[seg_no], snsbase_pa); } /* * Enable IOC after it is disabled. */ void bfa_iocfc_enable(struct bfa_s *bfa) { bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0, "IOC Enable"); bfa->iocfc.cb_reqd = BFA_TRUE; bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_ENABLE); } void bfa_iocfc_disable(struct bfa_s *bfa) { bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0, "IOC Disable"); bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DISABLE); } bfa_boolean_t bfa_iocfc_is_operational(struct bfa_s *bfa) { return bfa_ioc_is_operational(&bfa->ioc) && bfa_fsm_cmp_state(&bfa->iocfc, bfa_iocfc_sm_operational); } /* * Return boot target port wwns -- read from boot information in flash. 
*/ void bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns) { struct bfa_iocfc_s *iocfc = &bfa->iocfc; struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp; int i; if (cfgrsp->pbc_cfg.boot_enabled && cfgrsp->pbc_cfg.nbluns) { bfa_trc(bfa, cfgrsp->pbc_cfg.nbluns); *nwwns = cfgrsp->pbc_cfg.nbluns; for (i = 0; i < cfgrsp->pbc_cfg.nbluns; i++) wwns[i] = cfgrsp->pbc_cfg.blun[i].tgt_pwwn; return; } *nwwns = cfgrsp->bootwwns.nwwns; memcpy(wwns, cfgrsp->bootwwns.wwn, sizeof(cfgrsp->bootwwns.wwn)); } int bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport) { struct bfa_iocfc_s *iocfc = &bfa->iocfc; struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp; memcpy(pbc_vport, cfgrsp->pbc_cfg.vport, sizeof(cfgrsp->pbc_cfg.vport)); return cfgrsp->pbc_cfg.nvports; } /* * Use this function query the memory requirement of the BFA library. * This function needs to be called before bfa_attach() to get the * memory required of the BFA layer for a given driver configuration. * * This call will fail, if the cap is out of range compared to pre-defined * values within the BFA library * * @param[in] cfg - pointer to bfa_ioc_cfg_t. Driver layer should indicate * its configuration in this structure. * The default values for struct bfa_iocfc_cfg_s can be * fetched using bfa_cfg_get_default() API. * * If cap's boundary check fails, the library will use * the default bfa_cap_t values (and log a warning msg). * * @param[out] meminfo - pointer to bfa_meminfo_t. This content * indicates the memory type (see bfa_mem_type_t) and * amount of memory required. * * Driver should allocate the memory, populate the * starting address for each block and provide the same * structure as input parameter to bfa_attach() call. * * @param[in] bfa - pointer to the bfa structure, used while fetching the * dma, kva memory information of the bfa sub-modules. 
* * @return void * * Special Considerations: @note */ void bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo, struct bfa_s *bfa) { struct bfa_mem_dma_s *port_dma = BFA_MEM_PORT_DMA(bfa); struct bfa_mem_dma_s *ablk_dma = BFA_MEM_ABLK_DMA(bfa); struct bfa_mem_dma_s *cee_dma = BFA_MEM_CEE_DMA(bfa); struct bfa_mem_dma_s *sfp_dma = BFA_MEM_SFP_DMA(bfa); struct bfa_mem_dma_s *flash_dma = BFA_MEM_FLASH_DMA(bfa); struct bfa_mem_dma_s *diag_dma = BFA_MEM_DIAG_DMA(bfa); struct bfa_mem_dma_s *phy_dma = BFA_MEM_PHY_DMA(bfa); struct bfa_mem_dma_s *fru_dma = BFA_MEM_FRU_DMA(bfa); WARN_ON((cfg == NULL) || (meminfo == NULL)); memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s)); /* Initialize the DMA & KVA meminfo queues */ INIT_LIST_HEAD(&meminfo->dma_info.qe); INIT_LIST_HEAD(&meminfo->kva_info.qe); bfa_iocfc_meminfo(cfg, meminfo, bfa); bfa_sgpg_meminfo(cfg, meminfo, bfa); bfa_fcport_meminfo(cfg, meminfo, bfa); bfa_fcxp_meminfo(cfg, meminfo, bfa); bfa_lps_meminfo(cfg, meminfo, bfa); bfa_uf_meminfo(cfg, meminfo, bfa); bfa_rport_meminfo(cfg, meminfo, bfa); bfa_fcp_meminfo(cfg, meminfo, bfa); bfa_dconf_meminfo(cfg, meminfo, bfa); /* dma info setup */ bfa_mem_dma_setup(meminfo, port_dma, bfa_port_meminfo()); bfa_mem_dma_setup(meminfo, ablk_dma, bfa_ablk_meminfo()); bfa_mem_dma_setup(meminfo, cee_dma, bfa_cee_meminfo()); bfa_mem_dma_setup(meminfo, sfp_dma, bfa_sfp_meminfo()); bfa_mem_dma_setup(meminfo, flash_dma, bfa_flash_meminfo(cfg->drvcfg.min_cfg)); bfa_mem_dma_setup(meminfo, diag_dma, bfa_diag_meminfo()); bfa_mem_dma_setup(meminfo, phy_dma, bfa_phy_meminfo(cfg->drvcfg.min_cfg)); bfa_mem_dma_setup(meminfo, fru_dma, bfa_fru_meminfo(cfg->drvcfg.min_cfg)); } /* * Use this function to do attach the driver instance with the BFA * library. This function will not trigger any HW initialization * process (which will be done in bfa_init() call) * * This call will fail, if the cap is out of range compared to * pre-defined values within the BFA library * * @param[out] bfa Pointer to bfa_t. * @param[in] bfad Opaque handle back to the driver's IOC structure * @param[in] cfg Pointer to bfa_ioc_cfg_t. Should be same structure * that was used in bfa_cfg_get_meminfo(). * @param[in] meminfo Pointer to bfa_meminfo_t. The driver should * use the bfa_cfg_get_meminfo() call to * find the memory blocks required, allocate the * required memory and provide the starting addresses. 
* @param[in] pcidev pointer to struct bfa_pcidev_s * * @return * void * * Special Considerations: * * @note * */ void bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev) { struct bfa_mem_dma_s *dma_info, *dma_elem; struct bfa_mem_kva_s *kva_info, *kva_elem; struct list_head *dm_qe, *km_qe; bfa->fcs = BFA_FALSE; WARN_ON((cfg == NULL) || (meminfo == NULL)); /* Initialize memory pointers for iterative allocation */ dma_info = &meminfo->dma_info; dma_info->kva_curp = dma_info->kva; dma_info->dma_curp = dma_info->dma; kva_info = &meminfo->kva_info; kva_info->kva_curp = kva_info->kva; list_for_each(dm_qe, &dma_info->qe) { dma_elem = (struct bfa_mem_dma_s *) dm_qe; dma_elem->kva_curp = dma_elem->kva; dma_elem->dma_curp = dma_elem->dma; } list_for_each(km_qe, &kva_info->qe) { kva_elem = (struct bfa_mem_kva_s *) km_qe; kva_elem->kva_curp = kva_elem->kva; } bfa_iocfc_attach(bfa, bfad, cfg, pcidev); bfa_fcdiag_attach(bfa, bfad, cfg, pcidev); bfa_sgpg_attach(bfa, bfad, cfg, pcidev); bfa_fcport_attach(bfa, bfad, cfg, pcidev); bfa_fcxp_attach(bfa, bfad, cfg, pcidev); bfa_lps_attach(bfa, bfad, cfg, pcidev); bfa_uf_attach(bfa, bfad, cfg, pcidev); bfa_rport_attach(bfa, bfad, cfg, pcidev); bfa_fcp_attach(bfa, bfad, cfg, pcidev); bfa_dconf_attach(bfa, bfad, cfg); bfa_com_port_attach(bfa); bfa_com_ablk_attach(bfa); bfa_com_cee_attach(bfa); bfa_com_sfp_attach(bfa); bfa_com_flash_attach(bfa, cfg->drvcfg.min_cfg); bfa_com_diag_attach(bfa); bfa_com_phy_attach(bfa, cfg->drvcfg.min_cfg); bfa_com_fru_attach(bfa, cfg->drvcfg.min_cfg); } /* * Use this function to delete a BFA IOC. IOC should be stopped (by * calling bfa_stop()) before this function call. * * @param[in] bfa - pointer to bfa_t. * * @return * void * * Special Considerations: * * @note */ void bfa_detach(struct bfa_s *bfa) { bfa_ioc_detach(&bfa->ioc); } void bfa_comp_deq(struct bfa_s *bfa, struct list_head *comp_q) { INIT_LIST_HEAD(comp_q); list_splice_tail_init(&bfa->comp_q, comp_q); } void bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q) { struct list_head *qe; struct list_head *qen; struct bfa_cb_qe_s *hcb_qe; bfa_cb_cbfn_status_t cbfn; list_for_each_safe(qe, qen, comp_q) { hcb_qe = (struct bfa_cb_qe_s *) qe; if (hcb_qe->pre_rmv) { /* qe is invalid after return, dequeue before cbfn() */ list_del(qe); cbfn = (bfa_cb_cbfn_status_t)(hcb_qe->cbfn); cbfn(hcb_qe->cbarg, hcb_qe->fw_status); } else hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE); } } void bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q) { struct list_head *qe; struct bfa_cb_qe_s *hcb_qe; while (!list_empty(comp_q)) { bfa_q_deq(comp_q, &qe); hcb_qe = (struct bfa_cb_qe_s *) qe; WARN_ON(hcb_qe->pre_rmv); hcb_qe->cbfn(hcb_qe->cbarg, BFA_FALSE); } } /* * Return the list of PCI vendor/device id lists supported by this * BFA instance. */ void bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids) { static struct bfa_pciid_s __pciids[] = { {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G2P}, {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G1P}, {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT}, {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT_FC}, }; *npciids = ARRAY_SIZE(__pciids); *pciids = __pciids; } /* * Use this function query the default struct bfa_iocfc_cfg_s value (compiled * into BFA layer). The OS driver can then turn back and overwrite entries that * have been configured by the user. 
* * @param[in] cfg - pointer to bfa_ioc_cfg_t * * @return * void * * Special Considerations: * note */ void bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg) { cfg->fwcfg.num_fabrics = DEF_CFG_NUM_FABRICS; cfg->fwcfg.num_lports = DEF_CFG_NUM_LPORTS; cfg->fwcfg.num_rports = DEF_CFG_NUM_RPORTS; cfg->fwcfg.num_ioim_reqs = DEF_CFG_NUM_IOIM_REQS; cfg->fwcfg.num_tskim_reqs = DEF_CFG_NUM_TSKIM_REQS; cfg->fwcfg.num_fcxp_reqs = DEF_CFG_NUM_FCXP_REQS; cfg->fwcfg.num_uf_bufs = DEF_CFG_NUM_UF_BUFS; cfg->fwcfg.num_cqs = DEF_CFG_NUM_CQS; cfg->fwcfg.num_fwtio_reqs = 0; cfg->drvcfg.num_reqq_elems = DEF_CFG_NUM_REQQ_ELEMS; cfg->drvcfg.num_rspq_elems = DEF_CFG_NUM_RSPQ_ELEMS; cfg->drvcfg.num_sgpgs = DEF_CFG_NUM_SGPGS; cfg->drvcfg.num_sboot_tgts = DEF_CFG_NUM_SBOOT_TGTS; cfg->drvcfg.num_sboot_luns = DEF_CFG_NUM_SBOOT_LUNS; cfg->drvcfg.path_tov = BFA_FCPIM_PATHTOV_DEF; cfg->drvcfg.ioc_recover = BFA_FALSE; cfg->drvcfg.delay_comp = BFA_FALSE; } void bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg) { bfa_cfg_get_default(cfg); cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN; cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN; cfg->fwcfg.num_fcxp_reqs = BFA_FCXP_MIN; cfg->fwcfg.num_uf_bufs = BFA_UF_MIN; cfg->fwcfg.num_rports = BFA_RPORT_MIN; cfg->fwcfg.num_fwtio_reqs = 0; cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN; cfg->drvcfg.num_reqq_elems = BFA_REQQ_NELEMS_MIN; cfg->drvcfg.num_rspq_elems = BFA_RSPQ_NELEMS_MIN; cfg->drvcfg.min_cfg = BFA_TRUE; }
linux-master
drivers/scsi/bfa/bfa_core.c
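/*
 * Illustrative sketch (not part of the driver sources above): the
 * bring-up sequence that the bfa_cfg_get_meminfo()/bfa_attach() comments
 * in bfa_core.c describe, condensed from bfad_hal_mem_alloc() and
 * bfad_drv_init() in bfad.c below.  example_bfa_bringup() is a
 * hypothetical helper; error unwinding and the bfad_lock that the real
 * driver holds around bfa_iocfc_init() are omitted for brevity.
 */
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/dma-mapping.h>
#include "bfad_drv.h"
#include "bfa.h"

static bfa_status_t
example_bfa_bringup(struct bfad_s *bfad)
{
	struct bfa_meminfo_s *meminfo = &bfad->meminfo;
	struct device *dev = &bfad->pcidev->dev;
	struct bfa_mem_dma_s *dma_elem;
	struct bfa_mem_kva_s *kva_elem;
	struct list_head *qe;
	dma_addr_t pa;

	/* 1. Start from the compiled-in defaults, then ask the BFA
	 *    library how much DMA/KVA memory this configuration needs. */
	bfa_cfg_get_default(&bfad->ioc_cfg);
	bfa_cfg_get_meminfo(&bfad->ioc_cfg, meminfo, &bfad->bfa);

	/* 2. Allocate every KVA and DMA block listed in the meminfo queues. */
	list_for_each(qe, &meminfo->kva_info.qe) {
		kva_elem = (struct bfa_mem_kva_s *) qe;
		kva_elem->kva = vzalloc(kva_elem->mem_len);
		if (kva_elem->kva == NULL)
			return BFA_STATUS_ENOMEM;
	}
	list_for_each(qe, &meminfo->dma_info.qe) {
		dma_elem = (struct bfa_mem_dma_s *) qe;
		dma_elem->kva = dma_alloc_coherent(dev, dma_elem->mem_len,
						   &pa, GFP_KERNEL);
		if (dma_elem->kva == NULL)
			return BFA_STATUS_ENOMEM;
		dma_elem->dma = pa;
	}

	/* 3. Attach with the same cfg/meminfo pair, then trigger HW init
	 *    (IOCFC_E_INIT) exactly as bfad_sm_created() does. */
	bfa_attach(&bfad->bfa, bfad, &bfad->ioc_cfg, meminfo,
		   &bfad->hal_pcidev);
	bfa_iocfc_init(&bfad->bfa);
	return BFA_STATUS_OK;
}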
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. * Copyright (c) 2014- QLogic Corporation. * All rights reserved * www.qlogic.com * * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. */ /* * bfad.c Linux driver PCI interface module. */ #include <linux/module.h> #include <linux/kthread.h> #include <linux/errno.h> #include <linux/sched.h> #include <linux/init.h> #include <linux/fs.h> #include <linux/pci.h> #include <linux/firmware.h> #include <linux/uaccess.h> #include <asm/fcntl.h> #include "bfad_drv.h" #include "bfad_im.h" #include "bfa_fcs.h" #include "bfa_defs.h" #include "bfa.h" BFA_TRC_FILE(LDRV, BFAD); DEFINE_MUTEX(bfad_mutex); LIST_HEAD(bfad_list); static int bfad_inst; static int num_sgpgs_parm; int supported_fc4s; char *host_name, *os_name, *os_patch; int num_rports, num_ios, num_tms; int num_fcxps, num_ufbufs; int reqq_size, rspq_size, num_sgpgs; int rport_del_timeout = BFA_FCS_RPORT_DEF_DEL_TIMEOUT; int bfa_lun_queue_depth = BFAD_LUN_QUEUE_DEPTH; int bfa_io_max_sge = BFAD_IO_MAX_SGE; int bfa_log_level = 3; /* WARNING log level */ int ioc_auto_recover = BFA_TRUE; int bfa_linkup_delay = -1; int fdmi_enable = BFA_TRUE; int pcie_max_read_reqsz; int bfa_debugfs_enable = 1; int msix_disable_cb = 0, msix_disable_ct = 0; int max_xfer_size = BFAD_MAX_SECTORS >> 1; static int max_rport_logins = BFA_FCS_MAX_RPORT_LOGINS; /* Firmware releated */ u32 bfi_image_cb_size, bfi_image_ct_size, bfi_image_ct2_size; u32 *bfi_image_cb, *bfi_image_ct, *bfi_image_ct2; #define BFAD_FW_FILE_CB "cbfw-3.2.5.1.bin" #define BFAD_FW_FILE_CT "ctfw-3.2.5.1.bin" #define BFAD_FW_FILE_CT2 "ct2fw-3.2.5.1.bin" static u32 *bfad_load_fwimg(struct pci_dev *pdev); static void bfad_free_fwimg(void); static void bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image, u32 *bfi_image_size, char *fw_name); static const char *msix_name_ct[] = { "ctrl", "cpe0", "cpe1", "cpe2", "cpe3", "rme0", "rme1", "rme2", "rme3" }; static const char *msix_name_cb[] = { "cpe0", "cpe1", "cpe2", "cpe3", "rme0", "rme1", "rme2", "rme3", "eemc", "elpu0", "elpu1", "epss", "mlpu" }; MODULE_FIRMWARE(BFAD_FW_FILE_CB); MODULE_FIRMWARE(BFAD_FW_FILE_CT); MODULE_FIRMWARE(BFAD_FW_FILE_CT2); module_param(os_name, charp, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(os_name, "OS name of the hba host machine"); module_param(os_patch, charp, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(os_patch, "OS patch level of the hba host machine"); module_param(host_name, charp, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(host_name, "Hostname of the hba host machine"); module_param(num_rports, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(num_rports, "Max number of rports supported per port " "(physical/logical), default=1024"); module_param(num_ios, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(num_ios, "Max number of ioim requests, default=2000"); module_param(num_tms, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(num_tms, "Max number of task im requests, default=128"); module_param(num_fcxps, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(num_fcxps, "Max number of fcxp requests, default=64"); module_param(num_ufbufs, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(num_ufbufs, "Max number of unsolicited frame " "buffers, default=64"); module_param(reqq_size, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(reqq_size, "Max number of request queue elements, " "default=256"); module_param(rspq_size, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(rspq_size, "Max number of response queue elements, " "default=64"); module_param(num_sgpgs, int, S_IRUGO | 
S_IWUSR); MODULE_PARM_DESC(num_sgpgs, "Number of scatter/gather pages, default=2048"); module_param(rport_del_timeout, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(rport_del_timeout, "Rport delete timeout, default=90 secs, " "Range[>0]"); module_param(bfa_lun_queue_depth, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(bfa_lun_queue_depth, "Lun queue depth, default=32, Range[>0]"); module_param(bfa_io_max_sge, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(bfa_io_max_sge, "Max io scatter/gather elements, default=255"); module_param(bfa_log_level, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(bfa_log_level, "Driver log level, default=3, " "Range[Critical:1|Error:2|Warning:3|Info:4]"); module_param(ioc_auto_recover, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(ioc_auto_recover, "IOC auto recovery, default=1, " "Range[off:0|on:1]"); module_param(bfa_linkup_delay, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(bfa_linkup_delay, "Link up delay, default=30 secs for " "boot port. Otherwise 10 secs in RHEL4 & 0 for " "[RHEL5, SLES10, ESX40] Range[>0]"); module_param(msix_disable_cb, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(msix_disable_cb, "Disable Message Signaled Interrupts for QLogic-415/425/815/825 cards, default=0 Range[false:0|true:1]"); module_param(msix_disable_ct, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(msix_disable_ct, "Disable Message Signaled Interrupts if possible for QLogic-1010/1020/804/1007/902/1741 cards, default=0, Range[false:0|true:1]"); module_param(fdmi_enable, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(fdmi_enable, "Enables fdmi registration, default=1, " "Range[false:0|true:1]"); module_param(pcie_max_read_reqsz, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(pcie_max_read_reqsz, "PCIe max read request size, default=0 " "(use system setting), Range[128|256|512|1024|2048|4096]"); module_param(bfa_debugfs_enable, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(bfa_debugfs_enable, "Enables debugfs feature, default=1," " Range[false:0|true:1]"); module_param(max_xfer_size, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(max_xfer_size, "default=32MB," " Range[64k|128k|256k|512k|1024k|2048k]"); module_param(max_rport_logins, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(max_rport_logins, "Max number of logins to initiator and target rports on a port (physical/logical), default=1024"); static void bfad_sm_uninit(struct bfad_s *bfad, enum bfad_sm_event event); static void bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event); static void bfad_sm_initializing(struct bfad_s *bfad, enum bfad_sm_event event); static void bfad_sm_operational(struct bfad_s *bfad, enum bfad_sm_event event); static void bfad_sm_stopping(struct bfad_s *bfad, enum bfad_sm_event event); static void bfad_sm_failed(struct bfad_s *bfad, enum bfad_sm_event event); static void bfad_sm_fcs_exit(struct bfad_s *bfad, enum bfad_sm_event event); /* * Beginning state for the driver instance, awaiting the pci_probe event */ static void bfad_sm_uninit(struct bfad_s *bfad, enum bfad_sm_event event) { bfa_trc(bfad, event); switch (event) { case BFAD_E_CREATE: bfa_sm_set_state(bfad, bfad_sm_created); bfad->bfad_tsk = kthread_create(bfad_worker, (void *) bfad, "%s", "bfad_worker"); if (IS_ERR(bfad->bfad_tsk)) { printk(KERN_INFO "bfad[%d]: Kernel thread " "creation failed!\n", bfad->inst_no); bfa_sm_send_event(bfad, BFAD_E_KTHREAD_CREATE_FAILED); } bfa_sm_send_event(bfad, BFAD_E_INIT); break; case BFAD_E_STOP: /* Ignore stop; already in uninit */ break; default: bfa_sm_fault(bfad, event); } } /* * Driver Instance is created, awaiting event INIT to initialize the 
bfad */ static void bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event) { unsigned long flags; bfa_status_t ret; bfa_trc(bfad, event); switch (event) { case BFAD_E_INIT: bfa_sm_set_state(bfad, bfad_sm_initializing); init_completion(&bfad->comp); /* Enable Interrupt and wait bfa_init completion */ if (bfad_setup_intr(bfad)) { printk(KERN_WARNING "bfad%d: bfad_setup_intr failed\n", bfad->inst_no); bfa_sm_send_event(bfad, BFAD_E_INIT_FAILED); break; } spin_lock_irqsave(&bfad->bfad_lock, flags); bfa_iocfc_init(&bfad->bfa); spin_unlock_irqrestore(&bfad->bfad_lock, flags); /* Set up interrupt handler for each vectors */ if ((bfad->bfad_flags & BFAD_MSIX_ON) && bfad_install_msix_handler(bfad)) { printk(KERN_WARNING "%s: install_msix failed, bfad%d\n", __func__, bfad->inst_no); } bfad_init_timer(bfad); wait_for_completion(&bfad->comp); if ((bfad->bfad_flags & BFAD_HAL_INIT_DONE)) { bfa_sm_send_event(bfad, BFAD_E_INIT_SUCCESS); } else { printk(KERN_WARNING "bfa %s: bfa init failed\n", bfad->pci_name); spin_lock_irqsave(&bfad->bfad_lock, flags); bfa_fcs_init(&bfad->bfa_fcs); spin_unlock_irqrestore(&bfad->bfad_lock, flags); ret = bfad_cfg_pport(bfad, BFA_LPORT_ROLE_FCP_IM); if (ret != BFA_STATUS_OK) { init_completion(&bfad->comp); spin_lock_irqsave(&bfad->bfad_lock, flags); bfad->pport.flags |= BFAD_PORT_DELETE; bfa_fcs_exit(&bfad->bfa_fcs); spin_unlock_irqrestore(&bfad->bfad_lock, flags); wait_for_completion(&bfad->comp); bfa_sm_send_event(bfad, BFAD_E_INIT_FAILED); break; } bfad->bfad_flags |= BFAD_HAL_INIT_FAIL; bfa_sm_send_event(bfad, BFAD_E_HAL_INIT_FAILED); } break; case BFAD_E_KTHREAD_CREATE_FAILED: bfa_sm_set_state(bfad, bfad_sm_uninit); break; default: bfa_sm_fault(bfad, event); } } static void bfad_sm_initializing(struct bfad_s *bfad, enum bfad_sm_event event) { int retval; unsigned long flags; bfa_trc(bfad, event); switch (event) { case BFAD_E_INIT_SUCCESS: kthread_stop(bfad->bfad_tsk); spin_lock_irqsave(&bfad->bfad_lock, flags); bfad->bfad_tsk = NULL; spin_unlock_irqrestore(&bfad->bfad_lock, flags); retval = bfad_start_ops(bfad); if (retval != BFA_STATUS_OK) { bfa_sm_set_state(bfad, bfad_sm_failed); break; } bfa_sm_set_state(bfad, bfad_sm_operational); break; case BFAD_E_INIT_FAILED: bfa_sm_set_state(bfad, bfad_sm_uninit); kthread_stop(bfad->bfad_tsk); spin_lock_irqsave(&bfad->bfad_lock, flags); bfad->bfad_tsk = NULL; spin_unlock_irqrestore(&bfad->bfad_lock, flags); break; case BFAD_E_HAL_INIT_FAILED: bfa_sm_set_state(bfad, bfad_sm_failed); break; default: bfa_sm_fault(bfad, event); } } static void bfad_sm_failed(struct bfad_s *bfad, enum bfad_sm_event event) { int retval; bfa_trc(bfad, event); switch (event) { case BFAD_E_INIT_SUCCESS: retval = bfad_start_ops(bfad); if (retval != BFA_STATUS_OK) break; bfa_sm_set_state(bfad, bfad_sm_operational); break; case BFAD_E_STOP: bfa_sm_set_state(bfad, bfad_sm_fcs_exit); bfa_sm_send_event(bfad, BFAD_E_FCS_EXIT_COMP); break; case BFAD_E_EXIT_COMP: bfa_sm_set_state(bfad, bfad_sm_uninit); bfad_remove_intr(bfad); del_timer_sync(&bfad->hal_tmo); break; default: bfa_sm_fault(bfad, event); } } static void bfad_sm_operational(struct bfad_s *bfad, enum bfad_sm_event event) { bfa_trc(bfad, event); switch (event) { case BFAD_E_STOP: bfa_sm_set_state(bfad, bfad_sm_fcs_exit); bfad_fcs_stop(bfad); break; default: bfa_sm_fault(bfad, event); } } static void bfad_sm_fcs_exit(struct bfad_s *bfad, enum bfad_sm_event event) { bfa_trc(bfad, event); switch (event) { case BFAD_E_FCS_EXIT_COMP: bfa_sm_set_state(bfad, bfad_sm_stopping); bfad_stop(bfad); break; 
default: bfa_sm_fault(bfad, event); } } static void bfad_sm_stopping(struct bfad_s *bfad, enum bfad_sm_event event) { bfa_trc(bfad, event); switch (event) { case BFAD_E_EXIT_COMP: bfa_sm_set_state(bfad, bfad_sm_uninit); bfad_remove_intr(bfad); del_timer_sync(&bfad->hal_tmo); bfad_im_probe_undo(bfad); bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE; bfad_uncfg_pport(bfad); break; default: bfa_sm_fault(bfad, event); break; } } /* * BFA callbacks */ void bfad_hcb_comp(void *arg, bfa_status_t status) { struct bfad_hal_comp *fcomp = (struct bfad_hal_comp *)arg; fcomp->status = status; complete(&fcomp->comp); } /* * bfa_init callback */ void bfa_cb_init(void *drv, bfa_status_t init_status) { struct bfad_s *bfad = drv; if (init_status == BFA_STATUS_OK) { bfad->bfad_flags |= BFAD_HAL_INIT_DONE; /* * If BFAD_HAL_INIT_FAIL flag is set: * Wake up the kernel thread to start * the bfad operations after HAL init done */ if ((bfad->bfad_flags & BFAD_HAL_INIT_FAIL)) { bfad->bfad_flags &= ~BFAD_HAL_INIT_FAIL; wake_up_process(bfad->bfad_tsk); } } complete(&bfad->comp); } /* * BFA_FCS callbacks */ struct bfad_port_s * bfa_fcb_lport_new(struct bfad_s *bfad, struct bfa_fcs_lport_s *port, enum bfa_lport_role roles, struct bfad_vf_s *vf_drv, struct bfad_vport_s *vp_drv) { bfa_status_t rc; struct bfad_port_s *port_drv; if (!vp_drv && !vf_drv) { port_drv = &bfad->pport; port_drv->pvb_type = BFAD_PORT_PHYS_BASE; } else if (!vp_drv && vf_drv) { port_drv = &vf_drv->base_port; port_drv->pvb_type = BFAD_PORT_VF_BASE; } else if (vp_drv && !vf_drv) { port_drv = &vp_drv->drv_port; port_drv->pvb_type = BFAD_PORT_PHYS_VPORT; } else { port_drv = &vp_drv->drv_port; port_drv->pvb_type = BFAD_PORT_VF_VPORT; } port_drv->fcs_port = port; port_drv->roles = roles; if (roles & BFA_LPORT_ROLE_FCP_IM) { rc = bfad_im_port_new(bfad, port_drv); if (rc != BFA_STATUS_OK) { bfad_im_port_delete(bfad, port_drv); port_drv = NULL; } } return port_drv; } /* * FCS RPORT alloc callback, after successful PLOGI by FCS */ bfa_status_t bfa_fcb_rport_alloc(struct bfad_s *bfad, struct bfa_fcs_rport_s **rport, struct bfad_rport_s **rport_drv) { bfa_status_t rc = BFA_STATUS_OK; *rport_drv = kzalloc(sizeof(struct bfad_rport_s), GFP_ATOMIC); if (*rport_drv == NULL) { rc = BFA_STATUS_ENOMEM; goto ext; } *rport = &(*rport_drv)->fcs_rport; ext: return rc; } /* * FCS PBC VPORT Create */ void bfa_fcb_pbc_vport_create(struct bfad_s *bfad, struct bfi_pbc_vport_s pbc_vport) { struct bfa_lport_cfg_s port_cfg = {0}; struct bfad_vport_s *vport; int rc; vport = kzalloc(sizeof(struct bfad_vport_s), GFP_ATOMIC); if (!vport) { bfa_trc(bfad, 0); return; } vport->drv_port.bfad = bfad; port_cfg.roles = BFA_LPORT_ROLE_FCP_IM; port_cfg.pwwn = pbc_vport.vp_pwwn; port_cfg.nwwn = pbc_vport.vp_nwwn; port_cfg.preboot_vp = BFA_TRUE; rc = bfa_fcs_pbc_vport_create(&vport->fcs_vport, &bfad->bfa_fcs, 0, &port_cfg, vport); if (rc != BFA_STATUS_OK) { bfa_trc(bfad, 0); return; } list_add_tail(&vport->list_entry, &bfad->pbc_vport_list); } void bfad_hal_mem_release(struct bfad_s *bfad) { struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo; struct bfa_mem_dma_s *dma_info, *dma_elem; struct bfa_mem_kva_s *kva_info, *kva_elem; struct list_head *dm_qe, *km_qe; dma_info = &hal_meminfo->dma_info; kva_info = &hal_meminfo->kva_info; /* Iterate through the KVA meminfo queue */ list_for_each(km_qe, &kva_info->qe) { kva_elem = (struct bfa_mem_kva_s *) km_qe; vfree(kva_elem->kva); } /* Iterate through the DMA meminfo queue */ list_for_each(dm_qe, &dma_info->qe) { dma_elem = (struct bfa_mem_dma_s *) dm_qe; 
dma_free_coherent(&bfad->pcidev->dev, dma_elem->mem_len, dma_elem->kva, (dma_addr_t) dma_elem->dma); } memset(hal_meminfo, 0, sizeof(struct bfa_meminfo_s)); } void bfad_update_hal_cfg(struct bfa_iocfc_cfg_s *bfa_cfg) { if (num_rports > 0) bfa_cfg->fwcfg.num_rports = num_rports; if (num_ios > 0) bfa_cfg->fwcfg.num_ioim_reqs = num_ios; if (num_tms > 0) bfa_cfg->fwcfg.num_tskim_reqs = num_tms; if (num_fcxps > 0 && num_fcxps <= BFA_FCXP_MAX) bfa_cfg->fwcfg.num_fcxp_reqs = num_fcxps; if (num_ufbufs > 0 && num_ufbufs <= BFA_UF_MAX) bfa_cfg->fwcfg.num_uf_bufs = num_ufbufs; if (reqq_size > 0) bfa_cfg->drvcfg.num_reqq_elems = reqq_size; if (rspq_size > 0) bfa_cfg->drvcfg.num_rspq_elems = rspq_size; if (num_sgpgs > 0 && num_sgpgs <= BFA_SGPG_MAX) bfa_cfg->drvcfg.num_sgpgs = num_sgpgs; /* * populate the hal values back to the driver for sysfs use. * otherwise, the default values will be shown as 0 in sysfs */ num_rports = bfa_cfg->fwcfg.num_rports; num_ios = bfa_cfg->fwcfg.num_ioim_reqs; num_tms = bfa_cfg->fwcfg.num_tskim_reqs; num_fcxps = bfa_cfg->fwcfg.num_fcxp_reqs; num_ufbufs = bfa_cfg->fwcfg.num_uf_bufs; reqq_size = bfa_cfg->drvcfg.num_reqq_elems; rspq_size = bfa_cfg->drvcfg.num_rspq_elems; num_sgpgs = bfa_cfg->drvcfg.num_sgpgs; } bfa_status_t bfad_hal_mem_alloc(struct bfad_s *bfad) { struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo; struct bfa_mem_dma_s *dma_info, *dma_elem; struct bfa_mem_kva_s *kva_info, *kva_elem; struct list_head *dm_qe, *km_qe; bfa_status_t rc = BFA_STATUS_OK; dma_addr_t phys_addr; bfa_cfg_get_default(&bfad->ioc_cfg); bfad_update_hal_cfg(&bfad->ioc_cfg); bfad->cfg_data.ioc_queue_depth = bfad->ioc_cfg.fwcfg.num_ioim_reqs; bfa_cfg_get_meminfo(&bfad->ioc_cfg, hal_meminfo, &bfad->bfa); dma_info = &hal_meminfo->dma_info; kva_info = &hal_meminfo->kva_info; /* Iterate through the KVA meminfo queue */ list_for_each(km_qe, &kva_info->qe) { kva_elem = (struct bfa_mem_kva_s *) km_qe; kva_elem->kva = vzalloc(kva_elem->mem_len); if (kva_elem->kva == NULL) { bfad_hal_mem_release(bfad); rc = BFA_STATUS_ENOMEM; goto ext; } } /* Iterate through the DMA meminfo queue */ list_for_each(dm_qe, &dma_info->qe) { dma_elem = (struct bfa_mem_dma_s *) dm_qe; dma_elem->kva = dma_alloc_coherent(&bfad->pcidev->dev, dma_elem->mem_len, &phys_addr, GFP_KERNEL); if (dma_elem->kva == NULL) { bfad_hal_mem_release(bfad); rc = BFA_STATUS_ENOMEM; goto ext; } dma_elem->dma = phys_addr; memset(dma_elem->kva, 0, dma_elem->mem_len); } ext: return rc; } /* * Create a vport under a vf. 
*/ bfa_status_t bfad_vport_create(struct bfad_s *bfad, u16 vf_id, struct bfa_lport_cfg_s *port_cfg, struct device *dev) { struct bfad_vport_s *vport; int rc = BFA_STATUS_OK; unsigned long flags; struct completion fcomp; vport = kzalloc(sizeof(struct bfad_vport_s), GFP_KERNEL); if (!vport) { rc = BFA_STATUS_ENOMEM; goto ext; } vport->drv_port.bfad = bfad; spin_lock_irqsave(&bfad->bfad_lock, flags); rc = bfa_fcs_vport_create(&vport->fcs_vport, &bfad->bfa_fcs, vf_id, port_cfg, vport); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (rc != BFA_STATUS_OK) goto ext_free_vport; if (port_cfg->roles & BFA_LPORT_ROLE_FCP_IM) { rc = bfad_im_scsi_host_alloc(bfad, vport->drv_port.im_port, dev); if (rc != BFA_STATUS_OK) goto ext_free_fcs_vport; } spin_lock_irqsave(&bfad->bfad_lock, flags); bfa_fcs_vport_start(&vport->fcs_vport); list_add_tail(&vport->list_entry, &bfad->vport_list); spin_unlock_irqrestore(&bfad->bfad_lock, flags); return BFA_STATUS_OK; ext_free_fcs_vport: spin_lock_irqsave(&bfad->bfad_lock, flags); vport->comp_del = &fcomp; init_completion(vport->comp_del); bfa_fcs_vport_delete(&vport->fcs_vport); spin_unlock_irqrestore(&bfad->bfad_lock, flags); wait_for_completion(vport->comp_del); ext_free_vport: kfree(vport); ext: return rc; } void bfad_bfa_tmo(struct timer_list *t) { struct bfad_s *bfad = from_timer(bfad, t, hal_tmo); unsigned long flags; struct list_head doneq; spin_lock_irqsave(&bfad->bfad_lock, flags); bfa_timer_beat(&bfad->bfa.timer_mod); bfa_comp_deq(&bfad->bfa, &doneq); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (!list_empty(&doneq)) { bfa_comp_process(&bfad->bfa, &doneq); spin_lock_irqsave(&bfad->bfad_lock, flags); bfa_comp_free(&bfad->bfa, &doneq); spin_unlock_irqrestore(&bfad->bfad_lock, flags); } mod_timer(&bfad->hal_tmo, jiffies + msecs_to_jiffies(BFA_TIMER_FREQ)); } void bfad_init_timer(struct bfad_s *bfad) { timer_setup(&bfad->hal_tmo, bfad_bfa_tmo, 0); mod_timer(&bfad->hal_tmo, jiffies + msecs_to_jiffies(BFA_TIMER_FREQ)); } int bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad) { int rc = -ENODEV; if (pci_enable_device(pdev)) { printk(KERN_ERR "pci_enable_device fail %p\n", pdev); goto out; } if (pci_request_regions(pdev, BFAD_DRIVER_NAME)) goto out_disable_device; pci_set_master(pdev); rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (rc) { rc = -ENODEV; printk(KERN_ERR "dma_set_mask_and_coherent fail %p\n", pdev); goto out_release_region; } bfad->pci_bar0_kva = pci_iomap(pdev, 0, pci_resource_len(pdev, 0)); bfad->pci_bar2_kva = pci_iomap(pdev, 2, pci_resource_len(pdev, 2)); if (bfad->pci_bar0_kva == NULL) { printk(KERN_ERR "Fail to map bar0\n"); rc = -ENODEV; goto out_release_region; } bfad->hal_pcidev.pci_slot = PCI_SLOT(pdev->devfn); bfad->hal_pcidev.pci_func = PCI_FUNC(pdev->devfn); bfad->hal_pcidev.pci_bar_kva = bfad->pci_bar0_kva; bfad->hal_pcidev.device_id = pdev->device; bfad->hal_pcidev.ssid = pdev->subsystem_device; bfad->pci_name = pci_name(pdev); bfad->pci_attr.vendor_id = pdev->vendor; bfad->pci_attr.device_id = pdev->device; bfad->pci_attr.ssid = pdev->subsystem_device; bfad->pci_attr.ssvid = pdev->subsystem_vendor; bfad->pci_attr.pcifn = PCI_FUNC(pdev->devfn); bfad->pcidev = pdev; /* Adjust PCIe Maximum Read Request Size */ if (pci_is_pcie(pdev) && pcie_max_read_reqsz) { if (pcie_max_read_reqsz >= 128 && pcie_max_read_reqsz <= 4096 && is_power_of_2(pcie_max_read_reqsz)) { int max_rq = pcie_get_readrq(pdev); printk(KERN_WARNING "BFA[%s]: " "pcie_max_read_request_size is %d, " "reset to %d\n", bfad->pci_name, max_rq, 
pcie_max_read_reqsz); pcie_set_readrq(pdev, pcie_max_read_reqsz); } else { printk(KERN_WARNING "BFA[%s]: invalid " "pcie_max_read_request_size %d ignored\n", bfad->pci_name, pcie_max_read_reqsz); } } pci_save_state(pdev); return 0; out_release_region: pci_release_regions(pdev); out_disable_device: pci_disable_device(pdev); out: return rc; } void bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad) { pci_iounmap(pdev, bfad->pci_bar0_kva); pci_iounmap(pdev, bfad->pci_bar2_kva); pci_release_regions(pdev); pci_disable_device(pdev); } bfa_status_t bfad_drv_init(struct bfad_s *bfad) { bfa_status_t rc; unsigned long flags; bfad->cfg_data.rport_del_timeout = rport_del_timeout; bfad->cfg_data.lun_queue_depth = bfa_lun_queue_depth; bfad->cfg_data.io_max_sge = bfa_io_max_sge; bfad->cfg_data.binding_method = FCP_PWWN_BINDING; rc = bfad_hal_mem_alloc(bfad); if (rc != BFA_STATUS_OK) { printk(KERN_WARNING "bfad%d bfad_hal_mem_alloc failure\n", bfad->inst_no); printk(KERN_WARNING "Not enough memory to attach all QLogic BR-series HBA ports. System may need more memory.\n"); return BFA_STATUS_FAILED; } bfad->bfa.trcmod = bfad->trcmod; bfad->bfa.plog = &bfad->plog_buf; bfa_plog_init(&bfad->plog_buf); bfa_plog_str(&bfad->plog_buf, BFA_PL_MID_DRVR, BFA_PL_EID_DRIVER_START, 0, "Driver Attach"); bfa_attach(&bfad->bfa, bfad, &bfad->ioc_cfg, &bfad->meminfo, &bfad->hal_pcidev); /* FCS INIT */ spin_lock_irqsave(&bfad->bfad_lock, flags); bfad->bfa_fcs.trcmod = bfad->trcmod; bfa_fcs_attach(&bfad->bfa_fcs, &bfad->bfa, bfad, BFA_FALSE); bfad->bfa_fcs.fdmi_enabled = fdmi_enable; spin_unlock_irqrestore(&bfad->bfad_lock, flags); bfad->bfad_flags |= BFAD_DRV_INIT_DONE; return BFA_STATUS_OK; } void bfad_drv_uninit(struct bfad_s *bfad) { unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); init_completion(&bfad->comp); bfa_iocfc_stop(&bfad->bfa); spin_unlock_irqrestore(&bfad->bfad_lock, flags); wait_for_completion(&bfad->comp); del_timer_sync(&bfad->hal_tmo); bfa_isr_disable(&bfad->bfa); bfa_detach(&bfad->bfa); bfad_remove_intr(bfad); bfad_hal_mem_release(bfad); bfad->bfad_flags &= ~BFAD_DRV_INIT_DONE; } void bfad_drv_start(struct bfad_s *bfad) { unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); bfa_iocfc_start(&bfad->bfa); bfa_fcs_pbc_vport_init(&bfad->bfa_fcs); bfa_fcs_fabric_modstart(&bfad->bfa_fcs); bfad->bfad_flags |= BFAD_HAL_START_DONE; spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (bfad->im) flush_workqueue(bfad->im->drv_workq); } void bfad_fcs_stop(struct bfad_s *bfad) { unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); init_completion(&bfad->comp); bfad->pport.flags |= BFAD_PORT_DELETE; bfa_fcs_exit(&bfad->bfa_fcs); spin_unlock_irqrestore(&bfad->bfad_lock, flags); wait_for_completion(&bfad->comp); bfa_sm_send_event(bfad, BFAD_E_FCS_EXIT_COMP); } void bfad_stop(struct bfad_s *bfad) { unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); init_completion(&bfad->comp); bfa_iocfc_stop(&bfad->bfa); bfad->bfad_flags &= ~BFAD_HAL_START_DONE; spin_unlock_irqrestore(&bfad->bfad_lock, flags); wait_for_completion(&bfad->comp); bfa_sm_send_event(bfad, BFAD_E_EXIT_COMP); } bfa_status_t bfad_cfg_pport(struct bfad_s *bfad, enum bfa_lport_role role) { int rc = BFA_STATUS_OK; /* Allocate scsi_host for the physical port */ if ((supported_fc4s & BFA_LPORT_ROLE_FCP_IM) && (role & BFA_LPORT_ROLE_FCP_IM)) { if (bfad->pport.im_port == NULL) { rc = BFA_STATUS_FAILED; goto out; } rc = bfad_im_scsi_host_alloc(bfad, bfad->pport.im_port, &bfad->pcidev->dev); if (rc != 
BFA_STATUS_OK) goto out; bfad->pport.roles |= BFA_LPORT_ROLE_FCP_IM; } bfad->bfad_flags |= BFAD_CFG_PPORT_DONE; out: return rc; } void bfad_uncfg_pport(struct bfad_s *bfad) { if ((supported_fc4s & BFA_LPORT_ROLE_FCP_IM) && (bfad->pport.roles & BFA_LPORT_ROLE_FCP_IM)) { bfad_im_scsi_host_free(bfad, bfad->pport.im_port); bfad_im_port_clean(bfad->pport.im_port); kfree(bfad->pport.im_port); bfad->pport.roles &= ~BFA_LPORT_ROLE_FCP_IM; } bfad->bfad_flags &= ~BFAD_CFG_PPORT_DONE; } bfa_status_t bfad_start_ops(struct bfad_s *bfad) { int retval; unsigned long flags; struct bfad_vport_s *vport, *vport_new; struct bfa_fcs_driver_info_s driver_info; /* Limit min/max. xfer size to [64k-32MB] */ if (max_xfer_size < BFAD_MIN_SECTORS >> 1) max_xfer_size = BFAD_MIN_SECTORS >> 1; if (max_xfer_size > BFAD_MAX_SECTORS >> 1) max_xfer_size = BFAD_MAX_SECTORS >> 1; /* Fill the driver_info info to fcs*/ memset(&driver_info, 0, sizeof(driver_info)); strscpy(driver_info.version, BFAD_DRIVER_VERSION, sizeof(driver_info.version)); if (host_name) strscpy(driver_info.host_machine_name, host_name, sizeof(driver_info.host_machine_name)); if (os_name) strscpy(driver_info.host_os_name, os_name, sizeof(driver_info.host_os_name)); if (os_patch) strscpy(driver_info.host_os_patch, os_patch, sizeof(driver_info.host_os_patch)); strscpy(driver_info.os_device_name, bfad->pci_name, sizeof(driver_info.os_device_name)); /* FCS driver info init */ spin_lock_irqsave(&bfad->bfad_lock, flags); bfa_fcs_driver_info_init(&bfad->bfa_fcs, &driver_info); if (bfad->bfad_flags & BFAD_CFG_PPORT_DONE) bfa_fcs_update_cfg(&bfad->bfa_fcs); else bfa_fcs_init(&bfad->bfa_fcs); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (!(bfad->bfad_flags & BFAD_CFG_PPORT_DONE)) { retval = bfad_cfg_pport(bfad, BFA_LPORT_ROLE_FCP_IM); if (retval != BFA_STATUS_OK) return BFA_STATUS_FAILED; } /* Setup fc host fixed attribute if the lk supports */ bfad_fc_host_init(bfad->pport.im_port); /* BFAD level FC4 IM specific resource allocation */ retval = bfad_im_probe(bfad); if (retval != BFA_STATUS_OK) { printk(KERN_WARNING "bfad_im_probe failed\n"); if (bfa_sm_cmp_state(bfad, bfad_sm_initializing)) bfa_sm_set_state(bfad, bfad_sm_failed); return BFA_STATUS_FAILED; } else bfad->bfad_flags |= BFAD_FC4_PROBE_DONE; bfad_drv_start(bfad); /* Complete pbc vport create */ list_for_each_entry_safe(vport, vport_new, &bfad->pbc_vport_list, list_entry) { struct fc_vport_identifiers vid; struct fc_vport *fc_vport; char pwwn_buf[BFA_STRING_32]; memset(&vid, 0, sizeof(vid)); vid.roles = FC_PORT_ROLE_FCP_INITIATOR; vid.vport_type = FC_PORTTYPE_NPIV; vid.disable = false; vid.node_name = wwn_to_u64((u8 *) (&((vport->fcs_vport).lport.port_cfg.nwwn))); vid.port_name = wwn_to_u64((u8 *) (&((vport->fcs_vport).lport.port_cfg.pwwn))); fc_vport = fc_vport_create(bfad->pport.im_port->shost, 0, &vid); if (!fc_vport) { wwn2str(pwwn_buf, vid.port_name); printk(KERN_WARNING "bfad%d: failed to create pbc vport" " %s\n", bfad->inst_no, pwwn_buf); } list_del(&vport->list_entry); kfree(vport); } /* * If bfa_linkup_delay is set to -1 default; try to retrive the * value using the bfad_get_linkup_delay(); else use the * passed in module param value as the bfa_linkup_delay. 
*/ if (bfa_linkup_delay < 0) { bfa_linkup_delay = bfad_get_linkup_delay(bfad); bfad_rport_online_wait(bfad); bfa_linkup_delay = -1; } else bfad_rport_online_wait(bfad); BFA_LOG(KERN_INFO, bfad, bfa_log_level, "bfa device claimed\n"); return BFA_STATUS_OK; } int bfad_worker(void *ptr) { struct bfad_s *bfad = ptr; unsigned long flags; if (kthread_should_stop()) return 0; /* Send event BFAD_E_INIT_SUCCESS */ bfa_sm_send_event(bfad, BFAD_E_INIT_SUCCESS); spin_lock_irqsave(&bfad->bfad_lock, flags); bfad->bfad_tsk = NULL; spin_unlock_irqrestore(&bfad->bfad_lock, flags); return 0; } /* * BFA driver interrupt functions */ irqreturn_t bfad_intx(int irq, void *dev_id) { struct bfad_s *bfad = dev_id; struct list_head doneq; unsigned long flags; bfa_boolean_t rc; spin_lock_irqsave(&bfad->bfad_lock, flags); rc = bfa_intx(&bfad->bfa); if (!rc) { spin_unlock_irqrestore(&bfad->bfad_lock, flags); return IRQ_NONE; } bfa_comp_deq(&bfad->bfa, &doneq); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (!list_empty(&doneq)) { bfa_comp_process(&bfad->bfa, &doneq); spin_lock_irqsave(&bfad->bfad_lock, flags); bfa_comp_free(&bfad->bfa, &doneq); spin_unlock_irqrestore(&bfad->bfad_lock, flags); } return IRQ_HANDLED; } static irqreturn_t bfad_msix(int irq, void *dev_id) { struct bfad_msix_s *vec = dev_id; struct bfad_s *bfad = vec->bfad; struct list_head doneq; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); bfa_msix(&bfad->bfa, vec->msix.entry); bfa_comp_deq(&bfad->bfa, &doneq); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (!list_empty(&doneq)) { bfa_comp_process(&bfad->bfa, &doneq); spin_lock_irqsave(&bfad->bfad_lock, flags); bfa_comp_free(&bfad->bfa, &doneq); spin_unlock_irqrestore(&bfad->bfad_lock, flags); } return IRQ_HANDLED; } /* * Initialize the MSIX entry table. */ static void bfad_init_msix_entry(struct bfad_s *bfad, struct msix_entry *msix_entries, int mask, int max_bit) { int i; int match = 0x00000001; for (i = 0, bfad->nvec = 0; i < MAX_MSIX_ENTRY; i++) { if (mask & match) { bfad->msix_tab[bfad->nvec].msix.entry = i; bfad->msix_tab[bfad->nvec].bfad = bfad; msix_entries[bfad->nvec].entry = i; bfad->nvec++; } match <<= 1; } } int bfad_install_msix_handler(struct bfad_s *bfad) { int i, error = 0; for (i = 0; i < bfad->nvec; i++) { sprintf(bfad->msix_tab[i].name, "bfa-%s-%s", bfad->pci_name, ((bfa_asic_id_cb(bfad->hal_pcidev.device_id)) ? msix_name_cb[i] : msix_name_ct[i])); error = request_irq(bfad->msix_tab[i].msix.vector, (irq_handler_t) bfad_msix, 0, bfad->msix_tab[i].name, &bfad->msix_tab[i]); bfa_trc(bfad, i); bfa_trc(bfad, bfad->msix_tab[i].msix.vector); if (error) { int j; for (j = 0; j < i; j++) free_irq(bfad->msix_tab[j].msix.vector, &bfad->msix_tab[j]); bfad->bfad_flags &= ~BFAD_MSIX_ON; pci_disable_msix(bfad->pcidev); return 1; } } return 0; } /* * Setup MSIX based interrupt. */ int bfad_setup_intr(struct bfad_s *bfad) { int error; u32 mask = 0, i, num_bit = 0, max_bit = 0; struct msix_entry msix_entries[MAX_MSIX_ENTRY]; struct pci_dev *pdev = bfad->pcidev; u16 reg; /* Call BFA to get the msix map for this PCI function. 
*/ bfa_msix_getvecs(&bfad->bfa, &mask, &num_bit, &max_bit); /* Set up the msix entry table */ bfad_init_msix_entry(bfad, msix_entries, mask, max_bit); if ((bfa_asic_id_ctc(pdev->device) && !msix_disable_ct) || (bfa_asic_id_cb(pdev->device) && !msix_disable_cb)) { error = pci_enable_msix_exact(bfad->pcidev, msix_entries, bfad->nvec); /* In CT1 & CT2, try to allocate just one vector */ if (error == -ENOSPC && bfa_asic_id_ctc(pdev->device)) { printk(KERN_WARNING "bfa %s: trying one msix " "vector failed to allocate %d[%d]\n", bfad->pci_name, bfad->nvec, error); bfad->nvec = 1; error = pci_enable_msix_exact(bfad->pcidev, msix_entries, 1); } if (error) { printk(KERN_WARNING "bfad%d: " "pci_enable_msix_exact failed (%d), " "use line based.\n", bfad->inst_no, error); goto line_based; } /* Disable INTX in MSI-X mode */ pci_read_config_word(pdev, PCI_COMMAND, &reg); if (!(reg & PCI_COMMAND_INTX_DISABLE)) pci_write_config_word(pdev, PCI_COMMAND, reg | PCI_COMMAND_INTX_DISABLE); /* Save the vectors */ for (i = 0; i < bfad->nvec; i++) { bfa_trc(bfad, msix_entries[i].vector); bfad->msix_tab[i].msix.vector = msix_entries[i].vector; } bfa_msix_init(&bfad->bfa, bfad->nvec); bfad->bfad_flags |= BFAD_MSIX_ON; return 0; } line_based: error = request_irq(bfad->pcidev->irq, (irq_handler_t)bfad_intx, BFAD_IRQ_FLAGS, BFAD_DRIVER_NAME, bfad); if (error) return error; bfad->bfad_flags |= BFAD_INTX_ON; return 0; } void bfad_remove_intr(struct bfad_s *bfad) { int i; if (bfad->bfad_flags & BFAD_MSIX_ON) { for (i = 0; i < bfad->nvec; i++) free_irq(bfad->msix_tab[i].msix.vector, &bfad->msix_tab[i]); pci_disable_msix(bfad->pcidev); bfad->bfad_flags &= ~BFAD_MSIX_ON; } else if (bfad->bfad_flags & BFAD_INTX_ON) { free_irq(bfad->pcidev->irq, bfad); } } /* * PCI probe entry. */ int bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) { struct bfad_s *bfad; int error = -ENODEV, retval, i; /* For single port cards - only claim function 0 */ if ((pdev->device == BFA_PCI_DEVICE_ID_FC_8G1P) && (PCI_FUNC(pdev->devfn) != 0)) return -ENODEV; bfad = kzalloc(sizeof(struct bfad_s), GFP_KERNEL); if (!bfad) { error = -ENOMEM; goto out; } bfad->trcmod = kzalloc(sizeof(struct bfa_trc_mod_s), GFP_KERNEL); if (!bfad->trcmod) { printk(KERN_WARNING "Error alloc trace buffer!\n"); error = -ENOMEM; goto out_alloc_trace_failure; } /* TRACE INIT */ bfa_trc_init(bfad->trcmod); bfa_trc(bfad, bfad_inst); /* AEN INIT */ INIT_LIST_HEAD(&bfad->free_aen_q); INIT_LIST_HEAD(&bfad->active_aen_q); for (i = 0; i < BFA_AEN_MAX_ENTRY; i++) list_add_tail(&bfad->aen_list[i].qe, &bfad->free_aen_q); if (!(bfad_load_fwimg(pdev))) { kfree(bfad->trcmod); goto out_alloc_trace_failure; } retval = bfad_pci_init(pdev, bfad); if (retval) { printk(KERN_WARNING "bfad_pci_init failure!\n"); error = retval; goto out_pci_init_failure; } mutex_lock(&bfad_mutex); bfad->inst_no = bfad_inst++; list_add_tail(&bfad->list_entry, &bfad_list); mutex_unlock(&bfad_mutex); /* Initializing the state machine: State set to uninit */ bfa_sm_set_state(bfad, bfad_sm_uninit); spin_lock_init(&bfad->bfad_lock); spin_lock_init(&bfad->bfad_aen_spinlock); pci_set_drvdata(pdev, bfad); bfad->ref_count = 0; bfad->pport.bfad = bfad; INIT_LIST_HEAD(&bfad->pbc_vport_list); INIT_LIST_HEAD(&bfad->vport_list); /* Setup the debugfs node for this bfad */ if (bfa_debugfs_enable) bfad_debugfs_init(&bfad->pport); retval = bfad_drv_init(bfad); if (retval != BFA_STATUS_OK) goto out_drv_init_failure; bfa_sm_send_event(bfad, BFAD_E_CREATE); if (bfa_sm_cmp_state(bfad, bfad_sm_uninit)) goto 
out_bfad_sm_failure; return 0; out_bfad_sm_failure: bfad_hal_mem_release(bfad); out_drv_init_failure: /* Remove the debugfs node for this bfad */ kfree(bfad->regdata); bfad_debugfs_exit(&bfad->pport); mutex_lock(&bfad_mutex); bfad_inst--; list_del(&bfad->list_entry); mutex_unlock(&bfad_mutex); bfad_pci_uninit(pdev, bfad); out_pci_init_failure: kfree(bfad->trcmod); out_alloc_trace_failure: kfree(bfad); out: return error; } /* * PCI remove entry. */ void bfad_pci_remove(struct pci_dev *pdev) { struct bfad_s *bfad = pci_get_drvdata(pdev); unsigned long flags; bfa_trc(bfad, bfad->inst_no); spin_lock_irqsave(&bfad->bfad_lock, flags); if (bfad->bfad_tsk != NULL) { spin_unlock_irqrestore(&bfad->bfad_lock, flags); kthread_stop(bfad->bfad_tsk); } else { spin_unlock_irqrestore(&bfad->bfad_lock, flags); } /* Send Event BFAD_E_STOP */ bfa_sm_send_event(bfad, BFAD_E_STOP); /* Driver detach and dealloc mem */ spin_lock_irqsave(&bfad->bfad_lock, flags); bfa_detach(&bfad->bfa); spin_unlock_irqrestore(&bfad->bfad_lock, flags); bfad_hal_mem_release(bfad); /* Remove the debugfs node for this bfad */ kfree(bfad->regdata); bfad_debugfs_exit(&bfad->pport); /* Cleaning the BFAD instance */ mutex_lock(&bfad_mutex); bfad_inst--; list_del(&bfad->list_entry); mutex_unlock(&bfad_mutex); bfad_pci_uninit(pdev, bfad); kfree(bfad->trcmod); kfree(bfad); } /* * PCI Error Recovery entry, error detected. */ static pci_ers_result_t bfad_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) { struct bfad_s *bfad = pci_get_drvdata(pdev); unsigned long flags; pci_ers_result_t ret = PCI_ERS_RESULT_NONE; dev_printk(KERN_ERR, &pdev->dev, "error detected state: %d - flags: 0x%x\n", state, bfad->bfad_flags); switch (state) { case pci_channel_io_normal: /* non-fatal error */ spin_lock_irqsave(&bfad->bfad_lock, flags); bfad->bfad_flags &= ~BFAD_EEH_BUSY; /* Suspend/fail all bfa operations */ bfa_ioc_suspend(&bfad->bfa.ioc); spin_unlock_irqrestore(&bfad->bfad_lock, flags); del_timer_sync(&bfad->hal_tmo); ret = PCI_ERS_RESULT_CAN_RECOVER; break; case pci_channel_io_frozen: /* fatal error */ init_completion(&bfad->comp); spin_lock_irqsave(&bfad->bfad_lock, flags); bfad->bfad_flags |= BFAD_EEH_BUSY; /* Suspend/fail all bfa operations */ bfa_ioc_suspend(&bfad->bfa.ioc); bfa_fcs_stop(&bfad->bfa_fcs); spin_unlock_irqrestore(&bfad->bfad_lock, flags); wait_for_completion(&bfad->comp); bfad_remove_intr(bfad); del_timer_sync(&bfad->hal_tmo); pci_disable_device(pdev); ret = PCI_ERS_RESULT_NEED_RESET; break; case pci_channel_io_perm_failure: /* PCI Card is DEAD */ spin_lock_irqsave(&bfad->bfad_lock, flags); bfad->bfad_flags |= BFAD_EEH_BUSY | BFAD_EEH_PCI_CHANNEL_IO_PERM_FAILURE; spin_unlock_irqrestore(&bfad->bfad_lock, flags); /* If the error_detected handler is called with the reason * pci_channel_io_perm_failure - it will subsequently call * pci_remove() entry point to remove the pci device from the * system - So defer the cleanup to pci_remove(); cleaning up * here causes inconsistent state during pci_remove(). 
*/ ret = PCI_ERS_RESULT_DISCONNECT; break; default: WARN_ON(1); } return ret; } static int restart_bfa(struct bfad_s *bfad) { unsigned long flags; struct pci_dev *pdev = bfad->pcidev; bfa_attach(&bfad->bfa, bfad, &bfad->ioc_cfg, &bfad->meminfo, &bfad->hal_pcidev); /* Enable Interrupt and wait bfa_init completion */ if (bfad_setup_intr(bfad)) { dev_printk(KERN_WARNING, &pdev->dev, "%s: bfad_setup_intr failed\n", bfad->pci_name); bfa_sm_send_event(bfad, BFAD_E_INIT_FAILED); return -1; } init_completion(&bfad->comp); spin_lock_irqsave(&bfad->bfad_lock, flags); bfa_iocfc_init(&bfad->bfa); spin_unlock_irqrestore(&bfad->bfad_lock, flags); /* Set up interrupt handler for each vectors */ if ((bfad->bfad_flags & BFAD_MSIX_ON) && bfad_install_msix_handler(bfad)) dev_printk(KERN_WARNING, &pdev->dev, "%s: install_msix failed.\n", bfad->pci_name); bfad_init_timer(bfad); wait_for_completion(&bfad->comp); bfad_drv_start(bfad); return 0; } /* * PCI Error Recovery entry, re-initialize the chip. */ static pci_ers_result_t bfad_pci_slot_reset(struct pci_dev *pdev) { struct bfad_s *bfad = pci_get_drvdata(pdev); u8 byte; int rc; dev_printk(KERN_ERR, &pdev->dev, "bfad_pci_slot_reset flags: 0x%x\n", bfad->bfad_flags); if (pci_enable_device(pdev)) { dev_printk(KERN_ERR, &pdev->dev, "Cannot re-enable " "PCI device after reset.\n"); return PCI_ERS_RESULT_DISCONNECT; } pci_restore_state(pdev); /* * Read some byte (e.g. DMA max. payload size which can't * be 0xff any time) to make sure - we did not hit another PCI error * in the middle of recovery. If we did, then declare permanent failure. */ pci_read_config_byte(pdev, 0x68, &byte); if (byte == 0xff) { dev_printk(KERN_ERR, &pdev->dev, "slot_reset failed ... got another PCI error !\n"); goto out_disable_device; } pci_save_state(pdev); pci_set_master(pdev); rc = dma_set_mask_and_coherent(&bfad->pcidev->dev, DMA_BIT_MASK(64)); if (rc) goto out_disable_device; if (restart_bfa(bfad) == -1) goto out_disable_device; dev_printk(KERN_WARNING, &pdev->dev, "slot_reset completed flags: 0x%x!\n", bfad->bfad_flags); return PCI_ERS_RESULT_RECOVERED; out_disable_device: pci_disable_device(pdev); return PCI_ERS_RESULT_DISCONNECT; } static pci_ers_result_t bfad_pci_mmio_enabled(struct pci_dev *pdev) { unsigned long flags; struct bfad_s *bfad = pci_get_drvdata(pdev); dev_printk(KERN_INFO, &pdev->dev, "mmio_enabled\n"); /* Fetch FW diagnostic information */ bfa_ioc_debug_save_ftrc(&bfad->bfa.ioc); /* Cancel all pending IOs */ spin_lock_irqsave(&bfad->bfad_lock, flags); init_completion(&bfad->comp); bfa_fcs_stop(&bfad->bfa_fcs); spin_unlock_irqrestore(&bfad->bfad_lock, flags); wait_for_completion(&bfad->comp); bfad_remove_intr(bfad); del_timer_sync(&bfad->hal_tmo); pci_disable_device(pdev); return PCI_ERS_RESULT_NEED_RESET; } static void bfad_pci_resume(struct pci_dev *pdev) { unsigned long flags; struct bfad_s *bfad = pci_get_drvdata(pdev); dev_printk(KERN_WARNING, &pdev->dev, "resume\n"); /* wait until the link is online */ bfad_rport_online_wait(bfad); spin_lock_irqsave(&bfad->bfad_lock, flags); bfad->bfad_flags &= ~BFAD_EEH_BUSY; spin_unlock_irqrestore(&bfad->bfad_lock, flags); } struct pci_device_id bfad_id_table[] = { { .vendor = BFA_PCI_VENDOR_ID_BROCADE, .device = BFA_PCI_DEVICE_ID_FC_8G2P, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { .vendor = BFA_PCI_VENDOR_ID_BROCADE, .device = BFA_PCI_DEVICE_ID_FC_8G1P, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { .vendor = BFA_PCI_VENDOR_ID_BROCADE, .device = BFA_PCI_DEVICE_ID_CT, .subvendor = PCI_ANY_ID, .subdevice = 
PCI_ANY_ID, .class = (PCI_CLASS_SERIAL_FIBER << 8), .class_mask = ~0, }, { .vendor = BFA_PCI_VENDOR_ID_BROCADE, .device = BFA_PCI_DEVICE_ID_CT_FC, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .class = (PCI_CLASS_SERIAL_FIBER << 8), .class_mask = ~0, }, { .vendor = BFA_PCI_VENDOR_ID_BROCADE, .device = BFA_PCI_DEVICE_ID_CT2, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .class = (PCI_CLASS_SERIAL_FIBER << 8), .class_mask = ~0, }, { .vendor = BFA_PCI_VENDOR_ID_BROCADE, .device = BFA_PCI_DEVICE_ID_CT2_QUAD, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .class = (PCI_CLASS_SERIAL_FIBER << 8), .class_mask = ~0, }, {0, 0}, }; MODULE_DEVICE_TABLE(pci, bfad_id_table); /* * PCI error recovery handlers. */ static struct pci_error_handlers bfad_err_handler = { .error_detected = bfad_pci_error_detected, .slot_reset = bfad_pci_slot_reset, .mmio_enabled = bfad_pci_mmio_enabled, .resume = bfad_pci_resume, }; static struct pci_driver bfad_pci_driver = { .name = BFAD_DRIVER_NAME, .id_table = bfad_id_table, .probe = bfad_pci_probe, .remove = bfad_pci_remove, .err_handler = &bfad_err_handler, }; /* * Driver module init. */ static int __init bfad_init(void) { int error = 0; pr_info("QLogic BR-series BFA FC/FCOE SCSI driver - version: %s\n", BFAD_DRIVER_VERSION); if (num_sgpgs > 0) num_sgpgs_parm = num_sgpgs; error = bfad_im_module_init(); if (error) { error = -ENOMEM; printk(KERN_WARNING "bfad_im_module_init failure\n"); goto ext; } if (strcmp(FCPI_NAME, " fcpim") == 0) supported_fc4s |= BFA_LPORT_ROLE_FCP_IM; bfa_auto_recover = ioc_auto_recover; bfa_fcs_rport_set_del_timeout(rport_del_timeout); bfa_fcs_rport_set_max_logins(max_rport_logins); error = pci_register_driver(&bfad_pci_driver); if (error) { printk(KERN_WARNING "pci_register_driver failure\n"); goto ext; } return 0; ext: bfad_im_module_exit(); return error; } /* * Driver module exit. 
*/ static void __exit bfad_exit(void) { pci_unregister_driver(&bfad_pci_driver); bfad_im_module_exit(); bfad_free_fwimg(); } /* Firmware handling */ static void bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image, u32 *bfi_image_size, char *fw_name) { const struct firmware *fw; if (request_firmware(&fw, fw_name, &pdev->dev)) { printk(KERN_ALERT "Can't locate firmware %s\n", fw_name); *bfi_image = NULL; goto out; } *bfi_image = vmalloc(fw->size); if (NULL == *bfi_image) { printk(KERN_ALERT "Fail to allocate buffer for fw image " "size=%x!\n", (u32) fw->size); goto out; } memcpy(*bfi_image, fw->data, fw->size); *bfi_image_size = fw->size/sizeof(u32); out: release_firmware(fw); } static u32 * bfad_load_fwimg(struct pci_dev *pdev) { if (bfa_asic_id_ct2(pdev->device)) { if (bfi_image_ct2_size == 0) bfad_read_firmware(pdev, &bfi_image_ct2, &bfi_image_ct2_size, BFAD_FW_FILE_CT2); return bfi_image_ct2; } else if (bfa_asic_id_ct(pdev->device)) { if (bfi_image_ct_size == 0) bfad_read_firmware(pdev, &bfi_image_ct, &bfi_image_ct_size, BFAD_FW_FILE_CT); return bfi_image_ct; } else if (bfa_asic_id_cb(pdev->device)) { if (bfi_image_cb_size == 0) bfad_read_firmware(pdev, &bfi_image_cb, &bfi_image_cb_size, BFAD_FW_FILE_CB); return bfi_image_cb; } return NULL; } static void bfad_free_fwimg(void) { if (bfi_image_ct2_size && bfi_image_ct2) vfree(bfi_image_ct2); if (bfi_image_ct_size && bfi_image_ct) vfree(bfi_image_ct); if (bfi_image_cb_size && bfi_image_cb) vfree(bfi_image_cb); } module_init(bfad_init); module_exit(bfad_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("QLogic BR-series Fibre Channel HBA Driver" BFAD_PROTO_NAME); MODULE_AUTHOR("QLogic Corporation"); MODULE_VERSION(BFAD_DRIVER_VERSION);
linux-master
drivers/scsi/bfa/bfad.c
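/*
 * Illustrative sketch (not part of the driver sources above): the
 * completion-handling discipline shared by bfad_intx(), bfad_msix() and
 * bfad_bfa_tmo() in bfad.c -- dequeue pending completions under
 * bfad_lock, run the callbacks with the lock dropped, then return the
 * queue elements under the lock again.  example_drain_completions() is
 * a hypothetical helper name.
 */
#include <linux/list.h>
#include <linux/spinlock.h>
#include "bfad_drv.h"

static void
example_drain_completions(struct bfad_s *bfad)
{
	struct list_head doneq;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_comp_deq(&bfad->bfa, &doneq);	/* splices bfa->comp_q */
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (list_empty(&doneq))
		return;

	/* Run the deferred callbacks outside the lock, as the ISR and
	 * timer paths in bfad.c do. */
	bfa_comp_process(&bfad->bfa, &doneq);

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_comp_free(&bfad->bfa, &doneq);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
}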
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. * Copyright (c) 2014- QLogic Corporation. * All rights reserved * www.qlogic.com * * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. */ #include "bfad_drv.h" #include "bfa_modules.h" #include "bfi_reg.h" void bfa_hwcb_reginit(struct bfa_s *bfa) { struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs; void __iomem *kva = bfa_ioc_bar0(&bfa->ioc); int fn = bfa_ioc_pcifn(&bfa->ioc); if (fn == 0) { bfa_regs->intr_status = (kva + HOSTFN0_INT_STATUS); bfa_regs->intr_mask = (kva + HOSTFN0_INT_MSK); } else { bfa_regs->intr_status = (kva + HOSTFN1_INT_STATUS); bfa_regs->intr_mask = (kva + HOSTFN1_INT_MSK); } } static void bfa_hwcb_reqq_ack_msix(struct bfa_s *bfa, int reqq) { writel(__HFN_INT_CPE_Q0 << CPE_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), reqq), bfa->iocfc.bfa_regs.intr_status); } /* * Actions to respond RME Interrupt for Crossbow ASIC: * - Write 1 to Interrupt Status register * INTX - done in bfa_intx() * MSIX - done in bfa_hwcb_rspq_ack_msix() * - Update CI (only if new CI) */ static void bfa_hwcb_rspq_ack_msix(struct bfa_s *bfa, int rspq, u32 ci) { writel(__HFN_INT_RME_Q0 << RME_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), rspq), bfa->iocfc.bfa_regs.intr_status); if (bfa_rspq_ci(bfa, rspq) == ci) return; bfa_rspq_ci(bfa, rspq) = ci; writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]); } void bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci) { if (bfa_rspq_ci(bfa, rspq) == ci) return; bfa_rspq_ci(bfa, rspq) = ci; writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]); } void bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap, u32 *num_vecs, u32 *max_vec_bit) { #define __HFN_NUMINTS 13 if (bfa_ioc_pcifn(&bfa->ioc) == 0) { *msix_vecs_bmap = (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 | __HFN_INT_CPE_Q2 | __HFN_INT_CPE_Q3 | __HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 | __HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 | __HFN_INT_MBOX_LPU0); *max_vec_bit = __HFN_INT_MBOX_LPU0; } else { *msix_vecs_bmap = (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 | __HFN_INT_CPE_Q6 | __HFN_INT_CPE_Q7 | __HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 | __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 | __HFN_INT_MBOX_LPU1); *max_vec_bit = __HFN_INT_MBOX_LPU1; } *msix_vecs_bmap |= (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 | __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS); *num_vecs = __HFN_NUMINTS; } /* * Dummy interrupt handler for handling spurious interrupts. */ static void bfa_hwcb_msix_dummy(struct bfa_s *bfa, int vec) { } /* * No special setup required for crossbow -- vector assignments are implicit. 
*/ void bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs) { WARN_ON((nvecs != 1) && (nvecs != __HFN_NUMINTS)); bfa->msix.nvecs = nvecs; bfa_hwcb_msix_uninstall(bfa); } void bfa_hwcb_msix_ctrl_install(struct bfa_s *bfa) { int i; if (bfa->msix.nvecs == 0) return; if (bfa->msix.nvecs == 1) { for (i = BFI_MSIX_CPE_QMIN_CB; i < BFI_MSIX_CB_MAX; i++) bfa->msix.handler[i] = bfa_msix_all; return; } for (i = BFI_MSIX_RME_QMAX_CB+1; i < BFI_MSIX_CB_MAX; i++) bfa->msix.handler[i] = bfa_msix_lpu_err; } void bfa_hwcb_msix_queue_install(struct bfa_s *bfa) { int i; if (bfa->msix.nvecs == 0) return; if (bfa->msix.nvecs == 1) { for (i = BFI_MSIX_CPE_QMIN_CB; i <= BFI_MSIX_RME_QMAX_CB; i++) bfa->msix.handler[i] = bfa_msix_all; return; } for (i = BFI_MSIX_CPE_QMIN_CB; i <= BFI_MSIX_CPE_QMAX_CB; i++) bfa->msix.handler[i] = bfa_msix_reqq; for (i = BFI_MSIX_RME_QMIN_CB; i <= BFI_MSIX_RME_QMAX_CB; i++) bfa->msix.handler[i] = bfa_msix_rspq; } void bfa_hwcb_msix_uninstall(struct bfa_s *bfa) { int i; for (i = 0; i < BFI_MSIX_CB_MAX; i++) bfa->msix.handler[i] = bfa_hwcb_msix_dummy; } /* * No special enable/disable -- vector assignments are implicit. */ void bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix) { if (msix) { bfa->iocfc.hwif.hw_reqq_ack = bfa_hwcb_reqq_ack_msix; bfa->iocfc.hwif.hw_rspq_ack = bfa_hwcb_rspq_ack_msix; } else { bfa->iocfc.hwif.hw_reqq_ack = NULL; bfa->iocfc.hwif.hw_rspq_ack = bfa_hwcb_rspq_ack; } } void bfa_hwcb_msix_get_rme_range(struct bfa_s *bfa, u32 *start, u32 *end) { *start = BFI_MSIX_RME_QMIN_CB; *end = BFI_MSIX_RME_QMAX_CB; }
linux-master
drivers/scsi/bfa/bfa_hw_cb.c
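bfa_hw_cb.c above switches interrupt-acknowledge behaviour at run time by swapping callbacks in bfa_hwcb_isr_mode_set(): in MSI-X mode the installed handlers also clear the interrupt status register, while in INTX mode only the consumer index is updated and the request-queue ack is left to the central INTX path. The sketch below reduces that idea to a tiny ops table; every type and function name in it is hypothetical and invented for this example.

#include <linux/types.h>

struct example_hw;			/* opaque, stands in for struct bfa_s */

/* Hypothetical ops table mirroring the hw_reqq_ack / hw_rspq_ack pointers. */
struct example_hw_ops {
	void (*reqq_ack)(struct example_hw *hw, int reqq);
	void (*rspq_ack)(struct example_hw *hw, int rspq, u32 ci);
};

/* Stubs: the real handlers write the interrupt status register and/or CI. */
static void example_reqq_ack_msix(struct example_hw *hw, int reqq) { }
static void example_rspq_ack_msix(struct example_hw *hw, int rspq, u32 ci) { }
static void example_rspq_ack_intx(struct example_hw *hw, int rspq, u32 ci) { }

/* Same shape as bfa_hwcb_isr_mode_set(): pick the callback set once, up front,
 * so the hot interrupt path never has to re-test the mode. */
static void example_isr_mode_set(struct example_hw_ops *ops, bool msix)
{
	if (msix) {
		ops->reqq_ack = example_reqq_ack_msix;
		ops->rspq_ack = example_rspq_ack_msix;
	} else {
		ops->reqq_ack = NULL;	/* INTX acks request queues centrally */
		ops->rspq_ack = example_rspq_ack_intx;
	}
}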
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. * Copyright (c) 2014- QLogic Corporation. * All rights reserved * www.qlogic.com * * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. */ #include <linux/uaccess.h> #include "bfad_drv.h" #include "bfad_im.h" #include "bfad_bsg.h" BFA_TRC_FILE(LDRV, BSG); static int bfad_iocmd_ioc_enable(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); /* If IOC is not in disabled state - return */ if (!bfa_ioc_is_disabled(&bfad->bfa.ioc)) { spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_OK; return 0; } init_completion(&bfad->enable_comp); bfa_iocfc_enable(&bfad->bfa); iocmd->status = BFA_STATUS_OK; spin_unlock_irqrestore(&bfad->bfad_lock, flags); wait_for_completion(&bfad->enable_comp); return 0; } static int bfad_iocmd_ioc_disable(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); if (bfa_ioc_is_disabled(&bfad->bfa.ioc)) { spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_OK; return 0; } if (bfad->disable_active) { spin_unlock_irqrestore(&bfad->bfad_lock, flags); return -EBUSY; } bfad->disable_active = BFA_TRUE; init_completion(&bfad->disable_comp); bfa_iocfc_disable(&bfad->bfa); spin_unlock_irqrestore(&bfad->bfad_lock, flags); wait_for_completion(&bfad->disable_comp); bfad->disable_active = BFA_FALSE; iocmd->status = BFA_STATUS_OK; return 0; } static int bfad_iocmd_ioc_get_info(struct bfad_s *bfad, void *cmd) { int i; struct bfa_bsg_ioc_info_s *iocmd = (struct bfa_bsg_ioc_info_s *)cmd; struct bfad_im_port_s *im_port; struct bfa_port_attr_s pattr; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); bfa_fcport_get_attr(&bfad->bfa, &pattr); iocmd->nwwn = pattr.nwwn; iocmd->pwwn = pattr.pwwn; iocmd->ioc_type = bfa_get_type(&bfad->bfa); iocmd->mac = bfa_get_mac(&bfad->bfa); iocmd->factory_mac = bfa_get_mfg_mac(&bfad->bfa); bfa_get_adapter_serial_num(&bfad->bfa, iocmd->serialnum); iocmd->factorynwwn = pattr.factorynwwn; iocmd->factorypwwn = pattr.factorypwwn; iocmd->bfad_num = bfad->inst_no; im_port = bfad->pport.im_port; iocmd->host = im_port->shost->host_no; spin_unlock_irqrestore(&bfad->bfad_lock, flags); strcpy(iocmd->name, bfad->adapter_name); strcpy(iocmd->port_name, bfad->port_name); strcpy(iocmd->hwpath, bfad->pci_name); /* set adapter hw path */ strcpy(iocmd->adapter_hwpath, bfad->pci_name); for (i = 0; iocmd->adapter_hwpath[i] != ':' && i < BFA_STRING_32; i++) ; for (; iocmd->adapter_hwpath[++i] != ':' && i < BFA_STRING_32; ) ; iocmd->adapter_hwpath[i] = '\0'; iocmd->status = BFA_STATUS_OK; return 0; } static int bfad_iocmd_ioc_get_attr(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_ioc_attr_s *iocmd = (struct bfa_bsg_ioc_attr_s *)cmd; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); bfa_ioc_get_attr(&bfad->bfa.ioc, &iocmd->ioc_attr); spin_unlock_irqrestore(&bfad->bfad_lock, flags); /* fill in driver attr info */ strcpy(iocmd->ioc_attr.driver_attr.driver, BFAD_DRIVER_NAME); strscpy(iocmd->ioc_attr.driver_attr.driver_ver, BFAD_DRIVER_VERSION, BFA_VERSION_LEN); strcpy(iocmd->ioc_attr.driver_attr.fw_ver, iocmd->ioc_attr.adapter_attr.fw_ver); strcpy(iocmd->ioc_attr.driver_attr.bios_ver, iocmd->ioc_attr.adapter_attr.optrom_ver); /* copy chip rev info first otherwise it will be overwritten 
*/ memcpy(bfad->pci_attr.chip_rev, iocmd->ioc_attr.pci_attr.chip_rev, sizeof(bfad->pci_attr.chip_rev)); memcpy(&iocmd->ioc_attr.pci_attr, &bfad->pci_attr, sizeof(struct bfa_ioc_pci_attr_s)); iocmd->status = BFA_STATUS_OK; return 0; } static int bfad_iocmd_ioc_get_stats(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_ioc_stats_s *iocmd = (struct bfa_bsg_ioc_stats_s *)cmd; bfa_ioc_get_stats(&bfad->bfa, &iocmd->ioc_stats); iocmd->status = BFA_STATUS_OK; return 0; } static int bfad_iocmd_ioc_get_fwstats(struct bfad_s *bfad, void *cmd, unsigned int payload_len) { struct bfa_bsg_ioc_fwstats_s *iocmd = (struct bfa_bsg_ioc_fwstats_s *)cmd; void *iocmd_bufptr; unsigned long flags; if (bfad_chk_iocmd_sz(payload_len, sizeof(struct bfa_bsg_ioc_fwstats_s), sizeof(struct bfa_fw_stats_s)) != BFA_STATUS_OK) { iocmd->status = BFA_STATUS_VERSION_FAIL; goto out; } iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_ioc_fwstats_s); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_ioc_fw_stats_get(&bfad->bfa.ioc, iocmd_bufptr); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) { bfa_trc(bfad, iocmd->status); goto out; } out: bfa_trc(bfad, 0x6666); return 0; } static int bfad_iocmd_ioc_reset_stats(struct bfad_s *bfad, void *cmd, unsigned int v_cmd) { struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; unsigned long flags; if (v_cmd == IOCMD_IOC_RESET_STATS) { bfa_ioc_clear_stats(&bfad->bfa); iocmd->status = BFA_STATUS_OK; } else if (v_cmd == IOCMD_IOC_RESET_FWSTATS) { spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_ioc_fw_stats_clear(&bfad->bfa.ioc); spin_unlock_irqrestore(&bfad->bfad_lock, flags); } return 0; } static int bfad_iocmd_ioc_set_name(struct bfad_s *bfad, void *cmd, unsigned int v_cmd) { struct bfa_bsg_ioc_name_s *iocmd = (struct bfa_bsg_ioc_name_s *) cmd; if (v_cmd == IOCMD_IOC_SET_ADAPTER_NAME) strcpy(bfad->adapter_name, iocmd->name); else if (v_cmd == IOCMD_IOC_SET_PORT_NAME) strcpy(bfad->port_name, iocmd->name); iocmd->status = BFA_STATUS_OK; return 0; } static int bfad_iocmd_iocfc_get_attr(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_iocfc_attr_s *iocmd = (struct bfa_bsg_iocfc_attr_s *)cmd; iocmd->status = BFA_STATUS_OK; bfa_iocfc_get_attr(&bfad->bfa, &iocmd->iocfc_attr); return 0; } static int bfad_iocmd_ioc_fw_sig_inv(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_ioc_fwsig_invalidate(&bfad->bfa.ioc); spin_unlock_irqrestore(&bfad->bfad_lock, flags); return 0; } static int bfad_iocmd_iocfc_set_intr(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_iocfc_intr_s *iocmd = (struct bfa_bsg_iocfc_intr_s *)cmd; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_iocfc_israttr_set(&bfad->bfa, &iocmd->attr); spin_unlock_irqrestore(&bfad->bfad_lock, flags); return 0; } static int bfad_iocmd_port_enable(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; struct bfad_hal_comp fcomp; unsigned long flags; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_port_enable(&bfad->bfa.modules.port, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) { bfa_trc(bfad, iocmd->status); return 0; } wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; return 0; } static int bfad_iocmd_port_disable(struct bfad_s *bfad, void *cmd) { 
struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; struct bfad_hal_comp fcomp; unsigned long flags; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_port_disable(&bfad->bfa.modules.port, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) { bfa_trc(bfad, iocmd->status); return 0; } wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; return 0; } static int bfad_iocmd_port_get_attr(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_port_attr_s *iocmd = (struct bfa_bsg_port_attr_s *)cmd; struct bfa_lport_attr_s port_attr; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); bfa_fcport_get_attr(&bfad->bfa, &iocmd->attr); bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->attr.topology != BFA_PORT_TOPOLOGY_NONE) iocmd->attr.pid = port_attr.pid; else iocmd->attr.pid = 0; iocmd->attr.port_type = port_attr.port_type; iocmd->attr.loopback = port_attr.loopback; iocmd->attr.authfail = port_attr.authfail; strscpy(iocmd->attr.port_symname.symname, port_attr.port_cfg.sym_name.symname, sizeof(iocmd->attr.port_symname.symname)); iocmd->status = BFA_STATUS_OK; return 0; } static int bfad_iocmd_port_get_stats(struct bfad_s *bfad, void *cmd, unsigned int payload_len) { struct bfa_bsg_port_stats_s *iocmd = (struct bfa_bsg_port_stats_s *)cmd; struct bfad_hal_comp fcomp; void *iocmd_bufptr; unsigned long flags; if (bfad_chk_iocmd_sz(payload_len, sizeof(struct bfa_bsg_port_stats_s), sizeof(union bfa_port_stats_u)) != BFA_STATUS_OK) { iocmd->status = BFA_STATUS_VERSION_FAIL; return 0; } iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_port_stats_s); init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_port_get_stats(&bfad->bfa.modules.port, iocmd_bufptr, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) { bfa_trc(bfad, iocmd->status); goto out; } wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; out: return 0; } static int bfad_iocmd_port_reset_stats(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; struct bfad_hal_comp fcomp; unsigned long flags; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_port_clear_stats(&bfad->bfa.modules.port, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) { bfa_trc(bfad, iocmd->status); return 0; } wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; return 0; } static int bfad_iocmd_set_port_cfg(struct bfad_s *bfad, void *iocmd, unsigned int v_cmd) { struct bfa_bsg_port_cfg_s *cmd = (struct bfa_bsg_port_cfg_s *)iocmd; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); if (v_cmd == IOCMD_PORT_CFG_TOPO) cmd->status = bfa_fcport_cfg_topology(&bfad->bfa, cmd->param); else if (v_cmd == IOCMD_PORT_CFG_SPEED) cmd->status = bfa_fcport_cfg_speed(&bfad->bfa, cmd->param); else if (v_cmd == IOCMD_PORT_CFG_ALPA) cmd->status = bfa_fcport_cfg_hardalpa(&bfad->bfa, cmd->param); else if (v_cmd == IOCMD_PORT_CLR_ALPA) cmd->status = bfa_fcport_clr_hardalpa(&bfad->bfa); spin_unlock_irqrestore(&bfad->bfad_lock, flags); return 0; } static int bfad_iocmd_port_cfg_maxfrsize(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_port_cfg_maxfrsize_s *iocmd = (struct bfa_bsg_port_cfg_maxfrsize_s *)cmd; unsigned long flags; 
spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_fcport_cfg_maxfrsize(&bfad->bfa, iocmd->maxfrsize); spin_unlock_irqrestore(&bfad->bfad_lock, flags); return 0; } static int bfad_iocmd_port_cfg_bbcr(struct bfad_s *bfad, unsigned int cmd, void *pcmd) { struct bfa_bsg_bbcr_enable_s *iocmd = (struct bfa_bsg_bbcr_enable_s *)pcmd; unsigned long flags; int rc; spin_lock_irqsave(&bfad->bfad_lock, flags); if (cmd == IOCMD_PORT_BBCR_ENABLE) rc = bfa_fcport_cfg_bbcr(&bfad->bfa, BFA_TRUE, iocmd->bb_scn); else if (cmd == IOCMD_PORT_BBCR_DISABLE) rc = bfa_fcport_cfg_bbcr(&bfad->bfa, BFA_FALSE, 0); else { spin_unlock_irqrestore(&bfad->bfad_lock, flags); return -EINVAL; } spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = rc; return 0; } static int bfad_iocmd_port_get_bbcr_attr(struct bfad_s *bfad, void *pcmd) { struct bfa_bsg_bbcr_attr_s *iocmd = (struct bfa_bsg_bbcr_attr_s *) pcmd; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_fcport_get_bbcr_attr(&bfad->bfa, &iocmd->attr); spin_unlock_irqrestore(&bfad->bfad_lock, flags); return 0; } static int bfad_iocmd_lport_get_attr(struct bfad_s *bfad, void *cmd) { struct bfa_fcs_lport_s *fcs_port; struct bfa_bsg_lport_attr_s *iocmd = (struct bfa_bsg_lport_attr_s *)cmd; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, iocmd->vf_id, iocmd->pwwn); if (fcs_port == NULL) { spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_UNKNOWN_LWWN; goto out; } bfa_fcs_lport_get_attr(fcs_port, &iocmd->port_attr); spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_OK; out: return 0; } static int bfad_iocmd_lport_get_stats(struct bfad_s *bfad, void *cmd) { struct bfa_fcs_lport_s *fcs_port; struct bfa_bsg_lport_stats_s *iocmd = (struct bfa_bsg_lport_stats_s *)cmd; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, iocmd->vf_id, iocmd->pwwn); if (fcs_port == NULL) { spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_UNKNOWN_LWWN; goto out; } bfa_fcs_lport_get_stats(fcs_port, &iocmd->port_stats); spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_OK; out: return 0; } static int bfad_iocmd_lport_reset_stats(struct bfad_s *bfad, void *cmd) { struct bfa_fcs_lport_s *fcs_port; struct bfa_bsg_reset_stats_s *iocmd = (struct bfa_bsg_reset_stats_s *)cmd; struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa); struct list_head *qe, *qen; struct bfa_itnim_s *itnim; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, iocmd->vf_id, iocmd->vpwwn); if (fcs_port == NULL) { spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_UNKNOWN_LWWN; goto out; } bfa_fcs_lport_clear_stats(fcs_port); /* clear IO stats from all active itnims */ list_for_each_safe(qe, qen, &fcpim->itnim_q) { itnim = (struct bfa_itnim_s *) qe; if (itnim->rport->rport_info.lp_tag != fcs_port->lp_tag) continue; bfa_itnim_clear_stats(itnim); } spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_OK; out: return 0; } static int bfad_iocmd_lport_get_iostats(struct bfad_s *bfad, void *cmd) { struct bfa_fcs_lport_s *fcs_port; struct bfa_bsg_lport_iostats_s *iocmd = (struct bfa_bsg_lport_iostats_s *)cmd; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, iocmd->vf_id, iocmd->pwwn); if (fcs_port == 
NULL) { spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_UNKNOWN_LWWN; goto out; } bfa_fcpim_port_iostats(&bfad->bfa, &iocmd->iostats, fcs_port->lp_tag); spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_OK; out: return 0; } static int bfad_iocmd_lport_get_rports(struct bfad_s *bfad, void *cmd, unsigned int payload_len) { struct bfa_bsg_lport_get_rports_s *iocmd = (struct bfa_bsg_lport_get_rports_s *)cmd; struct bfa_fcs_lport_s *fcs_port; unsigned long flags; void *iocmd_bufptr; if (iocmd->nrports == 0) return -EINVAL; if (bfad_chk_iocmd_sz(payload_len, sizeof(struct bfa_bsg_lport_get_rports_s), sizeof(struct bfa_rport_qualifier_s) * iocmd->nrports) != BFA_STATUS_OK) { iocmd->status = BFA_STATUS_VERSION_FAIL; return 0; } iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_lport_get_rports_s); spin_lock_irqsave(&bfad->bfad_lock, flags); fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, iocmd->vf_id, iocmd->pwwn); if (fcs_port == NULL) { spin_unlock_irqrestore(&bfad->bfad_lock, flags); bfa_trc(bfad, 0); iocmd->status = BFA_STATUS_UNKNOWN_LWWN; goto out; } bfa_fcs_lport_get_rport_quals(fcs_port, (struct bfa_rport_qualifier_s *)iocmd_bufptr, &iocmd->nrports); spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_OK; out: return 0; } static int bfad_iocmd_rport_get_attr(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_rport_attr_s *iocmd = (struct bfa_bsg_rport_attr_s *)cmd; struct bfa_fcs_lport_s *fcs_port; struct bfa_fcs_rport_s *fcs_rport; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, iocmd->vf_id, iocmd->pwwn); if (fcs_port == NULL) { bfa_trc(bfad, 0); spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_UNKNOWN_LWWN; goto out; } if (iocmd->pid) fcs_rport = bfa_fcs_lport_get_rport_by_qualifier(fcs_port, iocmd->rpwwn, iocmd->pid); else fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn); if (fcs_rport == NULL) { bfa_trc(bfad, 0); spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_UNKNOWN_RWWN; goto out; } bfa_fcs_rport_get_attr(fcs_rport, &iocmd->attr); spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_OK; out: return 0; } static int bfad_iocmd_rport_get_addr(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_rport_scsi_addr_s *iocmd = (struct bfa_bsg_rport_scsi_addr_s *)cmd; struct bfa_fcs_lport_s *fcs_port; struct bfa_fcs_itnim_s *fcs_itnim; struct bfad_itnim_s *drv_itnim; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, iocmd->vf_id, iocmd->pwwn); if (fcs_port == NULL) { bfa_trc(bfad, 0); spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_UNKNOWN_LWWN; goto out; } fcs_itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn); if (fcs_itnim == NULL) { bfa_trc(bfad, 0); spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_UNKNOWN_RWWN; goto out; } drv_itnim = fcs_itnim->itnim_drv; if (drv_itnim && drv_itnim->im_port) iocmd->host = drv_itnim->im_port->shost->host_no; else { bfa_trc(bfad, 0); spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_UNKNOWN_RWWN; goto out; } iocmd->target = drv_itnim->scsi_tgt_id; spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->bus = 0; iocmd->lun = 0; iocmd->status = BFA_STATUS_OK; out: return 0; } static int bfad_iocmd_rport_get_stats(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_rport_stats_s *iocmd = (struct 
bfa_bsg_rport_stats_s *)cmd; struct bfa_fcs_lport_s *fcs_port; struct bfa_fcs_rport_s *fcs_rport; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, iocmd->vf_id, iocmd->pwwn); if (fcs_port == NULL) { bfa_trc(bfad, 0); spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_UNKNOWN_LWWN; goto out; } fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn); if (fcs_rport == NULL) { bfa_trc(bfad, 0); spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_UNKNOWN_RWWN; goto out; } memcpy((void *)&iocmd->stats, (void *)&fcs_rport->stats, sizeof(struct bfa_rport_stats_s)); if (bfa_fcs_rport_get_halrport(fcs_rport)) { memcpy((void *)&iocmd->stats.hal_stats, (void *)&(bfa_fcs_rport_get_halrport(fcs_rport)->stats), sizeof(struct bfa_rport_hal_stats_s)); } spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_OK; out: return 0; } static int bfad_iocmd_rport_clr_stats(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_rport_reset_stats_s *iocmd = (struct bfa_bsg_rport_reset_stats_s *)cmd; struct bfa_fcs_lport_s *fcs_port; struct bfa_fcs_rport_s *fcs_rport; struct bfa_rport_s *rport; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, iocmd->vf_id, iocmd->pwwn); if (fcs_port == NULL) { spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_UNKNOWN_LWWN; goto out; } fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn); if (fcs_rport == NULL) { spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_UNKNOWN_RWWN; goto out; } memset((char *)&fcs_rport->stats, 0, sizeof(struct bfa_rport_stats_s)); rport = bfa_fcs_rport_get_halrport(fcs_rport); if (rport) memset(&rport->stats, 0, sizeof(rport->stats)); spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_OK; out: return 0; } static int bfad_iocmd_rport_set_speed(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_rport_set_speed_s *iocmd = (struct bfa_bsg_rport_set_speed_s *)cmd; struct bfa_fcs_lport_s *fcs_port; struct bfa_fcs_rport_s *fcs_rport; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, iocmd->vf_id, iocmd->pwwn); if (fcs_port == NULL) { spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_UNKNOWN_LWWN; goto out; } fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn); if (fcs_rport == NULL) { spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_UNKNOWN_RWWN; goto out; } fcs_rport->rpf.assigned_speed = iocmd->speed; /* Set this speed in f/w only if the RPSC speed is not available */ if (fcs_rport->rpf.rpsc_speed == BFA_PORT_SPEED_UNKNOWN) if (fcs_rport->bfa_rport) bfa_rport_speed(fcs_rport->bfa_rport, iocmd->speed); spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_OK; out: return 0; } static int bfad_iocmd_vport_get_attr(struct bfad_s *bfad, void *cmd) { struct bfa_fcs_vport_s *fcs_vport; struct bfa_bsg_vport_attr_s *iocmd = (struct bfa_bsg_vport_attr_s *)cmd; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, iocmd->vf_id, iocmd->vpwwn); if (fcs_vport == NULL) { spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_UNKNOWN_VWWN; goto out; } bfa_fcs_vport_get_attr(fcs_vport, &iocmd->vport_attr); spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_OK; out: 
return 0; } static int bfad_iocmd_vport_get_stats(struct bfad_s *bfad, void *cmd) { struct bfa_fcs_vport_s *fcs_vport; struct bfa_bsg_vport_stats_s *iocmd = (struct bfa_bsg_vport_stats_s *)cmd; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, iocmd->vf_id, iocmd->vpwwn); if (fcs_vport == NULL) { spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_UNKNOWN_VWWN; goto out; } memcpy((void *)&iocmd->vport_stats, (void *)&fcs_vport->vport_stats, sizeof(struct bfa_vport_stats_s)); memcpy((void *)&iocmd->vport_stats.port_stats, (void *)&fcs_vport->lport.stats, sizeof(struct bfa_lport_stats_s)); spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_OK; out: return 0; } static int bfad_iocmd_vport_clr_stats(struct bfad_s *bfad, void *cmd) { struct bfa_fcs_vport_s *fcs_vport; struct bfa_bsg_reset_stats_s *iocmd = (struct bfa_bsg_reset_stats_s *)cmd; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, iocmd->vf_id, iocmd->vpwwn); if (fcs_vport == NULL) { spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_UNKNOWN_VWWN; goto out; } memset(&fcs_vport->vport_stats, 0, sizeof(struct bfa_vport_stats_s)); memset(&fcs_vport->lport.stats, 0, sizeof(struct bfa_lport_stats_s)); spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_OK; out: return 0; } static int bfad_iocmd_fabric_get_lports(struct bfad_s *bfad, void *cmd, unsigned int payload_len) { struct bfa_bsg_fabric_get_lports_s *iocmd = (struct bfa_bsg_fabric_get_lports_s *)cmd; bfa_fcs_vf_t *fcs_vf; uint32_t nports = iocmd->nports; unsigned long flags; void *iocmd_bufptr; if (nports == 0) { iocmd->status = BFA_STATUS_EINVAL; goto out; } if (bfad_chk_iocmd_sz(payload_len, sizeof(struct bfa_bsg_fabric_get_lports_s), sizeof(wwn_t) * iocmd->nports) != BFA_STATUS_OK) { iocmd->status = BFA_STATUS_VERSION_FAIL; goto out; } iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_fabric_get_lports_s); spin_lock_irqsave(&bfad->bfad_lock, flags); fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id); if (fcs_vf == NULL) { spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_UNKNOWN_VFID; goto out; } bfa_fcs_vf_get_ports(fcs_vf, (wwn_t *)iocmd_bufptr, &nports); spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->nports = nports; iocmd->status = BFA_STATUS_OK; out: return 0; } static int bfad_iocmd_qos_set_bw(struct bfad_s *bfad, void *pcmd) { struct bfa_bsg_qos_bw_s *iocmd = (struct bfa_bsg_qos_bw_s *)pcmd; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_fcport_set_qos_bw(&bfad->bfa, &iocmd->qos_bw); spin_unlock_irqrestore(&bfad->bfad_lock, flags); return 0; } static int bfad_iocmd_ratelim(struct bfad_s *bfad, unsigned int cmd, void *pcmd) { struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd; struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) && (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)) iocmd->status = BFA_STATUS_TOPOLOGY_LOOP; else { if (cmd == IOCMD_RATELIM_ENABLE) fcport->cfg.ratelimit = BFA_TRUE; else if (cmd == IOCMD_RATELIM_DISABLE) fcport->cfg.ratelimit = BFA_FALSE; if (fcport->cfg.trl_def_speed == BFA_PORT_SPEED_UNKNOWN) fcport->cfg.trl_def_speed = BFA_PORT_SPEED_1GBPS; iocmd->status = BFA_STATUS_OK; } spin_unlock_irqrestore(&bfad->bfad_lock, 
flags); return 0; } static int bfad_iocmd_ratelim_speed(struct bfad_s *bfad, unsigned int cmd, void *pcmd) { struct bfa_bsg_trl_speed_s *iocmd = (struct bfa_bsg_trl_speed_s *)pcmd; struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); /* Auto and speeds greater than the supported speed, are invalid */ if ((iocmd->speed == BFA_PORT_SPEED_AUTO) || (iocmd->speed > fcport->speed_sup)) { iocmd->status = BFA_STATUS_UNSUPP_SPEED; spin_unlock_irqrestore(&bfad->bfad_lock, flags); return 0; } if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) && (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)) iocmd->status = BFA_STATUS_TOPOLOGY_LOOP; else { fcport->cfg.trl_def_speed = iocmd->speed; iocmd->status = BFA_STATUS_OK; } spin_unlock_irqrestore(&bfad->bfad_lock, flags); return 0; } static int bfad_iocmd_cfg_fcpim(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_fcpim_s *iocmd = (struct bfa_bsg_fcpim_s *)cmd; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); bfa_fcpim_path_tov_set(&bfad->bfa, iocmd->param); spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_OK; return 0; } static int bfad_iocmd_fcpim_get_modstats(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_fcpim_modstats_s *iocmd = (struct bfa_bsg_fcpim_modstats_s *)cmd; struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa); struct list_head *qe, *qen; struct bfa_itnim_s *itnim; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); /* accumulate IO stats from itnim */ memset((void *)&iocmd->modstats, 0, sizeof(struct bfa_itnim_iostats_s)); list_for_each_safe(qe, qen, &fcpim->itnim_q) { itnim = (struct bfa_itnim_s *) qe; bfa_fcpim_add_stats(&iocmd->modstats, &(itnim->stats)); } spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_OK; return 0; } static int bfad_iocmd_fcpim_clr_modstats(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_fcpim_modstatsclr_s *iocmd = (struct bfa_bsg_fcpim_modstatsclr_s *)cmd; struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa); struct list_head *qe, *qen; struct bfa_itnim_s *itnim; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); list_for_each_safe(qe, qen, &fcpim->itnim_q) { itnim = (struct bfa_itnim_s *) qe; bfa_itnim_clear_stats(itnim); } memset(&fcpim->del_itn_stats, 0, sizeof(struct bfa_fcpim_del_itn_stats_s)); spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_OK; return 0; } static int bfad_iocmd_fcpim_get_del_itn_stats(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_fcpim_del_itn_stats_s *iocmd = (struct bfa_bsg_fcpim_del_itn_stats_s *)cmd; struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa); unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); memcpy((void *)&iocmd->modstats, (void *)&fcpim->del_itn_stats, sizeof(struct bfa_fcpim_del_itn_stats_s)); spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_OK; return 0; } static int bfad_iocmd_itnim_get_attr(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_itnim_attr_s *iocmd = (struct bfa_bsg_itnim_attr_s *)cmd; struct bfa_fcs_lport_s *fcs_port; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, iocmd->vf_id, iocmd->lpwwn); if (!fcs_port) iocmd->status = BFA_STATUS_UNKNOWN_LWWN; else iocmd->status = bfa_fcs_itnim_attr_get(fcs_port, iocmd->rpwwn, &iocmd->attr); spin_unlock_irqrestore(&bfad->bfad_lock, flags); return 0; } static int bfad_iocmd_itnim_get_iostats(struct bfad_s *bfad, void *cmd) { struct 
bfa_bsg_itnim_iostats_s *iocmd = (struct bfa_bsg_itnim_iostats_s *)cmd; struct bfa_fcs_lport_s *fcs_port; struct bfa_fcs_itnim_s *itnim; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, iocmd->vf_id, iocmd->lpwwn); if (!fcs_port) { iocmd->status = BFA_STATUS_UNKNOWN_LWWN; bfa_trc(bfad, 0); } else { itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn); if (itnim == NULL) iocmd->status = BFA_STATUS_UNKNOWN_RWWN; else { iocmd->status = BFA_STATUS_OK; if (bfa_fcs_itnim_get_halitn(itnim)) memcpy((void *)&iocmd->iostats, (void *) &(bfa_fcs_itnim_get_halitn(itnim)->stats), sizeof(struct bfa_itnim_iostats_s)); } } spin_unlock_irqrestore(&bfad->bfad_lock, flags); return 0; } static int bfad_iocmd_itnim_reset_stats(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_rport_reset_stats_s *iocmd = (struct bfa_bsg_rport_reset_stats_s *)cmd; struct bfa_fcs_lport_s *fcs_port; struct bfa_fcs_itnim_s *itnim; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, iocmd->vf_id, iocmd->pwwn); if (!fcs_port) iocmd->status = BFA_STATUS_UNKNOWN_LWWN; else { itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn); if (itnim == NULL) iocmd->status = BFA_STATUS_UNKNOWN_RWWN; else { iocmd->status = BFA_STATUS_OK; bfa_fcs_itnim_stats_clear(fcs_port, iocmd->rpwwn); bfa_itnim_clear_stats(bfa_fcs_itnim_get_halitn(itnim)); } } spin_unlock_irqrestore(&bfad->bfad_lock, flags); return 0; } static int bfad_iocmd_itnim_get_itnstats(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_itnim_itnstats_s *iocmd = (struct bfa_bsg_itnim_itnstats_s *)cmd; struct bfa_fcs_lport_s *fcs_port; struct bfa_fcs_itnim_s *itnim; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, iocmd->vf_id, iocmd->lpwwn); if (!fcs_port) { iocmd->status = BFA_STATUS_UNKNOWN_LWWN; bfa_trc(bfad, 0); } else { itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn); if (itnim == NULL) iocmd->status = BFA_STATUS_UNKNOWN_RWWN; else { iocmd->status = BFA_STATUS_OK; bfa_fcs_itnim_stats_get(fcs_port, iocmd->rpwwn, &iocmd->itnstats); } } spin_unlock_irqrestore(&bfad->bfad_lock, flags); return 0; } static int bfad_iocmd_fcport_enable(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_fcport_enable(&bfad->bfa); spin_unlock_irqrestore(&bfad->bfad_lock, flags); return 0; } static int bfad_iocmd_fcport_disable(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_fcport_disable(&bfad->bfa); spin_unlock_irqrestore(&bfad->bfad_lock, flags); return 0; } static int bfad_iocmd_ioc_get_pcifn_cfg(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_pcifn_cfg_s *iocmd = (struct bfa_bsg_pcifn_cfg_s *)cmd; struct bfad_hal_comp fcomp; unsigned long flags; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_ablk_query(&bfad->bfa.modules.ablk, &iocmd->pcifn_cfg, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) goto out; wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; out: return 0; } static int bfad_iocmd_pcifn_create(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd; struct bfad_hal_comp fcomp; unsigned long flags; 
init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_ablk_pf_create(&bfad->bfa.modules.ablk, &iocmd->pcifn_id, iocmd->port, iocmd->pcifn_class, iocmd->bw_min, iocmd->bw_max, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) goto out; wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; out: return 0; } static int bfad_iocmd_pcifn_delete(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd; struct bfad_hal_comp fcomp; unsigned long flags; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_ablk_pf_delete(&bfad->bfa.modules.ablk, iocmd->pcifn_id, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) goto out; wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; out: return 0; } static int bfad_iocmd_pcifn_bw(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd; struct bfad_hal_comp fcomp; unsigned long flags; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_ablk_pf_update(&bfad->bfa.modules.ablk, iocmd->pcifn_id, iocmd->bw_min, iocmd->bw_max, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); bfa_trc(bfad, iocmd->status); if (iocmd->status != BFA_STATUS_OK) goto out; wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; bfa_trc(bfad, iocmd->status); out: return 0; } static int bfad_iocmd_adapter_cfg_mode(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_adapter_cfg_mode_s *iocmd = (struct bfa_bsg_adapter_cfg_mode_s *)cmd; struct bfad_hal_comp fcomp; unsigned long flags = 0; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_ablk_adapter_config(&bfad->bfa.modules.ablk, iocmd->cfg.mode, iocmd->cfg.max_pf, iocmd->cfg.max_vf, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) goto out; wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; out: return 0; } static int bfad_iocmd_port_cfg_mode(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_port_cfg_mode_s *iocmd = (struct bfa_bsg_port_cfg_mode_s *)cmd; struct bfad_hal_comp fcomp; unsigned long flags = 0; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_ablk_port_config(&bfad->bfa.modules.ablk, iocmd->instance, iocmd->cfg.mode, iocmd->cfg.max_pf, iocmd->cfg.max_vf, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) goto out; wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; out: return 0; } static int bfad_iocmd_ablk_optrom(struct bfad_s *bfad, unsigned int cmd, void *pcmd) { struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd; struct bfad_hal_comp fcomp; unsigned long flags; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); if (cmd == IOCMD_FLASH_ENABLE_OPTROM) iocmd->status = bfa_ablk_optrom_en(&bfad->bfa.modules.ablk, bfad_hcb_comp, &fcomp); else iocmd->status = bfa_ablk_optrom_dis(&bfad->bfa.modules.ablk, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) goto out; wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; out: return 0; } static int bfad_iocmd_faa_query(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_faa_attr_s *iocmd = (struct 
bfa_bsg_faa_attr_s *)cmd; struct bfad_hal_comp fcomp; unsigned long flags; init_completion(&fcomp.comp); iocmd->status = BFA_STATUS_OK; spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_faa_query(&bfad->bfa, &iocmd->faa_attr, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) goto out; wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; out: return 0; } static int bfad_iocmd_cee_attr(struct bfad_s *bfad, void *cmd, unsigned int payload_len) { struct bfa_bsg_cee_attr_s *iocmd = (struct bfa_bsg_cee_attr_s *)cmd; void *iocmd_bufptr; struct bfad_hal_comp cee_comp; unsigned long flags; if (bfad_chk_iocmd_sz(payload_len, sizeof(struct bfa_bsg_cee_attr_s), sizeof(struct bfa_cee_attr_s)) != BFA_STATUS_OK) { iocmd->status = BFA_STATUS_VERSION_FAIL; return 0; } iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_cee_attr_s); cee_comp.status = 0; init_completion(&cee_comp.comp); mutex_lock(&bfad_mutex); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_cee_get_attr(&bfad->bfa.modules.cee, iocmd_bufptr, bfad_hcb_comp, &cee_comp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) { mutex_unlock(&bfad_mutex); bfa_trc(bfad, 0x5555); goto out; } wait_for_completion(&cee_comp.comp); mutex_unlock(&bfad_mutex); out: return 0; } static int bfad_iocmd_cee_get_stats(struct bfad_s *bfad, void *cmd, unsigned int payload_len) { struct bfa_bsg_cee_stats_s *iocmd = (struct bfa_bsg_cee_stats_s *)cmd; void *iocmd_bufptr; struct bfad_hal_comp cee_comp; unsigned long flags; if (bfad_chk_iocmd_sz(payload_len, sizeof(struct bfa_bsg_cee_stats_s), sizeof(struct bfa_cee_stats_s)) != BFA_STATUS_OK) { iocmd->status = BFA_STATUS_VERSION_FAIL; return 0; } iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_cee_stats_s); cee_comp.status = 0; init_completion(&cee_comp.comp); mutex_lock(&bfad_mutex); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_cee_get_stats(&bfad->bfa.modules.cee, iocmd_bufptr, bfad_hcb_comp, &cee_comp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) { mutex_unlock(&bfad_mutex); bfa_trc(bfad, 0x5555); goto out; } wait_for_completion(&cee_comp.comp); mutex_unlock(&bfad_mutex); out: return 0; } static int bfad_iocmd_cee_reset_stats(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_cee_reset_stats(&bfad->bfa.modules.cee, NULL, NULL); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) bfa_trc(bfad, 0x5555); return 0; } static int bfad_iocmd_sfp_media(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_sfp_media_s *iocmd = (struct bfa_bsg_sfp_media_s *)cmd; struct bfad_hal_comp fcomp; unsigned long flags; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_sfp_media(BFA_SFP_MOD(&bfad->bfa), &iocmd->media, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); bfa_trc(bfad, iocmd->status); if (iocmd->status != BFA_STATUS_SFP_NOT_READY) goto out; wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; out: return 0; } static int bfad_iocmd_sfp_speed(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_sfp_speed_s *iocmd = (struct bfa_bsg_sfp_speed_s *)cmd; struct bfad_hal_comp fcomp; unsigned long flags; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = 
bfa_sfp_speed(BFA_SFP_MOD(&bfad->bfa), iocmd->speed, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); bfa_trc(bfad, iocmd->status); if (iocmd->status != BFA_STATUS_SFP_NOT_READY) goto out; wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; out: return 0; } static int bfad_iocmd_flash_get_attr(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_flash_attr_s *iocmd = (struct bfa_bsg_flash_attr_s *)cmd; struct bfad_hal_comp fcomp; unsigned long flags; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_flash_get_attr(BFA_FLASH(&bfad->bfa), &iocmd->attr, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) goto out; wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; out: return 0; } static int bfad_iocmd_flash_erase_part(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd; struct bfad_hal_comp fcomp; unsigned long flags; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_flash_erase_part(BFA_FLASH(&bfad->bfa), iocmd->type, iocmd->instance, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) goto out; wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; out: return 0; } static int bfad_iocmd_flash_update_part(struct bfad_s *bfad, void *cmd, unsigned int payload_len) { struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd; void *iocmd_bufptr; struct bfad_hal_comp fcomp; unsigned long flags; if (bfad_chk_iocmd_sz(payload_len, sizeof(struct bfa_bsg_flash_s), iocmd->bufsz) != BFA_STATUS_OK) { iocmd->status = BFA_STATUS_VERSION_FAIL; return 0; } iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_flash_s); init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa), iocmd->type, iocmd->instance, iocmd_bufptr, iocmd->bufsz, 0, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) goto out; wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; out: return 0; } static int bfad_iocmd_flash_read_part(struct bfad_s *bfad, void *cmd, unsigned int payload_len) { struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd; struct bfad_hal_comp fcomp; void *iocmd_bufptr; unsigned long flags; if (bfad_chk_iocmd_sz(payload_len, sizeof(struct bfa_bsg_flash_s), iocmd->bufsz) != BFA_STATUS_OK) { iocmd->status = BFA_STATUS_VERSION_FAIL; return 0; } iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_flash_s); init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa), iocmd->type, iocmd->instance, iocmd_bufptr, iocmd->bufsz, 0, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) goto out; wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; out: return 0; } static int bfad_iocmd_diag_temp(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_diag_get_temp_s *iocmd = (struct bfa_bsg_diag_get_temp_s *)cmd; struct bfad_hal_comp fcomp; unsigned long flags; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_diag_tsensor_query(BFA_DIAG_MOD(&bfad->bfa), &iocmd->result, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); bfa_trc(bfad, iocmd->status); if (iocmd->status != 
BFA_STATUS_OK) goto out; wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; out: return 0; } static int bfad_iocmd_diag_memtest(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_diag_memtest_s *iocmd = (struct bfa_bsg_diag_memtest_s *)cmd; struct bfad_hal_comp fcomp; unsigned long flags; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_diag_memtest(BFA_DIAG_MOD(&bfad->bfa), &iocmd->memtest, iocmd->pat, &iocmd->result, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); bfa_trc(bfad, iocmd->status); if (iocmd->status != BFA_STATUS_OK) goto out; wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; out: return 0; } static int bfad_iocmd_diag_loopback(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_diag_loopback_s *iocmd = (struct bfa_bsg_diag_loopback_s *)cmd; struct bfad_hal_comp fcomp; unsigned long flags; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_fcdiag_loopback(&bfad->bfa, iocmd->opmode, iocmd->speed, iocmd->lpcnt, iocmd->pat, &iocmd->result, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); bfa_trc(bfad, iocmd->status); if (iocmd->status != BFA_STATUS_OK) goto out; wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; out: return 0; } static int bfad_iocmd_diag_fwping(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_diag_fwping_s *iocmd = (struct bfa_bsg_diag_fwping_s *)cmd; struct bfad_hal_comp fcomp; unsigned long flags; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_diag_fwping(BFA_DIAG_MOD(&bfad->bfa), iocmd->cnt, iocmd->pattern, &iocmd->result, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); bfa_trc(bfad, iocmd->status); if (iocmd->status != BFA_STATUS_OK) goto out; bfa_trc(bfad, 0x77771); wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; out: return 0; } static int bfad_iocmd_diag_queuetest(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_diag_qtest_s *iocmd = (struct bfa_bsg_diag_qtest_s *)cmd; struct bfad_hal_comp fcomp; unsigned long flags; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_fcdiag_queuetest(&bfad->bfa, iocmd->force, iocmd->queue, &iocmd->result, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) goto out; wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; out: return 0; } static int bfad_iocmd_diag_sfp(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_sfp_show_s *iocmd = (struct bfa_bsg_sfp_show_s *)cmd; struct bfad_hal_comp fcomp; unsigned long flags; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_sfp_show(BFA_SFP_MOD(&bfad->bfa), &iocmd->sfp, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); bfa_trc(bfad, iocmd->status); if (iocmd->status != BFA_STATUS_OK) goto out; wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; bfa_trc(bfad, iocmd->status); out: return 0; } static int bfad_iocmd_diag_led(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_diag_led_s *iocmd = (struct bfa_bsg_diag_led_s *)cmd; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_diag_ledtest(BFA_DIAG_MOD(&bfad->bfa), &iocmd->ledtest); spin_unlock_irqrestore(&bfad->bfad_lock, flags); return 0; } static int bfad_iocmd_diag_beacon_lport(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_diag_beacon_s *iocmd = (struct 
bfa_bsg_diag_beacon_s *)cmd; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_diag_beacon_port(BFA_DIAG_MOD(&bfad->bfa), iocmd->beacon, iocmd->link_e2e_beacon, iocmd->second); spin_unlock_irqrestore(&bfad->bfad_lock, flags); return 0; } static int bfad_iocmd_diag_lb_stat(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_diag_lb_stat_s *iocmd = (struct bfa_bsg_diag_lb_stat_s *)cmd; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_fcdiag_lb_is_running(&bfad->bfa); spin_unlock_irqrestore(&bfad->bfad_lock, flags); bfa_trc(bfad, iocmd->status); return 0; } static int bfad_iocmd_diag_dport_enable(struct bfad_s *bfad, void *pcmd) { struct bfa_bsg_dport_enable_s *iocmd = (struct bfa_bsg_dport_enable_s *)pcmd; unsigned long flags; struct bfad_hal_comp fcomp; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_dport_enable(&bfad->bfa, iocmd->lpcnt, iocmd->pat, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) bfa_trc(bfad, iocmd->status); else { wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; } return 0; } static int bfad_iocmd_diag_dport_disable(struct bfad_s *bfad, void *pcmd) { struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd; unsigned long flags; struct bfad_hal_comp fcomp; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_dport_disable(&bfad->bfa, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) bfa_trc(bfad, iocmd->status); else { wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; } return 0; } static int bfad_iocmd_diag_dport_start(struct bfad_s *bfad, void *pcmd) { struct bfa_bsg_dport_enable_s *iocmd = (struct bfa_bsg_dport_enable_s *)pcmd; unsigned long flags; struct bfad_hal_comp fcomp; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_dport_start(&bfad->bfa, iocmd->lpcnt, iocmd->pat, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) { bfa_trc(bfad, iocmd->status); } else { wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; } return 0; } static int bfad_iocmd_diag_dport_show(struct bfad_s *bfad, void *pcmd) { struct bfa_bsg_diag_dport_show_s *iocmd = (struct bfa_bsg_diag_dport_show_s *)pcmd; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_dport_show(&bfad->bfa, &iocmd->result); spin_unlock_irqrestore(&bfad->bfad_lock, flags); return 0; } static int bfad_iocmd_phy_get_attr(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_phy_attr_s *iocmd = (struct bfa_bsg_phy_attr_s *)cmd; struct bfad_hal_comp fcomp; unsigned long flags; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_phy_get_attr(BFA_PHY(&bfad->bfa), iocmd->instance, &iocmd->attr, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) goto out; wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; out: return 0; } static int bfad_iocmd_phy_get_stats(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_phy_stats_s *iocmd = (struct bfa_bsg_phy_stats_s *)cmd; struct bfad_hal_comp fcomp; unsigned long flags; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_phy_get_stats(BFA_PHY(&bfad->bfa), iocmd->instance, &iocmd->stats, 
bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) goto out; wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; out: return 0; } static int bfad_iocmd_phy_read(struct bfad_s *bfad, void *cmd, unsigned int payload_len) { struct bfa_bsg_phy_s *iocmd = (struct bfa_bsg_phy_s *)cmd; struct bfad_hal_comp fcomp; void *iocmd_bufptr; unsigned long flags; if (bfad_chk_iocmd_sz(payload_len, sizeof(struct bfa_bsg_phy_s), iocmd->bufsz) != BFA_STATUS_OK) { iocmd->status = BFA_STATUS_VERSION_FAIL; return 0; } iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_phy_s); init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_phy_read(BFA_PHY(&bfad->bfa), iocmd->instance, iocmd_bufptr, iocmd->bufsz, 0, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) goto out; wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; if (iocmd->status != BFA_STATUS_OK) goto out; out: return 0; } static int bfad_iocmd_vhba_query(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_vhba_attr_s *iocmd = (struct bfa_bsg_vhba_attr_s *)cmd; struct bfa_vhba_attr_s *attr = &iocmd->attr; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); attr->pwwn = bfad->bfa.ioc.attr->pwwn; attr->nwwn = bfad->bfa.ioc.attr->nwwn; attr->plog_enabled = (bfa_boolean_t)bfad->bfa.plog->plog_enabled; attr->io_profile = bfa_fcpim_get_io_profile(&bfad->bfa); attr->path_tov = bfa_fcpim_path_tov_get(&bfad->bfa); iocmd->status = BFA_STATUS_OK; spin_unlock_irqrestore(&bfad->bfad_lock, flags); return 0; } static int bfad_iocmd_phy_update(struct bfad_s *bfad, void *cmd, unsigned int payload_len) { struct bfa_bsg_phy_s *iocmd = (struct bfa_bsg_phy_s *)cmd; void *iocmd_bufptr; struct bfad_hal_comp fcomp; unsigned long flags; if (bfad_chk_iocmd_sz(payload_len, sizeof(struct bfa_bsg_phy_s), iocmd->bufsz) != BFA_STATUS_OK) { iocmd->status = BFA_STATUS_VERSION_FAIL; return 0; } iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_phy_s); init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_phy_update(BFA_PHY(&bfad->bfa), iocmd->instance, iocmd_bufptr, iocmd->bufsz, 0, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) goto out; wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; out: return 0; } static int bfad_iocmd_porglog_get(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd; void *iocmd_bufptr; if (iocmd->bufsz < sizeof(struct bfa_plog_s)) { bfa_trc(bfad, sizeof(struct bfa_plog_s)); iocmd->status = BFA_STATUS_EINVAL; goto out; } iocmd->status = BFA_STATUS_OK; iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s); memcpy(iocmd_bufptr, (u8 *) &bfad->plog_buf, sizeof(struct bfa_plog_s)); out: return 0; } #define BFA_DEBUG_FW_CORE_CHUNK_SZ 0x4000U /* 16K chunks for FW dump */ static int bfad_iocmd_debug_fw_core(struct bfad_s *bfad, void *cmd, unsigned int payload_len) { struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd; void *iocmd_bufptr; unsigned long flags; u32 offset; if (bfad_chk_iocmd_sz(payload_len, sizeof(struct bfa_bsg_debug_s), BFA_DEBUG_FW_CORE_CHUNK_SZ) != BFA_STATUS_OK) { iocmd->status = BFA_STATUS_VERSION_FAIL; return 0; } if (iocmd->bufsz < BFA_DEBUG_FW_CORE_CHUNK_SZ || !IS_ALIGNED(iocmd->bufsz, sizeof(u16)) || !IS_ALIGNED(iocmd->offset, sizeof(u32))) { bfa_trc(bfad, BFA_DEBUG_FW_CORE_CHUNK_SZ); 
iocmd->status = BFA_STATUS_EINVAL; goto out; } iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s); spin_lock_irqsave(&bfad->bfad_lock, flags); offset = iocmd->offset; iocmd->status = bfa_ioc_debug_fwcore(&bfad->bfa.ioc, iocmd_bufptr, &offset, &iocmd->bufsz); iocmd->offset = offset; spin_unlock_irqrestore(&bfad->bfad_lock, flags); out: return 0; } static int bfad_iocmd_debug_ctl(struct bfad_s *bfad, void *cmd, unsigned int v_cmd) { struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; unsigned long flags; if (v_cmd == IOCMD_DEBUG_FW_STATE_CLR) { spin_lock_irqsave(&bfad->bfad_lock, flags); bfad->bfa.ioc.dbg_fwsave_once = BFA_TRUE; spin_unlock_irqrestore(&bfad->bfad_lock, flags); } else if (v_cmd == IOCMD_DEBUG_PORTLOG_CLR) bfad->plog_buf.head = bfad->plog_buf.tail = 0; else if (v_cmd == IOCMD_DEBUG_START_DTRC) bfa_trc_init(bfad->trcmod); else if (v_cmd == IOCMD_DEBUG_STOP_DTRC) bfa_trc_stop(bfad->trcmod); iocmd->status = BFA_STATUS_OK; return 0; } static int bfad_iocmd_porglog_ctl(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_portlogctl_s *iocmd = (struct bfa_bsg_portlogctl_s *)cmd; if (iocmd->ctl == BFA_TRUE) bfad->plog_buf.plog_enabled = 1; else bfad->plog_buf.plog_enabled = 0; iocmd->status = BFA_STATUS_OK; return 0; } static int bfad_iocmd_fcpim_cfg_profile(struct bfad_s *bfad, void *cmd, unsigned int v_cmd) { struct bfa_bsg_fcpim_profile_s *iocmd = (struct bfa_bsg_fcpim_profile_s *)cmd; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); if (v_cmd == IOCMD_FCPIM_PROFILE_ON) iocmd->status = bfa_fcpim_profile_on(&bfad->bfa, ktime_get_real_seconds()); else if (v_cmd == IOCMD_FCPIM_PROFILE_OFF) iocmd->status = bfa_fcpim_profile_off(&bfad->bfa); spin_unlock_irqrestore(&bfad->bfad_lock, flags); return 0; } static int bfad_iocmd_itnim_get_ioprofile(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_itnim_ioprofile_s *iocmd = (struct bfa_bsg_itnim_ioprofile_s *)cmd; struct bfa_fcs_lport_s *fcs_port; struct bfa_fcs_itnim_s *itnim; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, iocmd->vf_id, iocmd->lpwwn); if (!fcs_port) iocmd->status = BFA_STATUS_UNKNOWN_LWWN; else { itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn); if (itnim == NULL) iocmd->status = BFA_STATUS_UNKNOWN_RWWN; else iocmd->status = bfa_itnim_get_ioprofile( bfa_fcs_itnim_get_halitn(itnim), &iocmd->ioprofile); } spin_unlock_irqrestore(&bfad->bfad_lock, flags); return 0; } static int bfad_iocmd_fcport_get_stats(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_fcport_stats_s *iocmd = (struct bfa_bsg_fcport_stats_s *)cmd; struct bfad_hal_comp fcomp; unsigned long flags; struct bfa_cb_pending_q_s cb_qe; init_completion(&fcomp.comp); bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, &fcomp, &iocmd->stats); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) { bfa_trc(bfad, iocmd->status); goto out; } wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; out: return 0; } static int bfad_iocmd_fcport_reset_stats(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; struct bfad_hal_comp fcomp; unsigned long flags; struct bfa_cb_pending_q_s cb_qe; init_completion(&fcomp.comp); bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, &fcomp, NULL); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe); 
spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) { bfa_trc(bfad, iocmd->status); goto out; } wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; out: return 0; } static int bfad_iocmd_boot_cfg(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd; struct bfad_hal_comp fcomp; unsigned long flags; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa), BFA_FLASH_PART_BOOT, bfad->bfa.ioc.port_id, &iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) goto out; wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; out: return 0; } static int bfad_iocmd_boot_query(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd; struct bfad_hal_comp fcomp; unsigned long flags; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa), BFA_FLASH_PART_BOOT, bfad->bfa.ioc.port_id, &iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) goto out; wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; out: return 0; } static int bfad_iocmd_preboot_query(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_preboot_s *iocmd = (struct bfa_bsg_preboot_s *)cmd; struct bfi_iocfc_cfgrsp_s *cfgrsp = bfad->bfa.iocfc.cfgrsp; struct bfa_boot_pbc_s *pbcfg = &iocmd->cfg; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); pbcfg->enable = cfgrsp->pbc_cfg.boot_enabled; pbcfg->nbluns = cfgrsp->pbc_cfg.nbluns; pbcfg->speed = cfgrsp->pbc_cfg.port_speed; memcpy(pbcfg->pblun, cfgrsp->pbc_cfg.blun, sizeof(pbcfg->pblun)); iocmd->status = BFA_STATUS_OK; spin_unlock_irqrestore(&bfad->bfad_lock, flags); return 0; } static int bfad_iocmd_ethboot_cfg(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd; struct bfad_hal_comp fcomp; unsigned long flags; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa), BFA_FLASH_PART_PXECFG, bfad->bfa.ioc.port_id, &iocmd->cfg, sizeof(struct bfa_ethboot_cfg_s), 0, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) goto out; wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; out: return 0; } static int bfad_iocmd_ethboot_query(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd; struct bfad_hal_comp fcomp; unsigned long flags; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa), BFA_FLASH_PART_PXECFG, bfad->bfa.ioc.port_id, &iocmd->cfg, sizeof(struct bfa_ethboot_cfg_s), 0, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) goto out; wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; out: return 0; } static int bfad_iocmd_cfg_trunk(struct bfad_s *bfad, void *cmd, unsigned int v_cmd) { struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); struct bfa_fcport_trunk_s *trunk = &fcport->trunk; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); 
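/* Trunking cannot be reconfigured while the port is in D-Port (diagnostic) mode or in loop topology; both cases are rejected below before fcport->cfg.trunked is toggled. */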
if (bfa_fcport_is_dport(&bfad->bfa)) { spin_unlock_irqrestore(&bfad->bfad_lock, flags); return BFA_STATUS_DPORT_ERR; } if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) || (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)) iocmd->status = BFA_STATUS_TOPOLOGY_LOOP; else { if (v_cmd == IOCMD_TRUNK_ENABLE) { trunk->attr.state = BFA_TRUNK_OFFLINE; bfa_fcport_disable(&bfad->bfa); fcport->cfg.trunked = BFA_TRUE; } else if (v_cmd == IOCMD_TRUNK_DISABLE) { trunk->attr.state = BFA_TRUNK_DISABLED; bfa_fcport_disable(&bfad->bfa); fcport->cfg.trunked = BFA_FALSE; } if (!bfa_fcport_is_disabled(&bfad->bfa)) bfa_fcport_enable(&bfad->bfa); iocmd->status = BFA_STATUS_OK; } spin_unlock_irqrestore(&bfad->bfad_lock, flags); return 0; } static int bfad_iocmd_trunk_get_attr(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_trunk_attr_s *iocmd = (struct bfa_bsg_trunk_attr_s *)cmd; struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); struct bfa_fcport_trunk_s *trunk = &fcport->trunk; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) || (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)) iocmd->status = BFA_STATUS_TOPOLOGY_LOOP; else { memcpy((void *)&iocmd->attr, (void *)&trunk->attr, sizeof(struct bfa_trunk_attr_s)); iocmd->attr.port_id = bfa_lps_get_base_pid(&bfad->bfa); iocmd->status = BFA_STATUS_OK; } spin_unlock_irqrestore(&bfad->bfad_lock, flags); return 0; } static int bfad_iocmd_qos(struct bfad_s *bfad, void *cmd, unsigned int v_cmd) { struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); if (bfa_ioc_get_type(&bfad->bfa.ioc) == BFA_IOC_TYPE_FC) { if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) && (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)) iocmd->status = BFA_STATUS_TOPOLOGY_LOOP; else { if (v_cmd == IOCMD_QOS_ENABLE) fcport->cfg.qos_enabled = BFA_TRUE; else if (v_cmd == IOCMD_QOS_DISABLE) { fcport->cfg.qos_enabled = BFA_FALSE; fcport->cfg.qos_bw.high = BFA_QOS_BW_HIGH; fcport->cfg.qos_bw.med = BFA_QOS_BW_MED; fcport->cfg.qos_bw.low = BFA_QOS_BW_LOW; } } } spin_unlock_irqrestore(&bfad->bfad_lock, flags); return 0; } static int bfad_iocmd_qos_get_attr(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_qos_attr_s *iocmd = (struct bfa_bsg_qos_attr_s *)cmd; struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) && (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)) iocmd->status = BFA_STATUS_TOPOLOGY_LOOP; else { iocmd->attr.state = fcport->qos_attr.state; iocmd->attr.total_bb_cr = be32_to_cpu(fcport->qos_attr.total_bb_cr); iocmd->attr.qos_bw.high = fcport->cfg.qos_bw.high; iocmd->attr.qos_bw.med = fcport->cfg.qos_bw.med; iocmd->attr.qos_bw.low = fcport->cfg.qos_bw.low; iocmd->attr.qos_bw_op = fcport->qos_attr.qos_bw_op; iocmd->status = BFA_STATUS_OK; } spin_unlock_irqrestore(&bfad->bfad_lock, flags); return 0; } static int bfad_iocmd_qos_get_vc_attr(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_qos_vc_attr_s *iocmd = (struct bfa_bsg_qos_vc_attr_s *)cmd; struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); struct bfa_qos_vc_attr_s *bfa_vc_attr = &fcport->qos_vc_attr; unsigned long flags; u32 i = 0; spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->attr.total_vc_count = be16_to_cpu(bfa_vc_attr->total_vc_count); iocmd->attr.shared_credit = be16_to_cpu(bfa_vc_attr->shared_credit); 
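/* Firmware reports the virtual-circuit attributes in big-endian; the 16/32-bit fields are byte-swapped here before being copied out to the caller. */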
iocmd->attr.elp_opmode_flags = be32_to_cpu(bfa_vc_attr->elp_opmode_flags); /* Individual VC info */ while (i < iocmd->attr.total_vc_count) { iocmd->attr.vc_info[i].vc_credit = bfa_vc_attr->vc_info[i].vc_credit; iocmd->attr.vc_info[i].borrow_credit = bfa_vc_attr->vc_info[i].borrow_credit; iocmd->attr.vc_info[i].priority = bfa_vc_attr->vc_info[i].priority; i++; } spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_OK; return 0; } static int bfad_iocmd_qos_get_stats(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_fcport_stats_s *iocmd = (struct bfa_bsg_fcport_stats_s *)cmd; struct bfad_hal_comp fcomp; unsigned long flags; struct bfa_cb_pending_q_s cb_qe; struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); init_completion(&fcomp.comp); bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, &fcomp, &iocmd->stats); spin_lock_irqsave(&bfad->bfad_lock, flags); WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc)); if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) && (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)) iocmd->status = BFA_STATUS_TOPOLOGY_LOOP; else iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) { bfa_trc(bfad, iocmd->status); goto out; } wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; out: return 0; } static int bfad_iocmd_qos_reset_stats(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; struct bfad_hal_comp fcomp; unsigned long flags; struct bfa_cb_pending_q_s cb_qe; struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); init_completion(&fcomp.comp); bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, &fcomp, NULL); spin_lock_irqsave(&bfad->bfad_lock, flags); WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc)); if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) && (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)) iocmd->status = BFA_STATUS_TOPOLOGY_LOOP; else iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) { bfa_trc(bfad, iocmd->status); goto out; } wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; out: return 0; } static int bfad_iocmd_vf_get_stats(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_vf_stats_s *iocmd = (struct bfa_bsg_vf_stats_s *)cmd; struct bfa_fcs_fabric_s *fcs_vf; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id); if (fcs_vf == NULL) { spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_UNKNOWN_VFID; goto out; } memcpy((void *)&iocmd->stats, (void *)&fcs_vf->stats, sizeof(struct bfa_vf_stats_s)); spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_OK; out: return 0; } static int bfad_iocmd_vf_clr_stats(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_vf_reset_stats_s *iocmd = (struct bfa_bsg_vf_reset_stats_s *)cmd; struct bfa_fcs_fabric_s *fcs_vf; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id); if (fcs_vf == NULL) { spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_UNKNOWN_VFID; goto out; } memset((void *)&fcs_vf->stats, 0, sizeof(struct bfa_vf_stats_s)); spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_OK; out: return 0; } /* * Set the SCSI device sdev_bflags - sdev_bflags are used by the * SCSI mid-layer to choose LUN Scanning mode 
REPORT_LUNS vs. Sequential Scan * * Internally iterates over all the ITNIM's part of the im_port & sets the * sdev_bflags for the scsi_device associated with LUN #0. */ static void bfad_reset_sdev_bflags(struct bfad_im_port_s *im_port, int lunmask_cfg) { const u32 scan_flags = BLIST_NOREPORTLUN | BLIST_SPARSELUN; struct bfad_itnim_s *itnim; struct scsi_device *sdev; unsigned long flags; spin_lock_irqsave(im_port->shost->host_lock, flags); list_for_each_entry(itnim, &im_port->itnim_mapped_list, list_entry) { sdev = __scsi_device_lookup(im_port->shost, itnim->channel, itnim->scsi_tgt_id, 0); if (sdev) { if (lunmask_cfg == BFA_TRUE) sdev->sdev_bflags |= scan_flags; else sdev->sdev_bflags &= ~scan_flags; } } spin_unlock_irqrestore(im_port->shost->host_lock, flags); } /* Function to reset the LUN SCAN mode */ static void bfad_iocmd_lunmask_reset_lunscan_mode(struct bfad_s *bfad, int lunmask_cfg) { struct bfad_im_port_s *pport_im = bfad->pport.im_port; struct bfad_vport_s *vport = NULL; /* Set the scsi device LUN SCAN flags for base port */ bfad_reset_sdev_bflags(pport_im, lunmask_cfg); /* Set the scsi device LUN SCAN flags for the vports */ list_for_each_entry(vport, &bfad->vport_list, list_entry) bfad_reset_sdev_bflags(vport->drv_port.im_port, lunmask_cfg); } static int bfad_iocmd_lunmask(struct bfad_s *bfad, void *pcmd, unsigned int v_cmd) { struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); if (v_cmd == IOCMD_FCPIM_LUNMASK_ENABLE) { iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_TRUE); /* Set the LUN Scanning mode to be Sequential scan */ if (iocmd->status == BFA_STATUS_OK) bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_TRUE); } else if (v_cmd == IOCMD_FCPIM_LUNMASK_DISABLE) { iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_FALSE); /* Set the LUN Scanning mode to default REPORT_LUNS scan */ if (iocmd->status == BFA_STATUS_OK) bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_FALSE); } else if (v_cmd == IOCMD_FCPIM_LUNMASK_CLEAR) iocmd->status = bfa_fcpim_lunmask_clear(&bfad->bfa); spin_unlock_irqrestore(&bfad->bfad_lock, flags); return 0; } static int bfad_iocmd_fcpim_lunmask_query(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_fcpim_lunmask_query_s *iocmd = (struct bfa_bsg_fcpim_lunmask_query_s *)cmd; struct bfa_lunmask_cfg_s *lun_mask = &iocmd->lun_mask; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_fcpim_lunmask_query(&bfad->bfa, lun_mask); spin_unlock_irqrestore(&bfad->bfad_lock, flags); return 0; } static int bfad_iocmd_fcpim_cfg_lunmask(struct bfad_s *bfad, void *cmd, unsigned int v_cmd) { struct bfa_bsg_fcpim_lunmask_s *iocmd = (struct bfa_bsg_fcpim_lunmask_s *)cmd; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); if (v_cmd == IOCMD_FCPIM_LUNMASK_ADD) iocmd->status = bfa_fcpim_lunmask_add(&bfad->bfa, iocmd->vf_id, &iocmd->pwwn, iocmd->rpwwn, iocmd->lun); else if (v_cmd == IOCMD_FCPIM_LUNMASK_DELETE) iocmd->status = bfa_fcpim_lunmask_delete(&bfad->bfa, iocmd->vf_id, &iocmd->pwwn, iocmd->rpwwn, iocmd->lun); spin_unlock_irqrestore(&bfad->bfad_lock, flags); return 0; } static int bfad_iocmd_fcpim_throttle_query(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_fcpim_throttle_s *iocmd = (struct bfa_bsg_fcpim_throttle_s *)cmd; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_fcpim_throttle_get(&bfad->bfa, (void *)&iocmd->throttle); spin_unlock_irqrestore(&bfad->bfad_lock, flags); return 0; } static int 
bfad_iocmd_fcpim_throttle_set(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_fcpim_throttle_s *iocmd = (struct bfa_bsg_fcpim_throttle_s *)cmd; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_fcpim_throttle_set(&bfad->bfa, iocmd->throttle.cfg_value); spin_unlock_irqrestore(&bfad->bfad_lock, flags); return 0; } static int bfad_iocmd_tfru_read(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_tfru_s *iocmd = (struct bfa_bsg_tfru_s *)cmd; struct bfad_hal_comp fcomp; unsigned long flags = 0; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_tfru_read(BFA_FRU(&bfad->bfa), &iocmd->data, iocmd->len, iocmd->offset, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status == BFA_STATUS_OK) { wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; } return 0; } static int bfad_iocmd_tfru_write(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_tfru_s *iocmd = (struct bfa_bsg_tfru_s *)cmd; struct bfad_hal_comp fcomp; unsigned long flags = 0; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_tfru_write(BFA_FRU(&bfad->bfa), &iocmd->data, iocmd->len, iocmd->offset, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status == BFA_STATUS_OK) { wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; } return 0; } static int bfad_iocmd_fruvpd_read(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_fruvpd_s *iocmd = (struct bfa_bsg_fruvpd_s *)cmd; struct bfad_hal_comp fcomp; unsigned long flags = 0; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_fruvpd_read(BFA_FRU(&bfad->bfa), &iocmd->data, iocmd->len, iocmd->offset, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status == BFA_STATUS_OK) { wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; } return 0; } static int bfad_iocmd_fruvpd_update(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_fruvpd_s *iocmd = (struct bfa_bsg_fruvpd_s *)cmd; struct bfad_hal_comp fcomp; unsigned long flags = 0; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_fruvpd_update(BFA_FRU(&bfad->bfa), &iocmd->data, iocmd->len, iocmd->offset, bfad_hcb_comp, &fcomp, iocmd->trfr_cmpl); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status == BFA_STATUS_OK) { wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; } return 0; } static int bfad_iocmd_fruvpd_get_max_size(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_fruvpd_max_size_s *iocmd = (struct bfa_bsg_fruvpd_max_size_s *)cmd; unsigned long flags = 0; spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_fruvpd_get_max_size(BFA_FRU(&bfad->bfa), &iocmd->max_size); spin_unlock_irqrestore(&bfad->bfad_lock, flags); return 0; } static int bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd, unsigned int payload_len) { int rc = -EINVAL; switch (cmd) { case IOCMD_IOC_ENABLE: rc = bfad_iocmd_ioc_enable(bfad, iocmd); break; case IOCMD_IOC_DISABLE: rc = bfad_iocmd_ioc_disable(bfad, iocmd); break; case IOCMD_IOC_GET_INFO: rc = bfad_iocmd_ioc_get_info(bfad, iocmd); break; case IOCMD_IOC_GET_ATTR: rc = bfad_iocmd_ioc_get_attr(bfad, iocmd); break; case IOCMD_IOC_GET_STATS: rc = bfad_iocmd_ioc_get_stats(bfad, iocmd); break; case IOCMD_IOC_GET_FWSTATS: rc = bfad_iocmd_ioc_get_fwstats(bfad, iocmd, payload_len); break; case IOCMD_IOC_RESET_STATS: case 
IOCMD_IOC_RESET_FWSTATS: rc = bfad_iocmd_ioc_reset_stats(bfad, iocmd, cmd); break; case IOCMD_IOC_SET_ADAPTER_NAME: case IOCMD_IOC_SET_PORT_NAME: rc = bfad_iocmd_ioc_set_name(bfad, iocmd, cmd); break; case IOCMD_IOCFC_GET_ATTR: rc = bfad_iocmd_iocfc_get_attr(bfad, iocmd); break; case IOCMD_IOCFC_SET_INTR: rc = bfad_iocmd_iocfc_set_intr(bfad, iocmd); break; case IOCMD_PORT_ENABLE: rc = bfad_iocmd_port_enable(bfad, iocmd); break; case IOCMD_PORT_DISABLE: rc = bfad_iocmd_port_disable(bfad, iocmd); break; case IOCMD_PORT_GET_ATTR: rc = bfad_iocmd_port_get_attr(bfad, iocmd); break; case IOCMD_PORT_GET_STATS: rc = bfad_iocmd_port_get_stats(bfad, iocmd, payload_len); break; case IOCMD_PORT_RESET_STATS: rc = bfad_iocmd_port_reset_stats(bfad, iocmd); break; case IOCMD_PORT_CFG_TOPO: case IOCMD_PORT_CFG_SPEED: case IOCMD_PORT_CFG_ALPA: case IOCMD_PORT_CLR_ALPA: rc = bfad_iocmd_set_port_cfg(bfad, iocmd, cmd); break; case IOCMD_PORT_CFG_MAXFRSZ: rc = bfad_iocmd_port_cfg_maxfrsize(bfad, iocmd); break; case IOCMD_PORT_BBCR_ENABLE: case IOCMD_PORT_BBCR_DISABLE: rc = bfad_iocmd_port_cfg_bbcr(bfad, cmd, iocmd); break; case IOCMD_PORT_BBCR_GET_ATTR: rc = bfad_iocmd_port_get_bbcr_attr(bfad, iocmd); break; case IOCMD_LPORT_GET_ATTR: rc = bfad_iocmd_lport_get_attr(bfad, iocmd); break; case IOCMD_LPORT_GET_STATS: rc = bfad_iocmd_lport_get_stats(bfad, iocmd); break; case IOCMD_LPORT_RESET_STATS: rc = bfad_iocmd_lport_reset_stats(bfad, iocmd); break; case IOCMD_LPORT_GET_IOSTATS: rc = bfad_iocmd_lport_get_iostats(bfad, iocmd); break; case IOCMD_LPORT_GET_RPORTS: rc = bfad_iocmd_lport_get_rports(bfad, iocmd, payload_len); break; case IOCMD_RPORT_GET_ATTR: rc = bfad_iocmd_rport_get_attr(bfad, iocmd); break; case IOCMD_RPORT_GET_ADDR: rc = bfad_iocmd_rport_get_addr(bfad, iocmd); break; case IOCMD_RPORT_GET_STATS: rc = bfad_iocmd_rport_get_stats(bfad, iocmd); break; case IOCMD_RPORT_RESET_STATS: rc = bfad_iocmd_rport_clr_stats(bfad, iocmd); break; case IOCMD_RPORT_SET_SPEED: rc = bfad_iocmd_rport_set_speed(bfad, iocmd); break; case IOCMD_VPORT_GET_ATTR: rc = bfad_iocmd_vport_get_attr(bfad, iocmd); break; case IOCMD_VPORT_GET_STATS: rc = bfad_iocmd_vport_get_stats(bfad, iocmd); break; case IOCMD_VPORT_RESET_STATS: rc = bfad_iocmd_vport_clr_stats(bfad, iocmd); break; case IOCMD_FABRIC_GET_LPORTS: rc = bfad_iocmd_fabric_get_lports(bfad, iocmd, payload_len); break; case IOCMD_RATELIM_ENABLE: case IOCMD_RATELIM_DISABLE: rc = bfad_iocmd_ratelim(bfad, cmd, iocmd); break; case IOCMD_RATELIM_DEF_SPEED: rc = bfad_iocmd_ratelim_speed(bfad, cmd, iocmd); break; case IOCMD_FCPIM_FAILOVER: rc = bfad_iocmd_cfg_fcpim(bfad, iocmd); break; case IOCMD_FCPIM_MODSTATS: rc = bfad_iocmd_fcpim_get_modstats(bfad, iocmd); break; case IOCMD_FCPIM_MODSTATSCLR: rc = bfad_iocmd_fcpim_clr_modstats(bfad, iocmd); break; case IOCMD_FCPIM_DEL_ITN_STATS: rc = bfad_iocmd_fcpim_get_del_itn_stats(bfad, iocmd); break; case IOCMD_ITNIM_GET_ATTR: rc = bfad_iocmd_itnim_get_attr(bfad, iocmd); break; case IOCMD_ITNIM_GET_IOSTATS: rc = bfad_iocmd_itnim_get_iostats(bfad, iocmd); break; case IOCMD_ITNIM_RESET_STATS: rc = bfad_iocmd_itnim_reset_stats(bfad, iocmd); break; case IOCMD_ITNIM_GET_ITNSTATS: rc = bfad_iocmd_itnim_get_itnstats(bfad, iocmd); break; case IOCMD_FCPORT_ENABLE: rc = bfad_iocmd_fcport_enable(bfad, iocmd); break; case IOCMD_FCPORT_DISABLE: rc = bfad_iocmd_fcport_disable(bfad, iocmd); break; case IOCMD_IOC_PCIFN_CFG: rc = bfad_iocmd_ioc_get_pcifn_cfg(bfad, iocmd); break; case IOCMD_IOC_FW_SIG_INV: rc = bfad_iocmd_ioc_fw_sig_inv(bfad, iocmd); break; 
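/* PCI function and adapter/port mode configuration */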
case IOCMD_PCIFN_CREATE: rc = bfad_iocmd_pcifn_create(bfad, iocmd); break; case IOCMD_PCIFN_DELETE: rc = bfad_iocmd_pcifn_delete(bfad, iocmd); break; case IOCMD_PCIFN_BW: rc = bfad_iocmd_pcifn_bw(bfad, iocmd); break; case IOCMD_ADAPTER_CFG_MODE: rc = bfad_iocmd_adapter_cfg_mode(bfad, iocmd); break; case IOCMD_PORT_CFG_MODE: rc = bfad_iocmd_port_cfg_mode(bfad, iocmd); break; case IOCMD_FLASH_ENABLE_OPTROM: case IOCMD_FLASH_DISABLE_OPTROM: rc = bfad_iocmd_ablk_optrom(bfad, cmd, iocmd); break; case IOCMD_FAA_QUERY: rc = bfad_iocmd_faa_query(bfad, iocmd); break; case IOCMD_CEE_GET_ATTR: rc = bfad_iocmd_cee_attr(bfad, iocmd, payload_len); break; case IOCMD_CEE_GET_STATS: rc = bfad_iocmd_cee_get_stats(bfad, iocmd, payload_len); break; case IOCMD_CEE_RESET_STATS: rc = bfad_iocmd_cee_reset_stats(bfad, iocmd); break; case IOCMD_SFP_MEDIA: rc = bfad_iocmd_sfp_media(bfad, iocmd); break; case IOCMD_SFP_SPEED: rc = bfad_iocmd_sfp_speed(bfad, iocmd); break; case IOCMD_FLASH_GET_ATTR: rc = bfad_iocmd_flash_get_attr(bfad, iocmd); break; case IOCMD_FLASH_ERASE_PART: rc = bfad_iocmd_flash_erase_part(bfad, iocmd); break; case IOCMD_FLASH_UPDATE_PART: rc = bfad_iocmd_flash_update_part(bfad, iocmd, payload_len); break; case IOCMD_FLASH_READ_PART: rc = bfad_iocmd_flash_read_part(bfad, iocmd, payload_len); break; case IOCMD_DIAG_TEMP: rc = bfad_iocmd_diag_temp(bfad, iocmd); break; case IOCMD_DIAG_MEMTEST: rc = bfad_iocmd_diag_memtest(bfad, iocmd); break; case IOCMD_DIAG_LOOPBACK: rc = bfad_iocmd_diag_loopback(bfad, iocmd); break; case IOCMD_DIAG_FWPING: rc = bfad_iocmd_diag_fwping(bfad, iocmd); break; case IOCMD_DIAG_QUEUETEST: rc = bfad_iocmd_diag_queuetest(bfad, iocmd); break; case IOCMD_DIAG_SFP: rc = bfad_iocmd_diag_sfp(bfad, iocmd); break; case IOCMD_DIAG_LED: rc = bfad_iocmd_diag_led(bfad, iocmd); break; case IOCMD_DIAG_BEACON_LPORT: rc = bfad_iocmd_diag_beacon_lport(bfad, iocmd); break; case IOCMD_DIAG_LB_STAT: rc = bfad_iocmd_diag_lb_stat(bfad, iocmd); break; case IOCMD_DIAG_DPORT_ENABLE: rc = bfad_iocmd_diag_dport_enable(bfad, iocmd); break; case IOCMD_DIAG_DPORT_DISABLE: rc = bfad_iocmd_diag_dport_disable(bfad, iocmd); break; case IOCMD_DIAG_DPORT_SHOW: rc = bfad_iocmd_diag_dport_show(bfad, iocmd); break; case IOCMD_DIAG_DPORT_START: rc = bfad_iocmd_diag_dport_start(bfad, iocmd); break; case IOCMD_PHY_GET_ATTR: rc = bfad_iocmd_phy_get_attr(bfad, iocmd); break; case IOCMD_PHY_GET_STATS: rc = bfad_iocmd_phy_get_stats(bfad, iocmd); break; case IOCMD_PHY_UPDATE_FW: rc = bfad_iocmd_phy_update(bfad, iocmd, payload_len); break; case IOCMD_PHY_READ_FW: rc = bfad_iocmd_phy_read(bfad, iocmd, payload_len); break; case IOCMD_VHBA_QUERY: rc = bfad_iocmd_vhba_query(bfad, iocmd); break; case IOCMD_DEBUG_PORTLOG: rc = bfad_iocmd_porglog_get(bfad, iocmd); break; case IOCMD_DEBUG_FW_CORE: rc = bfad_iocmd_debug_fw_core(bfad, iocmd, payload_len); break; case IOCMD_DEBUG_FW_STATE_CLR: case IOCMD_DEBUG_PORTLOG_CLR: case IOCMD_DEBUG_START_DTRC: case IOCMD_DEBUG_STOP_DTRC: rc = bfad_iocmd_debug_ctl(bfad, iocmd, cmd); break; case IOCMD_DEBUG_PORTLOG_CTL: rc = bfad_iocmd_porglog_ctl(bfad, iocmd); break; case IOCMD_FCPIM_PROFILE_ON: case IOCMD_FCPIM_PROFILE_OFF: rc = bfad_iocmd_fcpim_cfg_profile(bfad, iocmd, cmd); break; case IOCMD_ITNIM_GET_IOPROFILE: rc = bfad_iocmd_itnim_get_ioprofile(bfad, iocmd); break; case IOCMD_FCPORT_GET_STATS: rc = bfad_iocmd_fcport_get_stats(bfad, iocmd); break; case IOCMD_FCPORT_RESET_STATS: rc = bfad_iocmd_fcport_reset_stats(bfad, iocmd); break; case IOCMD_BOOT_CFG: rc = bfad_iocmd_boot_cfg(bfad, 
iocmd); break; case IOCMD_BOOT_QUERY: rc = bfad_iocmd_boot_query(bfad, iocmd); break; case IOCMD_PREBOOT_QUERY: rc = bfad_iocmd_preboot_query(bfad, iocmd); break; case IOCMD_ETHBOOT_CFG: rc = bfad_iocmd_ethboot_cfg(bfad, iocmd); break; case IOCMD_ETHBOOT_QUERY: rc = bfad_iocmd_ethboot_query(bfad, iocmd); break; case IOCMD_TRUNK_ENABLE: case IOCMD_TRUNK_DISABLE: rc = bfad_iocmd_cfg_trunk(bfad, iocmd, cmd); break; case IOCMD_TRUNK_GET_ATTR: rc = bfad_iocmd_trunk_get_attr(bfad, iocmd); break; case IOCMD_QOS_ENABLE: case IOCMD_QOS_DISABLE: rc = bfad_iocmd_qos(bfad, iocmd, cmd); break; case IOCMD_QOS_GET_ATTR: rc = bfad_iocmd_qos_get_attr(bfad, iocmd); break; case IOCMD_QOS_GET_VC_ATTR: rc = bfad_iocmd_qos_get_vc_attr(bfad, iocmd); break; case IOCMD_QOS_GET_STATS: rc = bfad_iocmd_qos_get_stats(bfad, iocmd); break; case IOCMD_QOS_RESET_STATS: rc = bfad_iocmd_qos_reset_stats(bfad, iocmd); break; case IOCMD_QOS_SET_BW: rc = bfad_iocmd_qos_set_bw(bfad, iocmd); break; case IOCMD_VF_GET_STATS: rc = bfad_iocmd_vf_get_stats(bfad, iocmd); break; case IOCMD_VF_RESET_STATS: rc = bfad_iocmd_vf_clr_stats(bfad, iocmd); break; case IOCMD_FCPIM_LUNMASK_ENABLE: case IOCMD_FCPIM_LUNMASK_DISABLE: case IOCMD_FCPIM_LUNMASK_CLEAR: rc = bfad_iocmd_lunmask(bfad, iocmd, cmd); break; case IOCMD_FCPIM_LUNMASK_QUERY: rc = bfad_iocmd_fcpim_lunmask_query(bfad, iocmd); break; case IOCMD_FCPIM_LUNMASK_ADD: case IOCMD_FCPIM_LUNMASK_DELETE: rc = bfad_iocmd_fcpim_cfg_lunmask(bfad, iocmd, cmd); break; case IOCMD_FCPIM_THROTTLE_QUERY: rc = bfad_iocmd_fcpim_throttle_query(bfad, iocmd); break; case IOCMD_FCPIM_THROTTLE_SET: rc = bfad_iocmd_fcpim_throttle_set(bfad, iocmd); break; /* TFRU */ case IOCMD_TFRU_READ: rc = bfad_iocmd_tfru_read(bfad, iocmd); break; case IOCMD_TFRU_WRITE: rc = bfad_iocmd_tfru_write(bfad, iocmd); break; /* FRU */ case IOCMD_FRUVPD_READ: rc = bfad_iocmd_fruvpd_read(bfad, iocmd); break; case IOCMD_FRUVPD_UPDATE: rc = bfad_iocmd_fruvpd_update(bfad, iocmd); break; case IOCMD_FRUVPD_GET_MAX_SIZE: rc = bfad_iocmd_fruvpd_get_max_size(bfad, iocmd); break; default: rc = -EINVAL; break; } return rc; } static int bfad_im_bsg_vendor_request(struct bsg_job *job) { struct fc_bsg_request *bsg_request = job->request; struct fc_bsg_reply *bsg_reply = job->reply; uint32_t vendor_cmd = bsg_request->rqst_data.h_vendor.vendor_cmd[0]; struct Scsi_Host *shost = fc_bsg_to_shost(job); struct bfad_im_port_s *im_port = bfad_get_im_port(shost); struct bfad_s *bfad = im_port->bfad; void *payload_kbuf; int rc = -EINVAL; /* Allocate a temp buffer to hold the passed in user space command */ payload_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL); if (!payload_kbuf) { rc = -ENOMEM; goto out; } /* Copy the sg_list passed in to a linear buffer: holds the cmnd data */ sg_copy_to_buffer(job->request_payload.sg_list, job->request_payload.sg_cnt, payload_kbuf, job->request_payload.payload_len); /* Invoke IOCMD handler - to handle all the vendor command requests */ rc = bfad_iocmd_handler(bfad, vendor_cmd, payload_kbuf, job->request_payload.payload_len); if (rc != BFA_STATUS_OK) goto error; /* Copy the response data to the job->reply_payload sg_list */ sg_copy_from_buffer(job->reply_payload.sg_list, job->reply_payload.sg_cnt, payload_kbuf, job->reply_payload.payload_len); /* free the command buffer */ kfree(payload_kbuf); /* Fill the BSG job reply data */ job->reply_len = job->reply_payload.payload_len; bsg_reply->reply_payload_rcv_len = job->reply_payload.payload_len; bsg_reply->result = rc; bsg_job_done(job, bsg_reply->result, 
bsg_reply->reply_payload_rcv_len); return rc; error: /* free the command buffer */ kfree(payload_kbuf); out: bsg_reply->result = rc; job->reply_len = sizeof(uint32_t); bsg_reply->reply_payload_rcv_len = 0; return rc; } /* FC passthru call backs */ static u64 bfad_fcxp_get_req_sgaddr_cb(void *bfad_fcxp, int sgeid) { struct bfad_fcxp *drv_fcxp = bfad_fcxp; struct bfa_sge_s *sge; u64 addr; sge = drv_fcxp->req_sge + sgeid; addr = (u64)(size_t) sge->sg_addr; return addr; } static u32 bfad_fcxp_get_req_sglen_cb(void *bfad_fcxp, int sgeid) { struct bfad_fcxp *drv_fcxp = bfad_fcxp; struct bfa_sge_s *sge; sge = drv_fcxp->req_sge + sgeid; return sge->sg_len; } static u64 bfad_fcxp_get_rsp_sgaddr_cb(void *bfad_fcxp, int sgeid) { struct bfad_fcxp *drv_fcxp = bfad_fcxp; struct bfa_sge_s *sge; u64 addr; sge = drv_fcxp->rsp_sge + sgeid; addr = (u64)(size_t) sge->sg_addr; return addr; } static u32 bfad_fcxp_get_rsp_sglen_cb(void *bfad_fcxp, int sgeid) { struct bfad_fcxp *drv_fcxp = bfad_fcxp; struct bfa_sge_s *sge; sge = drv_fcxp->rsp_sge + sgeid; return sge->sg_len; } static void bfad_send_fcpt_cb(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg, bfa_status_t req_status, u32 rsp_len, u32 resid_len, struct fchs_s *rsp_fchs) { struct bfad_fcxp *drv_fcxp = bfad_fcxp; drv_fcxp->req_status = req_status; drv_fcxp->rsp_len = rsp_len; /* bfa_fcxp will be automatically freed by BFA */ drv_fcxp->bfa_fcxp = NULL; complete(&drv_fcxp->comp); } static struct bfad_buf_info * bfad_fcxp_map_sg(struct bfad_s *bfad, void *payload_kbuf, uint32_t payload_len, uint32_t *num_sgles) { struct bfad_buf_info *buf_base, *buf_info; struct bfa_sge_s *sg_table; int sge_num = 1; buf_base = kcalloc(sizeof(struct bfad_buf_info) + sizeof(struct bfa_sge_s), sge_num, GFP_KERNEL); if (!buf_base) return NULL; sg_table = (struct bfa_sge_s *) (((uint8_t *)buf_base) + (sizeof(struct bfad_buf_info) * sge_num)); /* Allocate dma coherent memory */ buf_info = buf_base; buf_info->size = payload_len; buf_info->virt = dma_alloc_coherent(&bfad->pcidev->dev, buf_info->size, &buf_info->phys, GFP_KERNEL); if (!buf_info->virt) goto out_free_mem; /* copy the linear bsg buffer to buf_info */ memcpy(buf_info->virt, payload_kbuf, buf_info->size); /* * Setup SG table */ sg_table->sg_len = buf_info->size; sg_table->sg_addr = (void *)(size_t) buf_info->phys; *num_sgles = sge_num; return buf_base; out_free_mem: kfree(buf_base); return NULL; } static void bfad_fcxp_free_mem(struct bfad_s *bfad, struct bfad_buf_info *buf_base, uint32_t num_sgles) { int i; struct bfad_buf_info *buf_info = buf_base; if (buf_base) { for (i = 0; i < num_sgles; buf_info++, i++) { if (buf_info->virt != NULL) dma_free_coherent(&bfad->pcidev->dev, buf_info->size, buf_info->virt, buf_info->phys); } kfree(buf_base); } } static int bfad_fcxp_bsg_send(struct bsg_job *job, struct bfad_fcxp *drv_fcxp, bfa_bsg_fcpt_t *bsg_fcpt) { struct bfa_fcxp_s *hal_fcxp; struct bfad_s *bfad = drv_fcxp->port->bfad; unsigned long flags; uint8_t lp_tag; spin_lock_irqsave(&bfad->bfad_lock, flags); /* Allocate bfa_fcxp structure */ hal_fcxp = bfa_fcxp_req_rsp_alloc(drv_fcxp, &bfad->bfa, drv_fcxp->num_req_sgles, drv_fcxp->num_rsp_sgles, bfad_fcxp_get_req_sgaddr_cb, bfad_fcxp_get_req_sglen_cb, bfad_fcxp_get_rsp_sgaddr_cb, bfad_fcxp_get_rsp_sglen_cb, BFA_TRUE); if (!hal_fcxp) { bfa_trc(bfad, 0); spin_unlock_irqrestore(&bfad->bfad_lock, flags); return BFA_STATUS_ENOMEM; } drv_fcxp->bfa_fcxp = hal_fcxp; lp_tag = bfa_lps_get_tag_from_pid(&bfad->bfa, bsg_fcpt->fchs.s_id); bfa_fcxp_send(hal_fcxp, drv_fcxp->bfa_rport, 
bsg_fcpt->vf_id, lp_tag, bsg_fcpt->cts, bsg_fcpt->cos, job->request_payload.payload_len, &bsg_fcpt->fchs, bfad_send_fcpt_cb, bfad, job->reply_payload.payload_len, bsg_fcpt->tsecs); spin_unlock_irqrestore(&bfad->bfad_lock, flags); return BFA_STATUS_OK; } static int bfad_im_bsg_els_ct_request(struct bsg_job *job) { struct bfa_bsg_data *bsg_data; struct Scsi_Host *shost = fc_bsg_to_shost(job); struct bfad_im_port_s *im_port = bfad_get_im_port(shost); struct bfad_s *bfad = im_port->bfad; bfa_bsg_fcpt_t *bsg_fcpt; struct bfad_fcxp *drv_fcxp; struct bfa_fcs_lport_s *fcs_port; struct bfa_fcs_rport_s *fcs_rport; struct fc_bsg_request *bsg_request = job->request; struct fc_bsg_reply *bsg_reply = job->reply; uint32_t command_type = bsg_request->msgcode; unsigned long flags; struct bfad_buf_info *rsp_buf_info; void *req_kbuf = NULL, *rsp_kbuf = NULL; int rc = -EINVAL; job->reply_len = sizeof(uint32_t); /* Atleast uint32_t reply_len */ bsg_reply->reply_payload_rcv_len = 0; /* Get the payload passed in from userspace */ bsg_data = (struct bfa_bsg_data *) (((char *)bsg_request) + sizeof(struct fc_bsg_request)); if (bsg_data == NULL) goto out; /* * Allocate buffer for bsg_fcpt and do a copy_from_user op for payload * buffer of size bsg_data->payload_len */ bsg_fcpt = kzalloc(bsg_data->payload_len, GFP_KERNEL); if (!bsg_fcpt) { rc = -ENOMEM; goto out; } if (copy_from_user((uint8_t *)bsg_fcpt, (void *)(unsigned long)bsg_data->payload, bsg_data->payload_len)) { kfree(bsg_fcpt); rc = -EIO; goto out; } drv_fcxp = kzalloc(sizeof(struct bfad_fcxp), GFP_KERNEL); if (drv_fcxp == NULL) { kfree(bsg_fcpt); rc = -ENOMEM; goto out; } spin_lock_irqsave(&bfad->bfad_lock, flags); fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, bsg_fcpt->vf_id, bsg_fcpt->lpwwn); if (fcs_port == NULL) { bsg_fcpt->status = BFA_STATUS_UNKNOWN_LWWN; spin_unlock_irqrestore(&bfad->bfad_lock, flags); goto out_free_mem; } /* Check if the port is online before sending FC Passthru cmd */ if (!bfa_fcs_lport_is_online(fcs_port)) { bsg_fcpt->status = BFA_STATUS_PORT_OFFLINE; spin_unlock_irqrestore(&bfad->bfad_lock, flags); goto out_free_mem; } drv_fcxp->port = fcs_port->bfad_port; if (!drv_fcxp->port->bfad) drv_fcxp->port->bfad = bfad; /* Fetch the bfa_rport - if nexus needed */ if (command_type == FC_BSG_HST_ELS_NOLOGIN || command_type == FC_BSG_HST_CT) { /* BSG HST commands: no nexus needed */ drv_fcxp->bfa_rport = NULL; } else if (command_type == FC_BSG_RPT_ELS || command_type == FC_BSG_RPT_CT) { /* BSG RPT commands: nexus needed */ fcs_rport = bfa_fcs_lport_get_rport_by_pwwn(fcs_port, bsg_fcpt->dpwwn); if (fcs_rport == NULL) { bsg_fcpt->status = BFA_STATUS_UNKNOWN_RWWN; spin_unlock_irqrestore(&bfad->bfad_lock, flags); goto out_free_mem; } drv_fcxp->bfa_rport = fcs_rport->bfa_rport; } else { /* Unknown BSG msgcode; return -EINVAL */ spin_unlock_irqrestore(&bfad->bfad_lock, flags); goto out_free_mem; } spin_unlock_irqrestore(&bfad->bfad_lock, flags); /* allocate memory for req / rsp buffers */ req_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL); if (!req_kbuf) { printk(KERN_INFO "bfa %s: fcpt request buffer alloc failed\n", bfad->pci_name); rc = -ENOMEM; goto out_free_mem; } rsp_kbuf = kzalloc(job->reply_payload.payload_len, GFP_KERNEL); if (!rsp_kbuf) { printk(KERN_INFO "bfa %s: fcpt response buffer alloc failed\n", bfad->pci_name); rc = -ENOMEM; goto out_free_mem; } /* map req sg - copy the sg_list passed in to the linear buffer */ sg_copy_to_buffer(job->request_payload.sg_list, job->request_payload.sg_cnt, req_kbuf, 
job->request_payload.payload_len); drv_fcxp->reqbuf_info = bfad_fcxp_map_sg(bfad, req_kbuf, job->request_payload.payload_len, &drv_fcxp->num_req_sgles); if (!drv_fcxp->reqbuf_info) { printk(KERN_INFO "bfa %s: fcpt request fcxp_map_sg failed\n", bfad->pci_name); rc = -ENOMEM; goto out_free_mem; } drv_fcxp->req_sge = (struct bfa_sge_s *) (((uint8_t *)drv_fcxp->reqbuf_info) + (sizeof(struct bfad_buf_info) * drv_fcxp->num_req_sgles)); /* map rsp sg */ drv_fcxp->rspbuf_info = bfad_fcxp_map_sg(bfad, rsp_kbuf, job->reply_payload.payload_len, &drv_fcxp->num_rsp_sgles); if (!drv_fcxp->rspbuf_info) { printk(KERN_INFO "bfa %s: fcpt response fcxp_map_sg failed\n", bfad->pci_name); rc = -ENOMEM; goto out_free_mem; } rsp_buf_info = (struct bfad_buf_info *)drv_fcxp->rspbuf_info; drv_fcxp->rsp_sge = (struct bfa_sge_s *) (((uint8_t *)drv_fcxp->rspbuf_info) + (sizeof(struct bfad_buf_info) * drv_fcxp->num_rsp_sgles)); /* fcxp send */ init_completion(&drv_fcxp->comp); rc = bfad_fcxp_bsg_send(job, drv_fcxp, bsg_fcpt); if (rc == BFA_STATUS_OK) { wait_for_completion(&drv_fcxp->comp); bsg_fcpt->status = drv_fcxp->req_status; } else { bsg_fcpt->status = rc; goto out_free_mem; } /* fill the job->reply data */ if (drv_fcxp->req_status == BFA_STATUS_OK) { job->reply_len = drv_fcxp->rsp_len; bsg_reply->reply_payload_rcv_len = drv_fcxp->rsp_len; bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; } else { bsg_reply->reply_payload_rcv_len = sizeof(struct fc_bsg_ctels_reply); job->reply_len = sizeof(uint32_t); bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_REJECT; } /* Copy the response data to the reply_payload sg list */ sg_copy_from_buffer(job->reply_payload.sg_list, job->reply_payload.sg_cnt, (uint8_t *)rsp_buf_info->virt, job->reply_payload.payload_len); out_free_mem: bfad_fcxp_free_mem(bfad, drv_fcxp->rspbuf_info, drv_fcxp->num_rsp_sgles); bfad_fcxp_free_mem(bfad, drv_fcxp->reqbuf_info, drv_fcxp->num_req_sgles); kfree(req_kbuf); kfree(rsp_kbuf); /* Need a copy to user op */ if (copy_to_user((void *)(unsigned long)bsg_data->payload, (void *)bsg_fcpt, bsg_data->payload_len)) rc = -EIO; kfree(bsg_fcpt); kfree(drv_fcxp); out: bsg_reply->result = rc; if (rc == BFA_STATUS_OK) bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); return rc; } int bfad_im_bsg_request(struct bsg_job *job) { struct fc_bsg_request *bsg_request = job->request; struct fc_bsg_reply *bsg_reply = job->reply; uint32_t rc = BFA_STATUS_OK; switch (bsg_request->msgcode) { case FC_BSG_HST_VENDOR: /* Process BSG HST Vendor requests */ rc = bfad_im_bsg_vendor_request(job); break; case FC_BSG_HST_ELS_NOLOGIN: case FC_BSG_RPT_ELS: case FC_BSG_HST_CT: case FC_BSG_RPT_CT: /* Process BSG ELS/CT commands */ rc = bfad_im_bsg_els_ct_request(job); break; default: bsg_reply->result = rc = -EINVAL; bsg_reply->reply_payload_rcv_len = 0; break; } return rc; } int bfad_im_bsg_timeout(struct bsg_job *job) { /* Don't complete the BSG job request - return -EAGAIN * to reset bsg job timeout : for ELS/CT pass thru we * already have timer to track the request. */ return -EAGAIN; }
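/*
 * Illustrative sketch (not part of the driver): nearly every IOCMD handler
 * in this file follows the same round trip -- take bfad_lock, start an
 * asynchronous HAL request with bfad_hcb_comp()/&fcomp as the completion
 * callback, drop the lock, then block until the callback fires and report
 * its status.  With a hypothetical start_fn() standing in for calls such as
 * bfa_flash_read_part() or bfa_phy_read(), the pattern is:
 *
 *	struct bfad_hal_comp fcomp;
 *	unsigned long flags;
 *
 *	init_completion(&fcomp.comp);
 *	spin_lock_irqsave(&bfad->bfad_lock, flags);
 *	iocmd->status = start_fn(..., bfad_hcb_comp, &fcomp);
 *	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 *	if (iocmd->status == BFA_STATUS_OK) {
 *		wait_for_completion(&fcomp.comp);
 *		iocmd->status = fcomp.status;
 *	}
 *	return 0;
 */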
linux-master
drivers/scsi/bfa/bfad_bsg.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. * Copyright (c) 2014- QLogic Corporation. * All rights reserved * www.qlogic.com * * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. */ #include "bfad_drv.h" #include "bfa_defs_svc.h" #include "bfa_port.h" #include "bfi.h" #include "bfa_ioc.h" BFA_TRC_FILE(CNA, PORT); static void bfa_port_stats_swap(struct bfa_port_s *port, union bfa_port_stats_u *stats) { u32 *dip = (u32 *) stats; __be32 t0, t1; int i; for (i = 0; i < sizeof(union bfa_port_stats_u)/sizeof(u32); i += 2) { t0 = dip[i]; t1 = dip[i + 1]; #ifdef __BIG_ENDIAN dip[i] = be32_to_cpu(t0); dip[i + 1] = be32_to_cpu(t1); #else dip[i] = be32_to_cpu(t1); dip[i + 1] = be32_to_cpu(t0); #endif } } /* * bfa_port_enable_isr() * * * @param[in] port - Pointer to the port module * status - Return status from the f/w * * @return void */ static void bfa_port_enable_isr(struct bfa_port_s *port, bfa_status_t status) { bfa_trc(port, status); port->endis_pending = BFA_FALSE; port->endis_cbfn(port->endis_cbarg, status); } /* * bfa_port_disable_isr() * * * @param[in] port - Pointer to the port module * status - Return status from the f/w * * @return void */ static void bfa_port_disable_isr(struct bfa_port_s *port, bfa_status_t status) { bfa_trc(port, status); port->endis_pending = BFA_FALSE; port->endis_cbfn(port->endis_cbarg, status); } /* * bfa_port_get_stats_isr() * * * @param[in] port - Pointer to the Port module * status - Return status from the f/w * * @return void */ static void bfa_port_get_stats_isr(struct bfa_port_s *port, bfa_status_t status) { port->stats_status = status; port->stats_busy = BFA_FALSE; if (status == BFA_STATUS_OK) { memcpy(port->stats, port->stats_dma.kva, sizeof(union bfa_port_stats_u)); bfa_port_stats_swap(port, port->stats); port->stats->fc.secs_reset = ktime_get_seconds() - port->stats_reset_time; } if (port->stats_cbfn) { port->stats_cbfn(port->stats_cbarg, status); port->stats_cbfn = NULL; } } /* * bfa_port_clear_stats_isr() * * * @param[in] port - Pointer to the Port module * status - Return status from the f/w * * @return void */ static void bfa_port_clear_stats_isr(struct bfa_port_s *port, bfa_status_t status) { port->stats_status = status; port->stats_busy = BFA_FALSE; /* * re-initialize time stamp for stats reset */ port->stats_reset_time = ktime_get_seconds(); if (port->stats_cbfn) { port->stats_cbfn(port->stats_cbarg, status); port->stats_cbfn = NULL; } } /* * bfa_port_isr() * * * @param[in] Pointer to the Port module data structure. * * @return void */ static void bfa_port_isr(void *cbarg, struct bfi_mbmsg_s *m) { struct bfa_port_s *port = (struct bfa_port_s *) cbarg; union bfi_port_i2h_msg_u *i2hmsg; i2hmsg = (union bfi_port_i2h_msg_u *) m; bfa_trc(port, m->mh.msg_id); switch (m->mh.msg_id) { case BFI_PORT_I2H_ENABLE_RSP: if (port->endis_pending == BFA_FALSE) break; bfa_port_enable_isr(port, i2hmsg->enable_rsp.status); break; case BFI_PORT_I2H_DISABLE_RSP: if (port->endis_pending == BFA_FALSE) break; bfa_port_disable_isr(port, i2hmsg->disable_rsp.status); break; case BFI_PORT_I2H_GET_STATS_RSP: /* Stats busy flag is still set? 
(may be cmd timed out) */ if (port->stats_busy == BFA_FALSE) break; bfa_port_get_stats_isr(port, i2hmsg->getstats_rsp.status); break; case BFI_PORT_I2H_CLEAR_STATS_RSP: if (port->stats_busy == BFA_FALSE) break; bfa_port_clear_stats_isr(port, i2hmsg->clearstats_rsp.status); break; default: WARN_ON(1); } } /* * bfa_port_meminfo() * * * @param[in] void * * @return Size of DMA region */ u32 bfa_port_meminfo(void) { return BFA_ROUNDUP(sizeof(union bfa_port_stats_u), BFA_DMA_ALIGN_SZ); } /* * bfa_port_mem_claim() * * * @param[in] port Port module pointer * dma_kva Kernel Virtual Address of Port DMA Memory * dma_pa Physical Address of Port DMA Memory * * @return void */ void bfa_port_mem_claim(struct bfa_port_s *port, u8 *dma_kva, u64 dma_pa) { port->stats_dma.kva = dma_kva; port->stats_dma.pa = dma_pa; } /* * bfa_port_enable() * * Send the Port enable request to the f/w * * @param[in] Pointer to the Port module data structure. * * @return Status */ bfa_status_t bfa_port_enable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn, void *cbarg) { struct bfi_port_generic_req_s *m; /* If port is PBC disabled, return error */ if (port->pbc_disabled) { bfa_trc(port, BFA_STATUS_PBC); return BFA_STATUS_PBC; } if (bfa_ioc_is_disabled(port->ioc)) { bfa_trc(port, BFA_STATUS_IOC_DISABLED); return BFA_STATUS_IOC_DISABLED; } if (!bfa_ioc_is_operational(port->ioc)) { bfa_trc(port, BFA_STATUS_IOC_FAILURE); return BFA_STATUS_IOC_FAILURE; } /* if port is d-port enabled, return error */ if (port->dport_enabled) { bfa_trc(port, BFA_STATUS_DPORT_ERR); return BFA_STATUS_DPORT_ERR; } if (port->endis_pending) { bfa_trc(port, BFA_STATUS_DEVBUSY); return BFA_STATUS_DEVBUSY; } m = (struct bfi_port_generic_req_s *) port->endis_mb.msg; port->msgtag++; port->endis_cbfn = cbfn; port->endis_cbarg = cbarg; port->endis_pending = BFA_TRUE; bfi_h2i_set(m->mh, BFI_MC_PORT, BFI_PORT_H2I_ENABLE_REQ, bfa_ioc_portid(port->ioc)); bfa_ioc_mbox_queue(port->ioc, &port->endis_mb); return BFA_STATUS_OK; } /* * bfa_port_disable() * * Send the Port disable request to the f/w * * @param[in] Pointer to the Port module data structure. * * @return Status */ bfa_status_t bfa_port_disable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn, void *cbarg) { struct bfi_port_generic_req_s *m; /* If port is PBC disabled, return error */ if (port->pbc_disabled) { bfa_trc(port, BFA_STATUS_PBC); return BFA_STATUS_PBC; } if (bfa_ioc_is_disabled(port->ioc)) { bfa_trc(port, BFA_STATUS_IOC_DISABLED); return BFA_STATUS_IOC_DISABLED; } if (!bfa_ioc_is_operational(port->ioc)) { bfa_trc(port, BFA_STATUS_IOC_FAILURE); return BFA_STATUS_IOC_FAILURE; } /* if port is d-port enabled, return error */ if (port->dport_enabled) { bfa_trc(port, BFA_STATUS_DPORT_ERR); return BFA_STATUS_DPORT_ERR; } if (port->endis_pending) { bfa_trc(port, BFA_STATUS_DEVBUSY); return BFA_STATUS_DEVBUSY; } m = (struct bfi_port_generic_req_s *) port->endis_mb.msg; port->msgtag++; port->endis_cbfn = cbfn; port->endis_cbarg = cbarg; port->endis_pending = BFA_TRUE; bfi_h2i_set(m->mh, BFI_MC_PORT, BFI_PORT_H2I_DISABLE_REQ, bfa_ioc_portid(port->ioc)); bfa_ioc_mbox_queue(port->ioc, &port->endis_mb); return BFA_STATUS_OK; } /* * bfa_port_get_stats() * * Send the request to the f/w to fetch Port statistics. * * @param[in] Pointer to the Port module data structure. 
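 * Note: the statistics are DMAed into the region claimed earlier through
 * bfa_port_mem_claim() and byte-swapped before cbfn is invoked from the
 * mailbox handler bfa_port_isr().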
* * @return Status */ bfa_status_t bfa_port_get_stats(struct bfa_port_s *port, union bfa_port_stats_u *stats, bfa_port_stats_cbfn_t cbfn, void *cbarg) { struct bfi_port_get_stats_req_s *m; if (!bfa_ioc_is_operational(port->ioc)) { bfa_trc(port, BFA_STATUS_IOC_FAILURE); return BFA_STATUS_IOC_FAILURE; } if (port->stats_busy) { bfa_trc(port, BFA_STATUS_DEVBUSY); return BFA_STATUS_DEVBUSY; } m = (struct bfi_port_get_stats_req_s *) port->stats_mb.msg; port->stats = stats; port->stats_cbfn = cbfn; port->stats_cbarg = cbarg; port->stats_busy = BFA_TRUE; bfa_dma_be_addr_set(m->dma_addr, port->stats_dma.pa); bfi_h2i_set(m->mh, BFI_MC_PORT, BFI_PORT_H2I_GET_STATS_REQ, bfa_ioc_portid(port->ioc)); bfa_ioc_mbox_queue(port->ioc, &port->stats_mb); return BFA_STATUS_OK; } /* * bfa_port_clear_stats() * * * @param[in] Pointer to the Port module data structure. * * @return Status */ bfa_status_t bfa_port_clear_stats(struct bfa_port_s *port, bfa_port_stats_cbfn_t cbfn, void *cbarg) { struct bfi_port_generic_req_s *m; if (!bfa_ioc_is_operational(port->ioc)) { bfa_trc(port, BFA_STATUS_IOC_FAILURE); return BFA_STATUS_IOC_FAILURE; } if (port->stats_busy) { bfa_trc(port, BFA_STATUS_DEVBUSY); return BFA_STATUS_DEVBUSY; } m = (struct bfi_port_generic_req_s *) port->stats_mb.msg; port->stats_cbfn = cbfn; port->stats_cbarg = cbarg; port->stats_busy = BFA_TRUE; bfi_h2i_set(m->mh, BFI_MC_PORT, BFI_PORT_H2I_CLEAR_STATS_REQ, bfa_ioc_portid(port->ioc)); bfa_ioc_mbox_queue(port->ioc, &port->stats_mb); return BFA_STATUS_OK; } /* * bfa_port_notify() * * Port module IOC event handler * * @param[in] Pointer to the Port module data structure. * @param[in] IOC event structure * * @return void */ void bfa_port_notify(void *arg, enum bfa_ioc_event_e event) { struct bfa_port_s *port = (struct bfa_port_s *) arg; switch (event) { case BFA_IOC_E_DISABLED: case BFA_IOC_E_FAILED: /* Fail any pending get_stats/clear_stats requests */ if (port->stats_busy) { if (port->stats_cbfn) port->stats_cbfn(port->stats_cbarg, BFA_STATUS_FAILED); port->stats_cbfn = NULL; port->stats_busy = BFA_FALSE; } /* Clear any enable/disable is pending */ if (port->endis_pending) { if (port->endis_cbfn) port->endis_cbfn(port->endis_cbarg, BFA_STATUS_FAILED); port->endis_cbfn = NULL; port->endis_pending = BFA_FALSE; } /* clear D-port mode */ if (port->dport_enabled) bfa_port_set_dportenabled(port, BFA_FALSE); break; default: break; } } /* * bfa_port_attach() * * * @param[in] port - Pointer to the Port module data structure * ioc - Pointer to the ioc module data structure * dev - Pointer to the device driver module data structure * The device driver specific mbox ISR functions have * this pointer as one of the parameters. 
* trcmod - * * @return void */ void bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc, void *dev, struct bfa_trc_mod_s *trcmod) { WARN_ON(!port); port->dev = dev; port->ioc = ioc; port->trcmod = trcmod; port->stats_busy = BFA_FALSE; port->endis_pending = BFA_FALSE; port->stats_cbfn = NULL; port->endis_cbfn = NULL; port->pbc_disabled = BFA_FALSE; port->dport_enabled = BFA_FALSE; bfa_ioc_mbox_regisr(port->ioc, BFI_MC_PORT, bfa_port_isr, port); bfa_q_qe_init(&port->ioc_notify); bfa_ioc_notify_init(&port->ioc_notify, bfa_port_notify, port); list_add_tail(&port->ioc_notify.qe, &port->ioc->notify_q); /* * initialize time stamp for stats reset */ port->stats_reset_time = ktime_get_seconds(); bfa_trc(port, 0); } /* * bfa_port_set_dportenabled(); * * Port module- set pbc disabled flag * * @param[in] port - Pointer to the Port module data structure * * @return void */ void bfa_port_set_dportenabled(struct bfa_port_s *port, bfa_boolean_t enabled) { port->dport_enabled = enabled; } /* * CEE module specific definitions */ /* * bfa_cee_get_attr_isr() * * @brief CEE ISR for get-attributes responses from f/w * * @param[in] cee - Pointer to the CEE module * status - Return status from the f/w * * @return void */ static void bfa_cee_get_attr_isr(struct bfa_cee_s *cee, bfa_status_t status) { struct bfa_cee_lldp_cfg_s *lldp_cfg = &cee->attr->lldp_remote; cee->get_attr_status = status; bfa_trc(cee, 0); if (status == BFA_STATUS_OK) { bfa_trc(cee, 0); memcpy(cee->attr, cee->attr_dma.kva, sizeof(struct bfa_cee_attr_s)); lldp_cfg->time_to_live = be16_to_cpu(lldp_cfg->time_to_live); lldp_cfg->enabled_system_cap = be16_to_cpu(lldp_cfg->enabled_system_cap); } cee->get_attr_pending = BFA_FALSE; if (cee->cbfn.get_attr_cbfn) { bfa_trc(cee, 0); cee->cbfn.get_attr_cbfn(cee->cbfn.get_attr_cbarg, status); } } /* * bfa_cee_get_stats_isr() * * @brief CEE ISR for get-stats responses from f/w * * @param[in] cee - Pointer to the CEE module * status - Return status from the f/w * * @return void */ static void bfa_cee_get_stats_isr(struct bfa_cee_s *cee, bfa_status_t status) { u32 *buffer; int i; cee->get_stats_status = status; bfa_trc(cee, 0); if (status == BFA_STATUS_OK) { bfa_trc(cee, 0); memcpy(cee->stats, cee->stats_dma.kva, sizeof(struct bfa_cee_stats_s)); /* swap the cee stats */ buffer = (u32 *)cee->stats; for (i = 0; i < (sizeof(struct bfa_cee_stats_s) / sizeof(u32)); i++) buffer[i] = cpu_to_be32(buffer[i]); } cee->get_stats_pending = BFA_FALSE; bfa_trc(cee, 0); if (cee->cbfn.get_stats_cbfn) { bfa_trc(cee, 0); cee->cbfn.get_stats_cbfn(cee->cbfn.get_stats_cbarg, status); } } /* * bfa_cee_reset_stats_isr() * * @brief CEE ISR for reset-stats responses from f/w * * @param[in] cee - Pointer to the CEE module * status - Return status from the f/w * * @return void */ static void bfa_cee_reset_stats_isr(struct bfa_cee_s *cee, bfa_status_t status) { cee->reset_stats_status = status; cee->reset_stats_pending = BFA_FALSE; if (cee->cbfn.reset_stats_cbfn) cee->cbfn.reset_stats_cbfn(cee->cbfn.reset_stats_cbarg, status); } /* * bfa_cee_meminfo() * * @brief Returns the size of the DMA memory needed by CEE module * * @param[in] void * * @return Size of DMA region */ u32 bfa_cee_meminfo(void) { return BFA_ROUNDUP(sizeof(struct bfa_cee_attr_s), BFA_DMA_ALIGN_SZ) + BFA_ROUNDUP(sizeof(struct bfa_cee_stats_s), BFA_DMA_ALIGN_SZ); } /* * bfa_cee_mem_claim() * * @brief Initialized CEE DMA Memory * * @param[in] cee CEE module pointer * dma_kva Kernel Virtual Address of CEE DMA Memory * dma_pa Physical Address of CEE DMA Memory * * 
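 * Note: the attribute block is laid out first; the statistics block follows
 * at the next BFA_DMA_ALIGN_SZ-aligned offset within the same DMA region.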
@return void */ void bfa_cee_mem_claim(struct bfa_cee_s *cee, u8 *dma_kva, u64 dma_pa) { cee->attr_dma.kva = dma_kva; cee->attr_dma.pa = dma_pa; cee->stats_dma.kva = dma_kva + BFA_ROUNDUP( sizeof(struct bfa_cee_attr_s), BFA_DMA_ALIGN_SZ); cee->stats_dma.pa = dma_pa + BFA_ROUNDUP( sizeof(struct bfa_cee_attr_s), BFA_DMA_ALIGN_SZ); cee->attr = (struct bfa_cee_attr_s *) dma_kva; cee->stats = (struct bfa_cee_stats_s *) (dma_kva + BFA_ROUNDUP( sizeof(struct bfa_cee_attr_s), BFA_DMA_ALIGN_SZ)); } /* * bfa_cee_get_attr() * * @brief * Send the request to the f/w to fetch CEE attributes. * * @param[in] Pointer to the CEE module data structure. * * @return Status */ bfa_status_t bfa_cee_get_attr(struct bfa_cee_s *cee, struct bfa_cee_attr_s *attr, bfa_cee_get_attr_cbfn_t cbfn, void *cbarg) { struct bfi_cee_get_req_s *cmd; WARN_ON((cee == NULL) || (cee->ioc == NULL)); bfa_trc(cee, 0); if (!bfa_ioc_is_operational(cee->ioc)) { bfa_trc(cee, 0); return BFA_STATUS_IOC_FAILURE; } if (cee->get_attr_pending == BFA_TRUE) { bfa_trc(cee, 0); return BFA_STATUS_DEVBUSY; } cee->get_attr_pending = BFA_TRUE; cmd = (struct bfi_cee_get_req_s *) cee->get_cfg_mb.msg; cee->attr = attr; cee->cbfn.get_attr_cbfn = cbfn; cee->cbfn.get_attr_cbarg = cbarg; bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_CFG_REQ, bfa_ioc_portid(cee->ioc)); bfa_dma_be_addr_set(cmd->dma_addr, cee->attr_dma.pa); bfa_ioc_mbox_queue(cee->ioc, &cee->get_cfg_mb); return BFA_STATUS_OK; } /* * bfa_cee_get_stats() * * @brief * Send the request to the f/w to fetch CEE statistics. * * @param[in] Pointer to the CEE module data structure. * * @return Status */ bfa_status_t bfa_cee_get_stats(struct bfa_cee_s *cee, struct bfa_cee_stats_s *stats, bfa_cee_get_stats_cbfn_t cbfn, void *cbarg) { struct bfi_cee_get_req_s *cmd; WARN_ON((cee == NULL) || (cee->ioc == NULL)); if (!bfa_ioc_is_operational(cee->ioc)) { bfa_trc(cee, 0); return BFA_STATUS_IOC_FAILURE; } if (cee->get_stats_pending == BFA_TRUE) { bfa_trc(cee, 0); return BFA_STATUS_DEVBUSY; } cee->get_stats_pending = BFA_TRUE; cmd = (struct bfi_cee_get_req_s *) cee->get_stats_mb.msg; cee->stats = stats; cee->cbfn.get_stats_cbfn = cbfn; cee->cbfn.get_stats_cbarg = cbarg; bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_STATS_REQ, bfa_ioc_portid(cee->ioc)); bfa_dma_be_addr_set(cmd->dma_addr, cee->stats_dma.pa); bfa_ioc_mbox_queue(cee->ioc, &cee->get_stats_mb); return BFA_STATUS_OK; } /* * bfa_cee_reset_stats() * * @brief Clears CEE Stats in the f/w. * * @param[in] Pointer to the CEE module data structure. * * @return Status */ bfa_status_t bfa_cee_reset_stats(struct bfa_cee_s *cee, bfa_cee_reset_stats_cbfn_t cbfn, void *cbarg) { struct bfi_cee_reset_stats_s *cmd; WARN_ON((cee == NULL) || (cee->ioc == NULL)); if (!bfa_ioc_is_operational(cee->ioc)) { bfa_trc(cee, 0); return BFA_STATUS_IOC_FAILURE; } if (cee->reset_stats_pending == BFA_TRUE) { bfa_trc(cee, 0); return BFA_STATUS_DEVBUSY; } cee->reset_stats_pending = BFA_TRUE; cmd = (struct bfi_cee_reset_stats_s *) cee->reset_stats_mb.msg; cee->cbfn.reset_stats_cbfn = cbfn; cee->cbfn.reset_stats_cbarg = cbarg; bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_RESET_STATS, bfa_ioc_portid(cee->ioc)); bfa_ioc_mbox_queue(cee->ioc, &cee->reset_stats_mb); return BFA_STATUS_OK; } /* * bfa_cee_isrs() * * @brief Handles Mail-box interrupts for CEE module. * * @param[in] Pointer to the CEE module data structure. 
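 * @param[in] Pointer to the received mailbox message.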
* * @return void */ static void bfa_cee_isr(void *cbarg, struct bfi_mbmsg_s *m) { union bfi_cee_i2h_msg_u *msg; struct bfi_cee_get_rsp_s *get_rsp; struct bfa_cee_s *cee = (struct bfa_cee_s *) cbarg; msg = (union bfi_cee_i2h_msg_u *) m; get_rsp = (struct bfi_cee_get_rsp_s *) m; bfa_trc(cee, msg->mh.msg_id); switch (msg->mh.msg_id) { case BFI_CEE_I2H_GET_CFG_RSP: bfa_trc(cee, get_rsp->cmd_status); bfa_cee_get_attr_isr(cee, get_rsp->cmd_status); break; case BFI_CEE_I2H_GET_STATS_RSP: bfa_cee_get_stats_isr(cee, get_rsp->cmd_status); break; case BFI_CEE_I2H_RESET_STATS_RSP: bfa_cee_reset_stats_isr(cee, get_rsp->cmd_status); break; default: WARN_ON(1); } } /* * bfa_cee_notify() * * @brief CEE module IOC event handler. * * @param[in] Pointer to the CEE module data structure. * @param[in] IOC event type * * @return void */ static void bfa_cee_notify(void *arg, enum bfa_ioc_event_e event) { struct bfa_cee_s *cee = (struct bfa_cee_s *) arg; bfa_trc(cee, event); switch (event) { case BFA_IOC_E_DISABLED: case BFA_IOC_E_FAILED: if (cee->get_attr_pending == BFA_TRUE) { cee->get_attr_status = BFA_STATUS_FAILED; cee->get_attr_pending = BFA_FALSE; if (cee->cbfn.get_attr_cbfn) { cee->cbfn.get_attr_cbfn( cee->cbfn.get_attr_cbarg, BFA_STATUS_FAILED); } } if (cee->get_stats_pending == BFA_TRUE) { cee->get_stats_status = BFA_STATUS_FAILED; cee->get_stats_pending = BFA_FALSE; if (cee->cbfn.get_stats_cbfn) { cee->cbfn.get_stats_cbfn( cee->cbfn.get_stats_cbarg, BFA_STATUS_FAILED); } } if (cee->reset_stats_pending == BFA_TRUE) { cee->reset_stats_status = BFA_STATUS_FAILED; cee->reset_stats_pending = BFA_FALSE; if (cee->cbfn.reset_stats_cbfn) { cee->cbfn.reset_stats_cbfn( cee->cbfn.reset_stats_cbarg, BFA_STATUS_FAILED); } } break; default: break; } } /* * bfa_cee_attach() * * @brief CEE module-attach API * * @param[in] cee - Pointer to the CEE module data structure * ioc - Pointer to the ioc module data structure * dev - Pointer to the device driver module data structure * The device driver specific mbox ISR functions have * this pointer as one of the parameters. * * @return void */ void bfa_cee_attach(struct bfa_cee_s *cee, struct bfa_ioc_s *ioc, void *dev) { WARN_ON(cee == NULL); cee->dev = dev; cee->ioc = ioc; bfa_ioc_mbox_regisr(cee->ioc, BFI_MC_CEE, bfa_cee_isr, cee); bfa_q_qe_init(&cee->ioc_notify); bfa_ioc_notify_init(&cee->ioc_notify, bfa_cee_notify, cee); list_add_tail(&cee->ioc_notify.qe, &cee->ioc->notify_q); }
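/*
 * Illustrative sketch (not part of the driver): a caller is expected to wire
 * the CEE module up in roughly this order -- attach, claim a DMA region sized
 * by bfa_cee_meminfo(), then issue requests whose results arrive through the
 * supplied callback.  The names my_attr_cbfn, my_cbarg, dma_kva/dma_pa and
 * attr below are hypothetical placeholders:
 *
 *	bfa_cee_attach(cee, ioc, dev);
 *	bfa_cee_mem_claim(cee, dma_kva, dma_pa);
 *	if (bfa_cee_get_attr(cee, &attr, my_attr_cbfn, my_cbarg) != BFA_STATUS_OK)
 *		return;		(IOC not operational, or a request already pending)
 *	(my_attr_cbfn(my_cbarg, status) runs once BFI_CEE_I2H_GET_CFG_RSP arrives)
 */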
linux-master
drivers/scsi/bfa/bfa_port.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. * Copyright (c) 2014- QLogic Corporation. * All rights reserved * www.qlogic.com * * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. */ /* * bfad_im.c Linux driver IM module. */ #include <linux/export.h> #include "bfad_drv.h" #include "bfad_im.h" #include "bfa_fcs.h" BFA_TRC_FILE(LDRV, IM); DEFINE_IDR(bfad_im_port_index); struct scsi_transport_template *bfad_im_scsi_transport_template; struct scsi_transport_template *bfad_im_scsi_vport_transport_template; static void bfad_im_itnim_work_handler(struct work_struct *work); static int bfad_im_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmnd); static int bfad_im_slave_alloc(struct scsi_device *sdev); static void bfad_im_fc_rport_add(struct bfad_im_port_s *im_port, struct bfad_itnim_s *itnim); void bfa_cb_ioim_done(void *drv, struct bfad_ioim_s *dio, enum bfi_ioim_status io_status, u8 scsi_status, int sns_len, u8 *sns_info, s32 residue) { struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio; struct bfad_s *bfad = drv; struct bfad_itnim_data_s *itnim_data; struct bfad_itnim_s *itnim; u8 host_status = DID_OK; switch (io_status) { case BFI_IOIM_STS_OK: bfa_trc(bfad, scsi_status); scsi_set_resid(cmnd, 0); if (sns_len > 0) { bfa_trc(bfad, sns_len); if (sns_len > SCSI_SENSE_BUFFERSIZE) sns_len = SCSI_SENSE_BUFFERSIZE; memcpy(cmnd->sense_buffer, sns_info, sns_len); } if (residue > 0) { bfa_trc(bfad, residue); scsi_set_resid(cmnd, residue); if (!sns_len && (scsi_status == SAM_STAT_GOOD) && (scsi_bufflen(cmnd) - residue) < cmnd->underflow) { bfa_trc(bfad, 0); host_status = DID_ERROR; } } cmnd->result = host_status << 16 | scsi_status; break; case BFI_IOIM_STS_TIMEDOUT: cmnd->result = DID_TIME_OUT << 16; break; case BFI_IOIM_STS_PATHTOV: cmnd->result = DID_TRANSPORT_DISRUPTED << 16; break; default: cmnd->result = DID_ERROR << 16; } /* Unmap DMA, if host is NULL, it means a scsi passthru cmd */ if (cmnd->device->host != NULL) scsi_dma_unmap(cmnd); cmnd->host_scribble = NULL; bfa_trc(bfad, cmnd->result); itnim_data = cmnd->device->hostdata; if (itnim_data) { itnim = itnim_data->itnim; if (!cmnd->result && itnim && (bfa_lun_queue_depth > cmnd->device->queue_depth)) { /* Queue depth adjustment for good status completion */ bfad_ramp_up_qdepth(itnim, cmnd->device); } else if (cmnd->result == SAM_STAT_TASK_SET_FULL && itnim) { /* qfull handling */ bfad_handle_qfull(itnim, cmnd->device); } } scsi_done(cmnd); } void bfa_cb_ioim_good_comp(void *drv, struct bfad_ioim_s *dio) { struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio; struct bfad_itnim_data_s *itnim_data; struct bfad_itnim_s *itnim; cmnd->result = DID_OK << 16 | SAM_STAT_GOOD; /* Unmap DMA, if host is NULL, it means a scsi passthru cmd */ if (cmnd->device->host != NULL) scsi_dma_unmap(cmnd); cmnd->host_scribble = NULL; /* Queue depth adjustment */ if (bfa_lun_queue_depth > cmnd->device->queue_depth) { itnim_data = cmnd->device->hostdata; if (itnim_data) { itnim = itnim_data->itnim; if (itnim) bfad_ramp_up_qdepth(itnim, cmnd->device); } } scsi_done(cmnd); } void bfa_cb_ioim_abort(void *drv, struct bfad_ioim_s *dio) { struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio; struct bfad_s *bfad = drv; cmnd->result = DID_ERROR << 16; /* Unmap DMA, if host is NULL, it means a scsi passthru cmd */ if (cmnd->device->host != NULL) scsi_dma_unmap(cmnd); bfa_trc(bfad, cmnd->result); cmnd->host_scribble = NULL; } void bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk, enum bfi_tskim_status 
tsk_status) { struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dtsk; wait_queue_head_t *wq; bfad_priv(cmnd)->status |= tsk_status << 1; set_bit(IO_DONE_BIT, &bfad_priv(cmnd)->status); wq = bfad_priv(cmnd)->wq; bfad_priv(cmnd)->wq = NULL; if (wq) wake_up(wq); } /* * Scsi_Host_template SCSI host template */ /* * Scsi_Host template entry, returns BFAD PCI info. */ static const char * bfad_im_info(struct Scsi_Host *shost) { static char bfa_buf[256]; struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; memset(bfa_buf, 0, sizeof(bfa_buf)); snprintf(bfa_buf, sizeof(bfa_buf), "QLogic BR-series FC/FCOE Adapter, hwpath: %s driver: %s", bfad->pci_name, BFAD_DRIVER_VERSION); return bfa_buf; } /* * Scsi_Host template entry, aborts the specified SCSI command. * * Returns: SUCCESS or FAILED. */ static int bfad_im_abort_handler(struct scsi_cmnd *cmnd) { struct Scsi_Host *shost = cmnd->device->host; struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; struct bfa_ioim_s *hal_io; unsigned long flags; u32 timeout; int rc = FAILED; spin_lock_irqsave(&bfad->bfad_lock, flags); hal_io = (struct bfa_ioim_s *) cmnd->host_scribble; if (!hal_io) { /* IO has been completed, return success */ rc = SUCCESS; goto out; } if (hal_io->dio != (struct bfad_ioim_s *) cmnd) { rc = FAILED; goto out; } bfa_trc(bfad, hal_io->iotag); BFA_LOG(KERN_INFO, bfad, bfa_log_level, "scsi%d: abort cmnd %p iotag %x\n", im_port->shost->host_no, cmnd, hal_io->iotag); (void) bfa_ioim_abort(hal_io); spin_unlock_irqrestore(&bfad->bfad_lock, flags); /* Need to wait until the command get aborted */ timeout = 10; while ((struct bfa_ioim_s *) cmnd->host_scribble == hal_io) { set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout(timeout); if (timeout < 4 * HZ) timeout *= 2; } scsi_done(cmnd); bfa_trc(bfad, hal_io->iotag); BFA_LOG(KERN_INFO, bfad, bfa_log_level, "scsi%d: complete abort 0x%p iotag 0x%x\n", im_port->shost->host_no, cmnd, hal_io->iotag); return SUCCESS; out: spin_unlock_irqrestore(&bfad->bfad_lock, flags); return rc; } static bfa_status_t bfad_im_target_reset_send(struct bfad_s *bfad, struct scsi_cmnd *cmnd, struct bfad_itnim_s *itnim) { struct bfa_tskim_s *tskim; struct bfa_itnim_s *bfa_itnim; bfa_status_t rc = BFA_STATUS_OK; struct scsi_lun scsilun; tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd); if (!tskim) { BFA_LOG(KERN_ERR, bfad, bfa_log_level, "target reset, fail to allocate tskim\n"); rc = BFA_STATUS_FAILED; goto out; } /* * Set host_scribble to NULL to avoid aborting a task command if * happens. */ cmnd->host_scribble = NULL; bfad_priv(cmnd)->status = 0; bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim->fcs_itnim); /* * bfa_itnim can be NULL if the port gets disconnected and the bfa * and fcs layers have cleaned up their nexus with the targets and * the same has not been cleaned up by the shim */ if (bfa_itnim == NULL) { bfa_tskim_free(tskim); BFA_LOG(KERN_ERR, bfad, bfa_log_level, "target reset, bfa_itnim is NULL\n"); rc = BFA_STATUS_FAILED; goto out; } memset(&scsilun, 0, sizeof(scsilun)); bfa_tskim_start(tskim, bfa_itnim, scsilun, FCP_TM_TARGET_RESET, BFAD_TARGET_RESET_TMO); out: return rc; } /* * Scsi_Host template entry, resets a LUN and abort its all commands. * * Returns: SUCCESS or FAILED. 
* */ static int bfad_im_reset_lun_handler(struct scsi_cmnd *cmnd) { struct Scsi_Host *shost = cmnd->device->host; struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_itnim_data_s *itnim_data = cmnd->device->hostdata; struct bfad_s *bfad = im_port->bfad; struct bfa_tskim_s *tskim; struct bfad_itnim_s *itnim; struct bfa_itnim_s *bfa_itnim; DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); int rc = SUCCESS; unsigned long flags; enum bfi_tskim_status task_status; struct scsi_lun scsilun; spin_lock_irqsave(&bfad->bfad_lock, flags); itnim = itnim_data->itnim; if (!itnim) { spin_unlock_irqrestore(&bfad->bfad_lock, flags); rc = FAILED; goto out; } tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd); if (!tskim) { BFA_LOG(KERN_ERR, bfad, bfa_log_level, "LUN reset, fail to allocate tskim"); spin_unlock_irqrestore(&bfad->bfad_lock, flags); rc = FAILED; goto out; } /* * Set host_scribble to NULL to avoid aborting a task command * if happens. */ cmnd->host_scribble = NULL; bfad_priv(cmnd)->wq = &wq; bfad_priv(cmnd)->status = 0; bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim->fcs_itnim); /* * bfa_itnim can be NULL if the port gets disconnected and the bfa * and fcs layers have cleaned up their nexus with the targets and * the same has not been cleaned up by the shim */ if (bfa_itnim == NULL) { bfa_tskim_free(tskim); BFA_LOG(KERN_ERR, bfad, bfa_log_level, "lun reset, bfa_itnim is NULL\n"); spin_unlock_irqrestore(&bfad->bfad_lock, flags); rc = FAILED; goto out; } int_to_scsilun(cmnd->device->lun, &scsilun); bfa_tskim_start(tskim, bfa_itnim, scsilun, FCP_TM_LUN_RESET, BFAD_LUN_RESET_TMO); spin_unlock_irqrestore(&bfad->bfad_lock, flags); wait_event(wq, test_bit(IO_DONE_BIT, &bfad_priv(cmnd)->status)); task_status = bfad_priv(cmnd)->status >> 1; if (task_status != BFI_TSKIM_STS_OK) { BFA_LOG(KERN_ERR, bfad, bfa_log_level, "LUN reset failure, status: %d\n", task_status); rc = FAILED; } out: return rc; } /* * Scsi_Host template entry, resets the target and abort all commands. */ static int bfad_im_reset_target_handler(struct scsi_cmnd *cmnd) { struct Scsi_Host *shost = cmnd->device->host; struct scsi_target *starget = scsi_target(cmnd->device); struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; struct bfad_itnim_s *itnim; unsigned long flags; u32 rc, rtn = FAILED; DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); enum bfi_tskim_status task_status; spin_lock_irqsave(&bfad->bfad_lock, flags); itnim = bfad_get_itnim(im_port, starget->id); if (itnim) { bfad_priv(cmnd)->wq = &wq; rc = bfad_im_target_reset_send(bfad, cmnd, itnim); if (rc == BFA_STATUS_OK) { /* wait target reset to complete */ spin_unlock_irqrestore(&bfad->bfad_lock, flags); wait_event(wq, test_bit(IO_DONE_BIT, &bfad_priv(cmnd)->status)); spin_lock_irqsave(&bfad->bfad_lock, flags); task_status = bfad_priv(cmnd)->status >> 1; if (task_status != BFI_TSKIM_STS_OK) BFA_LOG(KERN_ERR, bfad, bfa_log_level, "target reset failure," " status: %d\n", task_status); else rtn = SUCCESS; } } spin_unlock_irqrestore(&bfad->bfad_lock, flags); return rtn; } /* * Scsi_Host template entry slave_destroy. 
*/ static void bfad_im_slave_destroy(struct scsi_device *sdev) { sdev->hostdata = NULL; return; } /* * BFA FCS itnim callbacks */ /* * BFA FCS itnim alloc callback, after successful PRLI * Context: Interrupt */ int bfa_fcb_itnim_alloc(struct bfad_s *bfad, struct bfa_fcs_itnim_s **itnim, struct bfad_itnim_s **itnim_drv) { *itnim_drv = kzalloc(sizeof(struct bfad_itnim_s), GFP_ATOMIC); if (*itnim_drv == NULL) return -ENOMEM; (*itnim_drv)->im = bfad->im; *itnim = &(*itnim_drv)->fcs_itnim; (*itnim_drv)->state = ITNIM_STATE_NONE; /* * Initiaze the itnim_work */ INIT_WORK(&(*itnim_drv)->itnim_work, bfad_im_itnim_work_handler); bfad->bfad_flags |= BFAD_RPORT_ONLINE; return 0; } /* * BFA FCS itnim free callback. * Context: Interrupt. bfad_lock is held */ void bfa_fcb_itnim_free(struct bfad_s *bfad, struct bfad_itnim_s *itnim_drv) { struct bfad_port_s *port; wwn_t wwpn; u32 fcid; char wwpn_str[32], fcid_str[16]; struct bfad_im_s *im = itnim_drv->im; /* online to free state transtion should not happen */ WARN_ON(itnim_drv->state == ITNIM_STATE_ONLINE); itnim_drv->queue_work = 1; /* offline request is not yet done, use the same request to free */ if (itnim_drv->state == ITNIM_STATE_OFFLINE_PENDING) itnim_drv->queue_work = 0; itnim_drv->state = ITNIM_STATE_FREE; port = bfa_fcs_itnim_get_drvport(&itnim_drv->fcs_itnim); itnim_drv->im_port = port->im_port; wwpn = bfa_fcs_itnim_get_pwwn(&itnim_drv->fcs_itnim); fcid = bfa_fcs_itnim_get_fcid(&itnim_drv->fcs_itnim); wwn2str(wwpn_str, wwpn); fcid2str(fcid_str, fcid); BFA_LOG(KERN_INFO, bfad, bfa_log_level, "ITNIM FREE scsi%d: FCID: %s WWPN: %s\n", port->im_port->shost->host_no, fcid_str, wwpn_str); /* ITNIM processing */ if (itnim_drv->queue_work) queue_work(im->drv_workq, &itnim_drv->itnim_work); } /* * BFA FCS itnim online callback. * Context: Interrupt. bfad_lock is held */ void bfa_fcb_itnim_online(struct bfad_itnim_s *itnim_drv) { struct bfad_port_s *port; struct bfad_im_s *im = itnim_drv->im; itnim_drv->bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim_drv->fcs_itnim); port = bfa_fcs_itnim_get_drvport(&itnim_drv->fcs_itnim); itnim_drv->state = ITNIM_STATE_ONLINE; itnim_drv->queue_work = 1; itnim_drv->im_port = port->im_port; /* ITNIM processing */ if (itnim_drv->queue_work) queue_work(im->drv_workq, &itnim_drv->itnim_work); } /* * BFA FCS itnim offline callback. * Context: Interrupt. bfad_lock is held */ void bfa_fcb_itnim_offline(struct bfad_itnim_s *itnim_drv) { struct bfad_port_s *port; struct bfad_s *bfad; struct bfad_im_s *im = itnim_drv->im; port = bfa_fcs_itnim_get_drvport(&itnim_drv->fcs_itnim); bfad = port->bfad; if ((bfad->pport.flags & BFAD_PORT_DELETE) || (port->flags & BFAD_PORT_DELETE)) { itnim_drv->state = ITNIM_STATE_OFFLINE; return; } itnim_drv->im_port = port->im_port; itnim_drv->state = ITNIM_STATE_OFFLINE_PENDING; itnim_drv->queue_work = 1; /* ITNIM processing */ if (itnim_drv->queue_work) queue_work(im->drv_workq, &itnim_drv->itnim_work); } /* * Allocate a Scsi_Host for a port. 
*/ int bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port, struct device *dev) { struct bfad_im_port_pointer *im_portp; int error; mutex_lock(&bfad_mutex); error = idr_alloc(&bfad_im_port_index, im_port, 0, 0, GFP_KERNEL); if (error < 0) { mutex_unlock(&bfad_mutex); printk(KERN_WARNING "idr_alloc failure\n"); goto out; } im_port->idr_id = error; mutex_unlock(&bfad_mutex); im_port->shost = bfad_scsi_host_alloc(im_port, bfad); if (!im_port->shost) { error = 1; goto out_free_idr; } im_portp = shost_priv(im_port->shost); im_portp->p = im_port; im_port->shost->unique_id = im_port->idr_id; im_port->shost->this_id = -1; im_port->shost->max_id = MAX_FCP_TARGET; im_port->shost->max_lun = MAX_FCP_LUN; im_port->shost->max_cmd_len = 16; im_port->shost->can_queue = bfad->cfg_data.ioc_queue_depth; if (im_port->port->pvb_type == BFAD_PORT_PHYS_BASE) im_port->shost->transportt = bfad_im_scsi_transport_template; else im_port->shost->transportt = bfad_im_scsi_vport_transport_template; error = scsi_add_host_with_dma(im_port->shost, dev, &bfad->pcidev->dev); if (error) { printk(KERN_WARNING "scsi_add_host failure %d\n", error); goto out_fc_rel; } return 0; out_fc_rel: scsi_host_put(im_port->shost); im_port->shost = NULL; out_free_idr: mutex_lock(&bfad_mutex); idr_remove(&bfad_im_port_index, im_port->idr_id); mutex_unlock(&bfad_mutex); out: return error; } void bfad_im_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port) { bfa_trc(bfad, bfad->inst_no); BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Free scsi%d\n", im_port->shost->host_no); fc_remove_host(im_port->shost); scsi_remove_host(im_port->shost); scsi_host_put(im_port->shost); mutex_lock(&bfad_mutex); idr_remove(&bfad_im_port_index, im_port->idr_id); mutex_unlock(&bfad_mutex); } static void bfad_im_port_delete_handler(struct work_struct *work) { struct bfad_im_port_s *im_port = container_of(work, struct bfad_im_port_s, port_delete_work); if (im_port->port->pvb_type != BFAD_PORT_PHYS_BASE) { im_port->flags |= BFAD_PORT_DELETE; fc_vport_terminate(im_port->fc_vport); } } bfa_status_t bfad_im_port_new(struct bfad_s *bfad, struct bfad_port_s *port) { int rc = BFA_STATUS_OK; struct bfad_im_port_s *im_port; im_port = kzalloc(sizeof(struct bfad_im_port_s), GFP_ATOMIC); if (im_port == NULL) { rc = BFA_STATUS_ENOMEM; goto ext; } port->im_port = im_port; im_port->port = port; im_port->bfad = bfad; INIT_WORK(&im_port->port_delete_work, bfad_im_port_delete_handler); INIT_LIST_HEAD(&im_port->itnim_mapped_list); INIT_LIST_HEAD(&im_port->binding_list); ext: return rc; } void bfad_im_port_delete(struct bfad_s *bfad, struct bfad_port_s *port) { struct bfad_im_port_s *im_port = port->im_port; queue_work(bfad->im->drv_workq, &im_port->port_delete_work); } void bfad_im_port_clean(struct bfad_im_port_s *im_port) { struct bfad_fcp_binding *bp, *bp_new; unsigned long flags; struct bfad_s *bfad = im_port->bfad; spin_lock_irqsave(&bfad->bfad_lock, flags); list_for_each_entry_safe(bp, bp_new, &im_port->binding_list, list_entry) { list_del(&bp->list_entry); kfree(bp); } /* the itnim_mapped_list must be empty at this time */ WARN_ON(!list_empty(&im_port->itnim_mapped_list)); spin_unlock_irqrestore(&bfad->bfad_lock, flags); } static void bfad_aen_im_notify_handler(struct work_struct *work) { struct bfad_im_s *im = container_of(work, struct bfad_im_s, aen_im_notify_work); struct bfa_aen_entry_s *aen_entry; struct bfad_s *bfad = im->bfad; struct Scsi_Host *shost = bfad->pport.im_port->shost; void *event_data; unsigned long flags; while 
(!list_empty(&bfad->active_aen_q)) { spin_lock_irqsave(&bfad->bfad_aen_spinlock, flags); bfa_q_deq(&bfad->active_aen_q, &aen_entry); spin_unlock_irqrestore(&bfad->bfad_aen_spinlock, flags); event_data = (char *)aen_entry + sizeof(struct list_head); fc_host_post_vendor_event(shost, fc_get_event_number(), sizeof(struct bfa_aen_entry_s) - sizeof(struct list_head), (char *)event_data, BFAD_NL_VENDOR_ID); spin_lock_irqsave(&bfad->bfad_aen_spinlock, flags); list_add_tail(&aen_entry->qe, &bfad->free_aen_q); spin_unlock_irqrestore(&bfad->bfad_aen_spinlock, flags); } } bfa_status_t bfad_im_probe(struct bfad_s *bfad) { struct bfad_im_s *im; im = kzalloc(sizeof(struct bfad_im_s), GFP_KERNEL); if (im == NULL) return BFA_STATUS_ENOMEM; bfad->im = im; im->bfad = bfad; if (bfad_thread_workq(bfad) != BFA_STATUS_OK) { kfree(im); return BFA_STATUS_FAILED; } INIT_WORK(&im->aen_im_notify_work, bfad_aen_im_notify_handler); return BFA_STATUS_OK; } void bfad_im_probe_undo(struct bfad_s *bfad) { if (bfad->im) { bfad_destroy_workq(bfad->im); kfree(bfad->im); bfad->im = NULL; } } struct Scsi_Host * bfad_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad) { struct scsi_host_template *sht; if (im_port->port->pvb_type == BFAD_PORT_PHYS_BASE) sht = &bfad_im_scsi_host_template; else sht = &bfad_im_vport_template; if (max_xfer_size != BFAD_MAX_SECTORS >> 1) sht->max_sectors = max_xfer_size << 1; sht->sg_tablesize = bfad->cfg_data.io_max_sge; return scsi_host_alloc(sht, sizeof(struct bfad_im_port_pointer)); } void bfad_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port) { if (!(im_port->flags & BFAD_PORT_DELETE)) flush_workqueue(bfad->im->drv_workq); bfad_im_scsi_host_free(im_port->bfad, im_port); bfad_im_port_clean(im_port); kfree(im_port); } void bfad_destroy_workq(struct bfad_im_s *im) { if (im && im->drv_workq) { destroy_workqueue(im->drv_workq); im->drv_workq = NULL; } } bfa_status_t bfad_thread_workq(struct bfad_s *bfad) { struct bfad_im_s *im = bfad->im; bfa_trc(bfad, 0); snprintf(im->drv_workq_name, KOBJ_NAME_LEN, "bfad_wq_%d", bfad->inst_no); im->drv_workq = create_singlethread_workqueue(im->drv_workq_name); if (!im->drv_workq) return BFA_STATUS_FAILED; return BFA_STATUS_OK; } /* * Scsi_Host template entry. * * Description: * OS entry point to adjust the queue_depths on a per-device basis. * Called once per device during the bus scan. * Return non-zero if fails. 
*/ static int bfad_im_slave_configure(struct scsi_device *sdev) { scsi_change_queue_depth(sdev, bfa_lun_queue_depth); return 0; } struct scsi_host_template bfad_im_scsi_host_template = { .module = THIS_MODULE, .name = BFAD_DRIVER_NAME, .info = bfad_im_info, .queuecommand = bfad_im_queuecommand, .cmd_size = sizeof(struct bfad_cmd_priv), .eh_timed_out = fc_eh_timed_out, .eh_abort_handler = bfad_im_abort_handler, .eh_device_reset_handler = bfad_im_reset_lun_handler, .eh_target_reset_handler = bfad_im_reset_target_handler, .slave_alloc = bfad_im_slave_alloc, .slave_configure = bfad_im_slave_configure, .slave_destroy = bfad_im_slave_destroy, .this_id = -1, .sg_tablesize = BFAD_IO_MAX_SGE, .cmd_per_lun = 3, .shost_groups = bfad_im_host_groups, .max_sectors = BFAD_MAX_SECTORS, .vendor_id = BFA_PCI_VENDOR_ID_BROCADE, }; struct scsi_host_template bfad_im_vport_template = { .module = THIS_MODULE, .name = BFAD_DRIVER_NAME, .info = bfad_im_info, .queuecommand = bfad_im_queuecommand, .cmd_size = sizeof(struct bfad_cmd_priv), .eh_timed_out = fc_eh_timed_out, .eh_abort_handler = bfad_im_abort_handler, .eh_device_reset_handler = bfad_im_reset_lun_handler, .eh_target_reset_handler = bfad_im_reset_target_handler, .slave_alloc = bfad_im_slave_alloc, .slave_configure = bfad_im_slave_configure, .slave_destroy = bfad_im_slave_destroy, .this_id = -1, .sg_tablesize = BFAD_IO_MAX_SGE, .cmd_per_lun = 3, .shost_groups = bfad_im_vport_groups, .max_sectors = BFAD_MAX_SECTORS, }; bfa_status_t bfad_im_module_init(void) { bfad_im_scsi_transport_template = fc_attach_transport(&bfad_im_fc_function_template); if (!bfad_im_scsi_transport_template) return BFA_STATUS_ENOMEM; bfad_im_scsi_vport_transport_template = fc_attach_transport(&bfad_im_vport_fc_function_template); if (!bfad_im_scsi_vport_transport_template) { fc_release_transport(bfad_im_scsi_transport_template); return BFA_STATUS_ENOMEM; } return BFA_STATUS_OK; } void bfad_im_module_exit(void) { if (bfad_im_scsi_transport_template) fc_release_transport(bfad_im_scsi_transport_template); if (bfad_im_scsi_vport_transport_template) fc_release_transport(bfad_im_scsi_vport_transport_template); idr_destroy(&bfad_im_port_index); } void bfad_ramp_up_qdepth(struct bfad_itnim_s *itnim, struct scsi_device *sdev) { struct scsi_device *tmp_sdev; if (((jiffies - itnim->last_ramp_up_time) > BFA_QUEUE_FULL_RAMP_UP_TIME * HZ) && ((jiffies - itnim->last_queue_full_time) > BFA_QUEUE_FULL_RAMP_UP_TIME * HZ)) { shost_for_each_device(tmp_sdev, sdev->host) { if (bfa_lun_queue_depth > tmp_sdev->queue_depth) { if (tmp_sdev->id != sdev->id) continue; scsi_change_queue_depth(tmp_sdev, tmp_sdev->queue_depth + 1); itnim->last_ramp_up_time = jiffies; } } } } void bfad_handle_qfull(struct bfad_itnim_s *itnim, struct scsi_device *sdev) { struct scsi_device *tmp_sdev; itnim->last_queue_full_time = jiffies; shost_for_each_device(tmp_sdev, sdev->host) { if (tmp_sdev->id != sdev->id) continue; scsi_track_queue_full(tmp_sdev, tmp_sdev->queue_depth - 1); } } struct bfad_itnim_s * bfad_get_itnim(struct bfad_im_port_s *im_port, int id) { struct bfad_itnim_s *itnim = NULL; /* Search the mapped list for this target ID */ list_for_each_entry(itnim, &im_port->itnim_mapped_list, list_entry) { if (id == itnim->scsi_tgt_id) return itnim; } return NULL; } /* * Function is invoked from the SCSI Host Template slave_alloc() entry point. * Has the logic to query the LUN Mask database to check if this LUN needs to * be made visible to the SCSI mid-layer or not. 
* * Returns BFA_STATUS_OK if this LUN needs to be added to the OS stack. * Returns -ENXIO to notify SCSI mid-layer to not add this LUN to the OS stack. */ static int bfad_im_check_if_make_lun_visible(struct scsi_device *sdev, struct fc_rport *rport) { struct bfad_itnim_data_s *itnim_data = (struct bfad_itnim_data_s *) rport->dd_data; struct bfa_s *bfa = itnim_data->itnim->bfa_itnim->bfa; struct bfa_rport_s *bfa_rport = itnim_data->itnim->bfa_itnim->rport; struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(bfa); int i = 0, ret = -ENXIO; for (i = 0; i < MAX_LUN_MASK_CFG; i++) { if (lun_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE && scsilun_to_int(&lun_list[i].lun) == sdev->lun && lun_list[i].rp_tag == bfa_rport->rport_tag && lun_list[i].lp_tag == (u8)bfa_rport->rport_info.lp_tag) { ret = BFA_STATUS_OK; break; } } return ret; } /* * Scsi_Host template entry slave_alloc */ static int bfad_im_slave_alloc(struct scsi_device *sdev) { struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); struct bfad_itnim_data_s *itnim_data; struct bfa_s *bfa; if (!rport || fc_remote_port_chkready(rport)) return -ENXIO; itnim_data = (struct bfad_itnim_data_s *) rport->dd_data; bfa = itnim_data->itnim->bfa_itnim->bfa; if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_ENABLED) { /* * We should not mask LUN 0 - since this will translate * to no LUN / TARGET for SCSI ml resulting no scan. */ if (sdev->lun == 0) { sdev->sdev_bflags |= BLIST_NOREPORTLUN | BLIST_SPARSELUN; goto done; } /* * Query LUN Mask configuration - to expose this LUN * to the SCSI mid-layer or to mask it. */ if (bfad_im_check_if_make_lun_visible(sdev, rport) != BFA_STATUS_OK) return -ENXIO; } done: sdev->hostdata = rport->dd_data; return 0; } u32 bfad_im_supported_speeds(struct bfa_s *bfa) { struct bfa_ioc_attr_s *ioc_attr; u32 supported_speed = 0; ioc_attr = kzalloc(sizeof(struct bfa_ioc_attr_s), GFP_KERNEL); if (!ioc_attr) return 0; bfa_ioc_get_attr(&bfa->ioc, ioc_attr); if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_16GBPS) supported_speed |= FC_PORTSPEED_16GBIT | FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT; else if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_8GBPS) { if (ioc_attr->adapter_attr.is_mezz) { supported_speed |= FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT; } else { supported_speed |= FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT; } } else if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_4GBPS) { supported_speed |= FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT; } else if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_10GBPS) { supported_speed |= FC_PORTSPEED_10GBIT; } kfree(ioc_attr); return supported_speed; } void bfad_fc_host_init(struct bfad_im_port_s *im_port) { struct Scsi_Host *host = im_port->shost; struct bfad_s *bfad = im_port->bfad; struct bfad_port_s *port = im_port->port; char symname[BFA_SYMNAME_MAXLEN]; struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); fc_host_node_name(host) = cpu_to_be64((bfa_fcs_lport_get_nwwn(port->fcs_port))); fc_host_port_name(host) = cpu_to_be64((bfa_fcs_lport_get_pwwn(port->fcs_port))); fc_host_max_npiv_vports(host) = bfa_lps_get_max_vport(&bfad->bfa); fc_host_supported_classes(host) = FC_COS_CLASS3; memset(fc_host_supported_fc4s(host), 0, sizeof(fc_host_supported_fc4s(host))); if (supported_fc4s & BFA_LPORT_ROLE_FCP_IM) /* For FCP type 0x08 */ fc_host_supported_fc4s(host)[2] = 1; /* For fibre channel services type 0x20 */ fc_host_supported_fc4s(host)[7] = 1; 
strscpy(symname, bfad->bfa_fcs.fabric.bport.port_cfg.sym_name.symname, BFA_SYMNAME_MAXLEN); sprintf(fc_host_symbolic_name(host), "%s", symname); fc_host_supported_speeds(host) = bfad_im_supported_speeds(&bfad->bfa); fc_host_maxframe_size(host) = fcport->cfg.maxfrsize; } static void bfad_im_fc_rport_add(struct bfad_im_port_s *im_port, struct bfad_itnim_s *itnim) { struct fc_rport_identifiers rport_ids; struct fc_rport *fc_rport; struct bfad_itnim_data_s *itnim_data; rport_ids.node_name = cpu_to_be64(bfa_fcs_itnim_get_nwwn(&itnim->fcs_itnim)); rport_ids.port_name = cpu_to_be64(bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim)); rport_ids.port_id = bfa_hton3b(bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim)); rport_ids.roles = FC_RPORT_ROLE_UNKNOWN; itnim->fc_rport = fc_rport = fc_remote_port_add(im_port->shost, 0, &rport_ids); if (!fc_rport) return; fc_rport->maxframe_size = bfa_fcs_itnim_get_maxfrsize(&itnim->fcs_itnim); fc_rport->supported_classes = bfa_fcs_itnim_get_cos(&itnim->fcs_itnim); itnim_data = fc_rport->dd_data; itnim_data->itnim = itnim; rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET; if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN) fc_remote_port_rolechg(fc_rport, rport_ids.roles); if ((fc_rport->scsi_target_id != -1) && (fc_rport->scsi_target_id < MAX_FCP_TARGET)) itnim->scsi_tgt_id = fc_rport->scsi_target_id; itnim->channel = fc_rport->channel; return; } /* * Work queue handler using FC transport service * Context: kernel */ static void bfad_im_itnim_work_handler(struct work_struct *work) { struct bfad_itnim_s *itnim = container_of(work, struct bfad_itnim_s, itnim_work); struct bfad_im_s *im = itnim->im; struct bfad_s *bfad = im->bfad; struct bfad_im_port_s *im_port; unsigned long flags; struct fc_rport *fc_rport; wwn_t wwpn; u32 fcid; char wwpn_str[32], fcid_str[16]; spin_lock_irqsave(&bfad->bfad_lock, flags); im_port = itnim->im_port; bfa_trc(bfad, itnim->state); switch (itnim->state) { case ITNIM_STATE_ONLINE: if (!itnim->fc_rport) { spin_unlock_irqrestore(&bfad->bfad_lock, flags); bfad_im_fc_rport_add(im_port, itnim); spin_lock_irqsave(&bfad->bfad_lock, flags); wwpn = bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim); fcid = bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim); wwn2str(wwpn_str, wwpn); fcid2str(fcid_str, fcid); list_add_tail(&itnim->list_entry, &im_port->itnim_mapped_list); BFA_LOG(KERN_INFO, bfad, bfa_log_level, "ITNIM ONLINE Target: %d:0:%d " "FCID: %s WWPN: %s\n", im_port->shost->host_no, itnim->scsi_tgt_id, fcid_str, wwpn_str); } else { printk(KERN_WARNING "%s: itnim %llx is already in online state\n", __func__, bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim)); } break; case ITNIM_STATE_OFFLINE_PENDING: itnim->state = ITNIM_STATE_OFFLINE; if (itnim->fc_rport) { fc_rport = itnim->fc_rport; ((struct bfad_itnim_data_s *) fc_rport->dd_data)->itnim = NULL; itnim->fc_rport = NULL; if (!(im_port->port->flags & BFAD_PORT_DELETE)) { spin_unlock_irqrestore(&bfad->bfad_lock, flags); fc_rport->dev_loss_tmo = bfa_fcpim_path_tov_get(&bfad->bfa) + 1; fc_remote_port_delete(fc_rport); spin_lock_irqsave(&bfad->bfad_lock, flags); } wwpn = bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim); fcid = bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim); wwn2str(wwpn_str, wwpn); fcid2str(fcid_str, fcid); list_del(&itnim->list_entry); BFA_LOG(KERN_INFO, bfad, bfa_log_level, "ITNIM OFFLINE Target: %d:0:%d " "FCID: %s WWPN: %s\n", im_port->shost->host_no, itnim->scsi_tgt_id, fcid_str, wwpn_str); } break; case ITNIM_STATE_FREE: if (itnim->fc_rport) { fc_rport = itnim->fc_rport; ((struct bfad_itnim_data_s *) fc_rport->dd_data)->itnim = NULL; 
itnim->fc_rport = NULL; if (!(im_port->port->flags & BFAD_PORT_DELETE)) { spin_unlock_irqrestore(&bfad->bfad_lock, flags); fc_rport->dev_loss_tmo = bfa_fcpim_path_tov_get(&bfad->bfa) + 1; fc_remote_port_delete(fc_rport); spin_lock_irqsave(&bfad->bfad_lock, flags); } list_del(&itnim->list_entry); } kfree(itnim); break; default: WARN_ON(1); break; } spin_unlock_irqrestore(&bfad->bfad_lock, flags); } /* * Scsi_Host template entry, queue a SCSI command to the BFAD. */ static int bfad_im_queuecommand_lck(struct scsi_cmnd *cmnd) { void (*done)(struct scsi_cmnd *) = scsi_done; struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) cmnd->device->host->hostdata[0]; struct bfad_s *bfad = im_port->bfad; struct bfad_itnim_data_s *itnim_data = cmnd->device->hostdata; struct bfad_itnim_s *itnim; struct bfa_ioim_s *hal_io; unsigned long flags; int rc; int sg_cnt = 0; struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); rc = fc_remote_port_chkready(rport); if (rc) { cmnd->result = rc; done(cmnd); return 0; } if (bfad->bfad_flags & BFAD_EEH_BUSY) { if (bfad->bfad_flags & BFAD_EEH_PCI_CHANNEL_IO_PERM_FAILURE) cmnd->result = DID_NO_CONNECT << 16; else cmnd->result = DID_REQUEUE << 16; done(cmnd); return 0; } sg_cnt = scsi_dma_map(cmnd); if (sg_cnt < 0) return SCSI_MLQUEUE_HOST_BUSY; spin_lock_irqsave(&bfad->bfad_lock, flags); if (!(bfad->bfad_flags & BFAD_HAL_START_DONE)) { printk(KERN_WARNING "bfad%d, queuecommand %p %x failed, BFA stopped\n", bfad->inst_no, cmnd, cmnd->cmnd[0]); cmnd->result = DID_NO_CONNECT << 16; goto out_fail_cmd; } itnim = itnim_data->itnim; if (!itnim) { cmnd->result = DID_IMM_RETRY << 16; goto out_fail_cmd; } hal_io = bfa_ioim_alloc(&bfad->bfa, (struct bfad_ioim_s *) cmnd, itnim->bfa_itnim, sg_cnt); if (!hal_io) { printk(KERN_WARNING "hal_io failure\n"); spin_unlock_irqrestore(&bfad->bfad_lock, flags); scsi_dma_unmap(cmnd); return SCSI_MLQUEUE_HOST_BUSY; } cmnd->host_scribble = (char *)hal_io; bfa_ioim_start(hal_io); spin_unlock_irqrestore(&bfad->bfad_lock, flags); return 0; out_fail_cmd: spin_unlock_irqrestore(&bfad->bfad_lock, flags); scsi_dma_unmap(cmnd); if (done) done(cmnd); return 0; } static DEF_SCSI_QCMD(bfad_im_queuecommand) void bfad_rport_online_wait(struct bfad_s *bfad) { int i; int rport_delay = 10; for (i = 0; !(bfad->bfad_flags & BFAD_PORT_ONLINE) && i < bfa_linkup_delay; i++) { set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout(HZ); } if (bfad->bfad_flags & BFAD_PORT_ONLINE) { rport_delay = rport_delay < bfa_linkup_delay ? rport_delay : bfa_linkup_delay; for (i = 0; !(bfad->bfad_flags & BFAD_RPORT_ONLINE) && i < rport_delay; i++) { set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout(HZ); } if (rport_delay > 0 && (bfad->bfad_flags & BFAD_RPORT_ONLINE)) { set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout(rport_delay * HZ); } } } int bfad_get_linkup_delay(struct bfad_s *bfad) { u8 nwwns = 0; wwn_t wwns[BFA_PREBOOT_BOOTLUN_MAX]; int linkup_delay; /* * Querying for the boot target port wwns * -- read from boot information in flash. * If nwwns > 0 => boot over SAN and set linkup_delay = 30 * else => local boot machine set linkup_delay = 0 */ bfa_iocfc_get_bootwwns(&bfad->bfa, &nwwns, wwns); if (nwwns > 0) /* If Boot over SAN set linkup_delay = 30sec */ linkup_delay = 30; else /* If local boot; no linkup_delay */ linkup_delay = 0; return linkup_delay; }
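/*
 * Illustrative note, not part of the driver: bfa_cb_tskim_done() and the
 * LUN/target reset handlers above share one unsigned long in the command's
 * private data.  The task-management status is stored shifted left by one
 * bit and IO_DONE_BIT (bit 0, as the "status >> 1" recovery implies) marks
 * completion, so the sleeping handler waits on test_bit(IO_DONE_BIT, ...)
 * and then right-shifts to recover the status.  The helpers below are
 * hypothetical and only restate that packing convention.
 */
static inline unsigned long example_tm_status_pack(enum bfi_tskim_status sts)
{
	return ((unsigned long)sts << 1) | BIT(IO_DONE_BIT);
}

static inline enum bfi_tskim_status example_tm_status_unpack(unsigned long v)
{
	return v >> 1;
}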
linux-master
drivers/scsi/bfa/bfad_im.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. * Copyright (c) 2014- QLogic Corporation. * All rights reserved * www.qlogic.com * * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. */ #include "bfad_drv.h" #include "bfa_modules.h" #include "bfi_reg.h" BFA_TRC_FILE(HAL, IOCFC_CT); /* * Dummy interrupt handler for handling spurious interrupt during chip-reinit. */ static void bfa_hwct_msix_dummy(struct bfa_s *bfa, int vec) { } void bfa_hwct_reginit(struct bfa_s *bfa) { struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs; void __iomem *kva = bfa_ioc_bar0(&bfa->ioc); int fn = bfa_ioc_pcifn(&bfa->ioc); if (fn == 0) { bfa_regs->intr_status = (kva + HOSTFN0_INT_STATUS); bfa_regs->intr_mask = (kva + HOSTFN0_INT_MSK); } else { bfa_regs->intr_status = (kva + HOSTFN1_INT_STATUS); bfa_regs->intr_mask = (kva + HOSTFN1_INT_MSK); } } void bfa_hwct2_reginit(struct bfa_s *bfa) { struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs; void __iomem *kva = bfa_ioc_bar0(&bfa->ioc); bfa_regs->intr_status = (kva + CT2_HOSTFN_INT_STATUS); bfa_regs->intr_mask = (kva + CT2_HOSTFN_INTR_MASK); } void bfa_hwct_reqq_ack(struct bfa_s *bfa, int reqq) { u32 r32; r32 = readl(bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq]); writel(r32, bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq]); } /* * Actions to respond RME Interrupt for Catapult ASIC: * - Write 1 to Interrupt Status register (INTx only - done in bfa_intx()) * - Acknowledge by writing to RME Queue Control register * - Update CI */ void bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci) { u32 r32; r32 = readl(bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]); writel(r32, bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]); bfa_rspq_ci(bfa, rspq) = ci; writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]); } /* * Actions to respond RME Interrupt for Catapult2 ASIC: * - Write 1 to Interrupt Status register (INTx only - done in bfa_intx()) * - Update CI */ void bfa_hwct2_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci) { bfa_rspq_ci(bfa, rspq) = ci; writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]); } void bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap, u32 *num_vecs, u32 *max_vec_bit) { *msix_vecs_bmap = (1 << BFI_MSIX_CT_MAX) - 1; *max_vec_bit = (1 << (BFI_MSIX_CT_MAX - 1)); *num_vecs = BFI_MSIX_CT_MAX; } /* * Setup MSI-X vector for catapult */ void bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs) { WARN_ON((nvecs != 1) && (nvecs != BFI_MSIX_CT_MAX)); bfa_trc(bfa, nvecs); bfa->msix.nvecs = nvecs; bfa_hwct_msix_uninstall(bfa); } void bfa_hwct_msix_ctrl_install(struct bfa_s *bfa) { if (bfa->msix.nvecs == 0) return; if (bfa->msix.nvecs == 1) bfa->msix.handler[BFI_MSIX_LPU_ERR_CT] = bfa_msix_all; else bfa->msix.handler[BFI_MSIX_LPU_ERR_CT] = bfa_msix_lpu_err; } void bfa_hwct_msix_queue_install(struct bfa_s *bfa) { int i; if (bfa->msix.nvecs == 0) return; if (bfa->msix.nvecs == 1) { for (i = BFI_MSIX_CPE_QMIN_CT; i < BFI_MSIX_CT_MAX; i++) bfa->msix.handler[i] = bfa_msix_all; return; } for (i = BFI_MSIX_CPE_QMIN_CT; i <= BFI_MSIX_CPE_QMAX_CT; i++) bfa->msix.handler[i] = bfa_msix_reqq; for (i = BFI_MSIX_RME_QMIN_CT; i <= BFI_MSIX_RME_QMAX_CT; i++) bfa->msix.handler[i] = bfa_msix_rspq; } void bfa_hwct_msix_uninstall(struct bfa_s *bfa) { int i; for (i = 0; i < BFI_MSIX_CT_MAX; i++) bfa->msix.handler[i] = bfa_hwct_msix_dummy; } /* * Enable MSI-X vectors */ void bfa_hwct_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix) { bfa_trc(bfa, 0); bfa_ioc_isr_mode_set(&bfa->ioc, msix); } void bfa_hwct_msix_get_rme_range(struct bfa_s *bfa, u32 
*start, u32 *end) { *start = BFI_MSIX_RME_QMIN_CT; *end = BFI_MSIX_RME_QMAX_CT; }
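/*
 * Illustrative sketch, not part of the driver: one plausible ordering of the
 * MSI-X plumbing above during controller bring-up -- record the vector
 * count, then hook the control (LPU error) vector and the request/response
 * queue vectors.  Only functions defined in this file are called; the
 * wrapper name is hypothetical.
 */
static void __maybe_unused
example_hwct_msix_setup(struct bfa_s *bfa, int nvecs)
{
	bfa_hwct_msix_init(bfa, nvecs);		/* record nvecs, park dummy handlers */
	bfa_hwct_msix_ctrl_install(bfa);	/* LPU error / "all" vector */
	bfa_hwct_msix_queue_install(bfa);	/* CPE and RME queue vectors */
}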
linux-master
drivers/scsi/bfa/bfa_hw_ct.c
// SPDX-License-Identifier: GPL-2.0-only /* * QLogic iSCSI Offload Driver * Copyright (c) 2016 Cavium Inc. */ #include "qedi_dbg.h" #include <linux/vmalloc.h> void qedi_dbg_err(struct qedi_dbg_ctx *qedi, const char *func, u32 line, const char *fmt, ...) { va_list va; struct va_format vaf; va_start(va, fmt); vaf.fmt = fmt; vaf.va = &va; if (likely(qedi) && likely(qedi->pdev)) pr_err("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev), func, line, qedi->host_no, &vaf); else pr_err("[0000:00:00.0]:[%s:%d]: %pV", func, line, &vaf); va_end(va); } void qedi_dbg_warn(struct qedi_dbg_ctx *qedi, const char *func, u32 line, const char *fmt, ...) { va_list va; struct va_format vaf; va_start(va, fmt); vaf.fmt = fmt; vaf.va = &va; if (!(qedi_dbg_log & QEDI_LOG_WARN)) goto ret; if (likely(qedi) && likely(qedi->pdev)) pr_warn("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev), func, line, qedi->host_no, &vaf); else pr_warn("[0000:00:00.0]:[%s:%d]: %pV", func, line, &vaf); ret: va_end(va); } void qedi_dbg_notice(struct qedi_dbg_ctx *qedi, const char *func, u32 line, const char *fmt, ...) { va_list va; struct va_format vaf; va_start(va, fmt); vaf.fmt = fmt; vaf.va = &va; if (!(qedi_dbg_log & QEDI_LOG_NOTICE)) goto ret; if (likely(qedi) && likely(qedi->pdev)) pr_notice("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev), func, line, qedi->host_no, &vaf); else pr_notice("[0000:00:00.0]:[%s:%d]: %pV", func, line, &vaf); ret: va_end(va); } void qedi_dbg_info(struct qedi_dbg_ctx *qedi, const char *func, u32 line, u32 level, const char *fmt, ...) { va_list va; struct va_format vaf; va_start(va, fmt); vaf.fmt = fmt; vaf.va = &va; if (!(qedi_dbg_log & level)) goto ret; if (likely(qedi) && likely(qedi->pdev)) pr_info("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev), func, line, qedi->host_no, &vaf); else pr_info("[0000:00:00.0]:[%s:%d]: %pV", func, line, &vaf); ret: va_end(va); } int qedi_create_sysfs_attr(struct Scsi_Host *shost, struct sysfs_bin_attrs *iter) { int ret = 0; for (; iter->name; iter++) { ret = sysfs_create_bin_file(&shost->shost_gendev.kobj, iter->attr); if (ret) pr_err("Unable to create sysfs %s attr, err(%d).\n", iter->name, ret); } return ret; } void qedi_remove_sysfs_attr(struct Scsi_Host *shost, struct sysfs_bin_attrs *iter) { for (; iter->name; iter++) sysfs_remove_bin_file(&shost->shost_gendev.kobj, iter->attr); }
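/*
 * Illustrative sketch, not part of the driver: the helpers above forward
 * their varargs to printk through struct va_format and the %pV specifier,
 * so callers pass an ordinary format string.  The wrapper macro below is
 * hypothetical; the driver's real wrappers (QEDI_ERR, QEDI_INFO, ... used
 * elsewhere in the driver) likewise supply __func__ and __LINE__, which is
 * why call sites never pass them explicitly.
 */
#define EXAMPLE_DBG_ERR(ctx, fmt, ...) \
	qedi_dbg_err(ctx, __func__, __LINE__, fmt, ##__VA_ARGS__)

/* Usage: EXAMPLE_DBG_ERR(&qedi->dbg_ctx, "request %d failed\n", err); */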
linux-master
drivers/scsi/qedi/qedi_dbg.c
// SPDX-License-Identifier: GPL-2.0-only /* * QLogic iSCSI Offload Driver * Copyright (c) 2016 Cavium Inc. */ #include "qedi.h" #include "qedi_dbg.h" #include <linux/uaccess.h> #include <linux/debugfs.h> #include <linux/module.h> int qedi_do_not_recover; static struct dentry *qedi_dbg_root; void qedi_dbg_host_init(struct qedi_dbg_ctx *qedi, const struct qedi_debugfs_ops *dops, const struct file_operations *fops) { char host_dirname[32]; sprintf(host_dirname, "host%u", qedi->host_no); qedi->bdf_dentry = debugfs_create_dir(host_dirname, qedi_dbg_root); while (dops) { if (!(dops->name)) break; debugfs_create_file(dops->name, 0600, qedi->bdf_dentry, qedi, fops); dops++; fops++; } } void qedi_dbg_host_exit(struct qedi_dbg_ctx *qedi) { debugfs_remove_recursive(qedi->bdf_dentry); qedi->bdf_dentry = NULL; } void qedi_dbg_init(char *drv_name) { qedi_dbg_root = debugfs_create_dir(drv_name, NULL); } void qedi_dbg_exit(void) { debugfs_remove_recursive(qedi_dbg_root); qedi_dbg_root = NULL; } static ssize_t qedi_dbg_do_not_recover_enable(struct qedi_dbg_ctx *qedi_dbg) { if (!qedi_do_not_recover) qedi_do_not_recover = 1; QEDI_INFO(qedi_dbg, QEDI_LOG_DEBUGFS, "do_not_recover=%d\n", qedi_do_not_recover); return 0; } static ssize_t qedi_dbg_do_not_recover_disable(struct qedi_dbg_ctx *qedi_dbg) { if (qedi_do_not_recover) qedi_do_not_recover = 0; QEDI_INFO(qedi_dbg, QEDI_LOG_DEBUGFS, "do_not_recover=%d\n", qedi_do_not_recover); return 0; } static struct qedi_list_of_funcs qedi_dbg_do_not_recover_ops[] = { { "enable", qedi_dbg_do_not_recover_enable }, { "disable", qedi_dbg_do_not_recover_disable }, { NULL, NULL } }; const struct qedi_debugfs_ops qedi_debugfs_ops[] = { { "gbl_ctx", NULL }, { "do_not_recover", qedi_dbg_do_not_recover_ops}, { "io_trace", NULL }, { NULL, NULL } }; static ssize_t qedi_dbg_do_not_recover_cmd_write(struct file *filp, const char __user *buffer, size_t count, loff_t *ppos) { size_t cnt = 0; struct qedi_dbg_ctx *qedi_dbg = (struct qedi_dbg_ctx *)filp->private_data; struct qedi_list_of_funcs *lof = qedi_dbg_do_not_recover_ops; if (*ppos) return 0; while (lof) { if (!(lof->oper_str)) break; if (!strncmp(lof->oper_str, buffer, strlen(lof->oper_str))) { cnt = lof->oper_func(qedi_dbg); break; } lof++; } return (count - cnt); } static ssize_t qedi_dbg_do_not_recover_cmd_read(struct file *filp, char __user *buffer, size_t count, loff_t *ppos) { size_t cnt = 0; if (*ppos) return 0; cnt = sprintf(buffer, "do_not_recover=%d\n", qedi_do_not_recover); cnt = min_t(int, count, cnt - *ppos); *ppos += cnt; return cnt; } static int qedi_gbl_ctx_show(struct seq_file *s, void *unused) { struct qedi_fastpath *fp = NULL; struct qed_sb_info *sb_info = NULL; struct status_block *sb = NULL; struct global_queue *que = NULL; int id; u16 prod_idx; struct qedi_ctx *qedi = s->private; unsigned long flags; seq_puts(s, " DUMP CQ CONTEXT:\n"); for (id = 0; id < MIN_NUM_CPUS_MSIX(qedi); id++) { spin_lock_irqsave(&qedi->hba_lock, flags); seq_printf(s, "=========FAST CQ PATH [%d] ==========\n", id); fp = &qedi->fp_array[id]; sb_info = fp->sb_info; sb = sb_info->sb_virt; prod_idx = (sb->pi_array[QEDI_PROTO_CQ_PROD_IDX] & STATUS_BLOCK_PROD_INDEX_MASK); seq_printf(s, "SB PROD IDX: %d\n", prod_idx); que = qedi->global_queues[fp->sb_id]; seq_printf(s, "DRV CONS IDX: %d\n", que->cq_cons_idx); seq_printf(s, "CQ complete host memory: %d\n", fp->sb_id); seq_puts(s, "=========== END ==================\n\n\n"); spin_unlock_irqrestore(&qedi->hba_lock, flags); } return 0; } static int qedi_dbg_gbl_ctx_open(struct inode *inode, struct 
file *file) { struct qedi_dbg_ctx *qedi_dbg = inode->i_private; struct qedi_ctx *qedi = container_of(qedi_dbg, struct qedi_ctx, dbg_ctx); return single_open(file, qedi_gbl_ctx_show, qedi); } static int qedi_io_trace_show(struct seq_file *s, void *unused) { int id, idx = 0; struct qedi_ctx *qedi = s->private; struct qedi_io_log *io_log; unsigned long flags; seq_puts(s, " DUMP IO LOGS:\n"); spin_lock_irqsave(&qedi->io_trace_lock, flags); idx = qedi->io_trace_idx; for (id = 0; id < QEDI_IO_TRACE_SIZE; id++) { io_log = &qedi->io_trace_buf[idx]; seq_printf(s, "iodir-%d:", io_log->direction); seq_printf(s, "tid-0x%x:", io_log->task_id); seq_printf(s, "cid-0x%x:", io_log->cid); seq_printf(s, "lun-%d:", io_log->lun); seq_printf(s, "op-0x%02x:", io_log->op); seq_printf(s, "0x%02x%02x%02x%02x:", io_log->lba[0], io_log->lba[1], io_log->lba[2], io_log->lba[3]); seq_printf(s, "buflen-%d:", io_log->bufflen); seq_printf(s, "sgcnt-%d:", io_log->sg_count); seq_printf(s, "res-0x%08x:", io_log->result); seq_printf(s, "jif-%lu:", io_log->jiffies); seq_printf(s, "blk_req_cpu-%d:", io_log->blk_req_cpu); seq_printf(s, "req_cpu-%d:", io_log->req_cpu); seq_printf(s, "intr_cpu-%d:", io_log->intr_cpu); seq_printf(s, "blk_rsp_cpu-%d\n", io_log->blk_rsp_cpu); idx++; if (idx == QEDI_IO_TRACE_SIZE) idx = 0; } spin_unlock_irqrestore(&qedi->io_trace_lock, flags); return 0; } static int qedi_dbg_io_trace_open(struct inode *inode, struct file *file) { struct qedi_dbg_ctx *qedi_dbg = inode->i_private; struct qedi_ctx *qedi = container_of(qedi_dbg, struct qedi_ctx, dbg_ctx); return single_open(file, qedi_io_trace_show, qedi); } const struct file_operations qedi_dbg_fops[] = { qedi_dbg_fileops_seq(qedi, gbl_ctx), qedi_dbg_fileops(qedi, do_not_recover), qedi_dbg_fileops_seq(qedi, io_trace), { }, };
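/*
 * Illustrative note, not part of the driver: qedi_dbg_host_init() above walks
 * the qedi_debugfs_ops[] and qedi_dbg_fops[] tables in lockstep (dops++ and
 * fops++ per node), so the two arrays must stay index-aligned and terminate
 * at the same slot.  Adding a debugfs node therefore means appending a pair
 * of matching entries, e.g. (hypothetical node name "new_node"):
 *
 *	{ "new_node", NULL },			to qedi_debugfs_ops[]
 *	qedi_dbg_fileops_seq(qedi, new_node),	to qedi_dbg_fops[]
 *
 * plus the corresponding show/open handlers, following the gbl_ctx pattern.
 */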
linux-master
drivers/scsi/qedi/qedi_debugfs.c
// SPDX-License-Identifier: GPL-2.0-only /* * QLogic iSCSI Offload Driver * Copyright (c) 2016 Cavium Inc. */ #include <linux/blkdev.h> #include <scsi/scsi_tcq.h> #include <linux/delay.h> #include "qedi.h" #include "qedi_iscsi.h" #include "qedi_gbl.h" #include "qedi_fw_iscsi.h" #include "qedi_fw_scsi.h" static int send_iscsi_tmf(struct qedi_conn *qedi_conn, struct iscsi_task *mtask, struct iscsi_task *ctask); void qedi_iscsi_unmap_sg_list(struct qedi_cmd *cmd) { struct scsi_cmnd *sc = cmd->scsi_cmd; if (cmd->io_tbl.sge_valid && sc) { cmd->io_tbl.sge_valid = 0; scsi_dma_unmap(sc); } } static void qedi_process_logout_resp(struct qedi_ctx *qedi, union iscsi_cqe *cqe, struct iscsi_task *task, struct qedi_conn *qedi_conn) { struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data; struct iscsi_logout_rsp *resp_hdr; struct iscsi_session *session = conn->session; struct iscsi_logout_response_hdr *cqe_logout_response; struct qedi_cmd *cmd; cmd = (struct qedi_cmd *)task->dd_data; cqe_logout_response = &cqe->cqe_common.iscsi_hdr.logout_response; spin_lock(&session->back_lock); resp_hdr = (struct iscsi_logout_rsp *)&qedi_conn->gen_pdu.resp_hdr; memset(resp_hdr, 0, sizeof(struct iscsi_hdr)); resp_hdr->opcode = cqe_logout_response->opcode; resp_hdr->flags = cqe_logout_response->flags; resp_hdr->hlength = 0; resp_hdr->itt = build_itt(cqe->cqe_solicited.itid, conn->session->age); resp_hdr->statsn = cpu_to_be32(cqe_logout_response->stat_sn); resp_hdr->exp_cmdsn = cpu_to_be32(cqe_logout_response->exp_cmd_sn); resp_hdr->max_cmdsn = cpu_to_be32(cqe_logout_response->max_cmd_sn); resp_hdr->t2wait = cpu_to_be32(cqe_logout_response->time_2_wait); resp_hdr->t2retain = cpu_to_be32(cqe_logout_response->time_2_retain); QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID, "Freeing tid=0x%x for cid=0x%x\n", cmd->task_id, qedi_conn->iscsi_conn_id); spin_lock(&qedi_conn->list_lock); if (likely(cmd->io_cmd_in_list)) { cmd->io_cmd_in_list = false; list_del_init(&cmd->io_cmd); qedi_conn->active_cmd_count--; } else { QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "Active cmd list node already deleted, tid=0x%x, cid=0x%x, io_cmd_node=%p\n", cmd->task_id, qedi_conn->iscsi_conn_id, &cmd->io_cmd); } spin_unlock(&qedi_conn->list_lock); cmd->state = RESPONSE_RECEIVED; __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0); spin_unlock(&session->back_lock); } static void qedi_process_text_resp(struct qedi_ctx *qedi, union iscsi_cqe *cqe, struct iscsi_task *task, struct qedi_conn *qedi_conn) { struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data; struct iscsi_session *session = conn->session; struct iscsi_task_context *task_ctx; struct iscsi_text_rsp *resp_hdr_ptr; struct iscsi_text_response_hdr *cqe_text_response; struct qedi_cmd *cmd; int pld_len; cmd = (struct qedi_cmd *)task->dd_data; task_ctx = qedi_get_task_mem(&qedi->tasks, cmd->task_id); cqe_text_response = &cqe->cqe_common.iscsi_hdr.text_response; spin_lock(&session->back_lock); resp_hdr_ptr = (struct iscsi_text_rsp *)&qedi_conn->gen_pdu.resp_hdr; memset(resp_hdr_ptr, 0, sizeof(struct iscsi_hdr)); resp_hdr_ptr->opcode = cqe_text_response->opcode; resp_hdr_ptr->flags = cqe_text_response->flags; resp_hdr_ptr->hlength = 0; hton24(resp_hdr_ptr->dlength, (cqe_text_response->hdr_second_dword & ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK)); resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid, conn->session->age); resp_hdr_ptr->ttt = cqe_text_response->ttt; resp_hdr_ptr->statsn = cpu_to_be32(cqe_text_response->stat_sn); resp_hdr_ptr->exp_cmdsn = cpu_to_be32(cqe_text_response->exp_cmd_sn); 
resp_hdr_ptr->max_cmdsn = cpu_to_be32(cqe_text_response->max_cmd_sn); pld_len = cqe_text_response->hdr_second_dword & ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK; qedi_conn->gen_pdu.resp_wr_ptr = qedi_conn->gen_pdu.resp_buf + pld_len; memset(task_ctx, '\0', sizeof(*task_ctx)); QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID, "Freeing tid=0x%x for cid=0x%x\n", cmd->task_id, qedi_conn->iscsi_conn_id); spin_lock(&qedi_conn->list_lock); if (likely(cmd->io_cmd_in_list)) { cmd->io_cmd_in_list = false; list_del_init(&cmd->io_cmd); qedi_conn->active_cmd_count--; } else { QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "Active cmd list node already deleted, tid=0x%x, cid=0x%x, io_cmd_node=%p\n", cmd->task_id, qedi_conn->iscsi_conn_id, &cmd->io_cmd); } spin_unlock(&qedi_conn->list_lock); cmd->state = RESPONSE_RECEIVED; __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr, qedi_conn->gen_pdu.resp_buf, (qedi_conn->gen_pdu.resp_wr_ptr - qedi_conn->gen_pdu.resp_buf)); spin_unlock(&session->back_lock); } static void qedi_tmf_resp_work(struct work_struct *work) { struct qedi_cmd *qedi_cmd = container_of(work, struct qedi_cmd, tmf_work); struct qedi_conn *qedi_conn = qedi_cmd->conn; struct qedi_ctx *qedi = qedi_conn->qedi; struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data; struct iscsi_session *session = conn->session; struct iscsi_tm_rsp *resp_hdr_ptr; int rval = 0; resp_hdr_ptr = (struct iscsi_tm_rsp *)qedi_cmd->tmf_resp_buf; rval = qedi_cleanup_all_io(qedi, qedi_conn, qedi_cmd->task, true); if (rval) goto exit_tmf_resp; spin_lock(&session->back_lock); __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr, NULL, 0); spin_unlock(&session->back_lock); exit_tmf_resp: kfree(resp_hdr_ptr); spin_lock(&qedi_conn->tmf_work_lock); qedi_conn->fw_cleanup_works--; spin_unlock(&qedi_conn->tmf_work_lock); } static void qedi_process_tmf_resp(struct qedi_ctx *qedi, union iscsi_cqe *cqe, struct iscsi_task *task, struct qedi_conn *qedi_conn) { struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data; struct iscsi_session *session = conn->session; struct iscsi_tmf_response_hdr *cqe_tmp_response; struct iscsi_tm_rsp *resp_hdr_ptr; struct iscsi_tm *tmf_hdr; struct qedi_cmd *qedi_cmd = NULL; cqe_tmp_response = &cqe->cqe_common.iscsi_hdr.tmf_response; qedi_cmd = task->dd_data; qedi_cmd->tmf_resp_buf = kzalloc(sizeof(*resp_hdr_ptr), GFP_ATOMIC); if (!qedi_cmd->tmf_resp_buf) { QEDI_ERR(&qedi->dbg_ctx, "Failed to allocate resp buf, cid=0x%x\n", qedi_conn->iscsi_conn_id); return; } spin_lock(&session->back_lock); resp_hdr_ptr = (struct iscsi_tm_rsp *)qedi_cmd->tmf_resp_buf; memset(resp_hdr_ptr, 0, sizeof(struct iscsi_tm_rsp)); /* Fill up the header */ resp_hdr_ptr->opcode = cqe_tmp_response->opcode; resp_hdr_ptr->flags = cqe_tmp_response->hdr_flags; resp_hdr_ptr->response = cqe_tmp_response->hdr_response; resp_hdr_ptr->hlength = 0; hton24(resp_hdr_ptr->dlength, (cqe_tmp_response->hdr_second_dword & ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_MASK)); resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid, conn->session->age); resp_hdr_ptr->statsn = cpu_to_be32(cqe_tmp_response->stat_sn); resp_hdr_ptr->exp_cmdsn = cpu_to_be32(cqe_tmp_response->exp_cmd_sn); resp_hdr_ptr->max_cmdsn = cpu_to_be32(cqe_tmp_response->max_cmd_sn); tmf_hdr = (struct iscsi_tm *)qedi_cmd->task->hdr; spin_lock(&qedi_conn->list_lock); if (likely(qedi_cmd->io_cmd_in_list)) { qedi_cmd->io_cmd_in_list = false; list_del_init(&qedi_cmd->io_cmd); qedi_conn->active_cmd_count--; } spin_unlock(&qedi_conn->list_lock); spin_lock(&qedi_conn->tmf_work_lock); switch (tmf_hdr->flags & 
ISCSI_FLAG_TM_FUNC_MASK) { case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET: case ISCSI_TM_FUNC_TARGET_WARM_RESET: case ISCSI_TM_FUNC_TARGET_COLD_RESET: if (qedi_conn->ep_disconnect_starting) { /* Session is down so ep_disconnect will clean up */ spin_unlock(&qedi_conn->tmf_work_lock); goto unblock_sess; } qedi_conn->fw_cleanup_works++; spin_unlock(&qedi_conn->tmf_work_lock); INIT_WORK(&qedi_cmd->tmf_work, qedi_tmf_resp_work); queue_work(qedi->tmf_thread, &qedi_cmd->tmf_work); goto unblock_sess; } spin_unlock(&qedi_conn->tmf_work_lock); __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr, NULL, 0); kfree(resp_hdr_ptr); unblock_sess: spin_unlock(&session->back_lock); } static void qedi_process_login_resp(struct qedi_ctx *qedi, union iscsi_cqe *cqe, struct iscsi_task *task, struct qedi_conn *qedi_conn) { struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data; struct iscsi_session *session = conn->session; struct iscsi_task_context *task_ctx; struct iscsi_login_rsp *resp_hdr_ptr; struct iscsi_login_response_hdr *cqe_login_response; struct qedi_cmd *cmd; int pld_len; cmd = (struct qedi_cmd *)task->dd_data; cqe_login_response = &cqe->cqe_common.iscsi_hdr.login_response; task_ctx = qedi_get_task_mem(&qedi->tasks, cmd->task_id); spin_lock(&session->back_lock); resp_hdr_ptr = (struct iscsi_login_rsp *)&qedi_conn->gen_pdu.resp_hdr; memset(resp_hdr_ptr, 0, sizeof(struct iscsi_login_rsp)); resp_hdr_ptr->opcode = cqe_login_response->opcode; resp_hdr_ptr->flags = cqe_login_response->flags_attr; resp_hdr_ptr->hlength = 0; hton24(resp_hdr_ptr->dlength, (cqe_login_response->hdr_second_dword & ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK)); resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid, conn->session->age); resp_hdr_ptr->tsih = cqe_login_response->tsih; resp_hdr_ptr->statsn = cpu_to_be32(cqe_login_response->stat_sn); resp_hdr_ptr->exp_cmdsn = cpu_to_be32(cqe_login_response->exp_cmd_sn); resp_hdr_ptr->max_cmdsn = cpu_to_be32(cqe_login_response->max_cmd_sn); resp_hdr_ptr->status_class = cqe_login_response->status_class; resp_hdr_ptr->status_detail = cqe_login_response->status_detail; pld_len = cqe_login_response->hdr_second_dword & ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK; qedi_conn->gen_pdu.resp_wr_ptr = qedi_conn->gen_pdu.resp_buf + pld_len; spin_lock(&qedi_conn->list_lock); if (likely(cmd->io_cmd_in_list)) { cmd->io_cmd_in_list = false; list_del_init(&cmd->io_cmd); qedi_conn->active_cmd_count--; } spin_unlock(&qedi_conn->list_lock); memset(task_ctx, '\0', sizeof(*task_ctx)); __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr, qedi_conn->gen_pdu.resp_buf, (qedi_conn->gen_pdu.resp_wr_ptr - qedi_conn->gen_pdu.resp_buf)); spin_unlock(&session->back_lock); QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID, "Freeing tid=0x%x for cid=0x%x\n", cmd->task_id, qedi_conn->iscsi_conn_id); cmd->state = RESPONSE_RECEIVED; } static void qedi_get_rq_bdq_buf(struct qedi_ctx *qedi, struct iscsi_cqe_unsolicited *cqe, char *ptr, int len) { u16 idx = 0; QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, "pld_len [%d], bdq_prod_idx [%d], idx [%d]\n", len, qedi->bdq_prod_idx, (qedi->bdq_prod_idx % qedi->rq_num_entries)); /* Obtain buffer address from rqe_opaque */ idx = cqe->rqe_opaque; if (idx > (QEDI_BDQ_NUM - 1)) { QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, "wrong idx %d returned by FW, dropping the unsolicited pkt\n", idx); return; } QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, "rqe_opaque [0x%p], idx [%d]\n", cqe->rqe_opaque, idx); QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, "unsol_cqe_type = %d\n", cqe->unsol_cqe_type); switch 
(cqe->unsol_cqe_type) { case ISCSI_CQE_UNSOLICITED_SINGLE: case ISCSI_CQE_UNSOLICITED_FIRST: if (len) memcpy(ptr, (void *)qedi->bdq[idx].buf_addr, len); break; case ISCSI_CQE_UNSOLICITED_MIDDLE: case ISCSI_CQE_UNSOLICITED_LAST: break; default: break; } } static void qedi_put_rq_bdq_buf(struct qedi_ctx *qedi, struct iscsi_cqe_unsolicited *cqe, int count) { u16 idx = 0; struct scsi_bd *pbl; /* Obtain buffer address from rqe_opaque */ idx = cqe->rqe_opaque; if (idx > (QEDI_BDQ_NUM - 1)) { QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, "wrong idx %d returned by FW, dropping the unsolicited pkt\n", idx); return; } pbl = (struct scsi_bd *)qedi->bdq_pbl; pbl += (qedi->bdq_prod_idx % qedi->rq_num_entries); pbl->address.hi = cpu_to_le32(QEDI_U64_HI(qedi->bdq[idx].buf_dma)); pbl->address.lo = cpu_to_le32(QEDI_U64_LO(qedi->bdq[idx].buf_dma)); QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, "pbl [0x%p] pbl->address hi [0x%llx] lo [0x%llx] idx [%d]\n", pbl, pbl->address.hi, pbl->address.lo, idx); pbl->opaque.iscsi_opaque.reserved_zero[0] = 0; pbl->opaque.iscsi_opaque.reserved_zero[1] = 0; pbl->opaque.iscsi_opaque.reserved_zero[2] = 0; pbl->opaque.iscsi_opaque.opaque = cpu_to_le32(idx); /* Increment producer to let f/w know we've handled the frame */ qedi->bdq_prod_idx += count; writew(qedi->bdq_prod_idx, qedi->bdq_primary_prod); readw(qedi->bdq_primary_prod); writew(qedi->bdq_prod_idx, qedi->bdq_secondary_prod); readw(qedi->bdq_secondary_prod); } static void qedi_unsol_pdu_adjust_bdq(struct qedi_ctx *qedi, struct iscsi_cqe_unsolicited *cqe, u32 pdu_len, u32 num_bdqs, char *bdq_data) { QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, "num_bdqs [%d]\n", num_bdqs); qedi_get_rq_bdq_buf(qedi, cqe, bdq_data, pdu_len); qedi_put_rq_bdq_buf(qedi, cqe, (num_bdqs + 1)); } static int qedi_process_nopin_mesg(struct qedi_ctx *qedi, union iscsi_cqe *cqe, struct iscsi_task *task, struct qedi_conn *qedi_conn, u16 que_idx) { struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data; struct iscsi_session *session = conn->session; struct iscsi_nop_in_hdr *cqe_nop_in; struct iscsi_nopin *hdr; struct qedi_cmd *cmd; int tgt_async_nop = 0; u32 lun[2]; u32 pdu_len, num_bdqs; char bdq_data[QEDI_BDQ_BUF_SIZE]; unsigned long flags; spin_lock_bh(&session->back_lock); cqe_nop_in = &cqe->cqe_common.iscsi_hdr.nop_in; pdu_len = cqe_nop_in->hdr_second_dword & ISCSI_NOP_IN_HDR_DATA_SEG_LEN_MASK; num_bdqs = pdu_len / QEDI_BDQ_BUF_SIZE; hdr = (struct iscsi_nopin *)&qedi_conn->gen_pdu.resp_hdr; memset(hdr, 0, sizeof(struct iscsi_hdr)); hdr->opcode = cqe_nop_in->opcode; hdr->max_cmdsn = cpu_to_be32(cqe_nop_in->max_cmd_sn); hdr->exp_cmdsn = cpu_to_be32(cqe_nop_in->exp_cmd_sn); hdr->statsn = cpu_to_be32(cqe_nop_in->stat_sn); hdr->ttt = cpu_to_be32(cqe_nop_in->ttt); if (cqe->cqe_common.cqe_type == ISCSI_CQE_TYPE_UNSOLICITED) { spin_lock_irqsave(&qedi->hba_lock, flags); qedi_unsol_pdu_adjust_bdq(qedi, &cqe->cqe_unsolicited, pdu_len, num_bdqs, bdq_data); hdr->itt = RESERVED_ITT; tgt_async_nop = 1; spin_unlock_irqrestore(&qedi->hba_lock, flags); goto done; } /* Response to one of our nop-outs */ if (task) { cmd = task->dd_data; hdr->flags = ISCSI_FLAG_CMD_FINAL; hdr->itt = build_itt(cqe->cqe_solicited.itid, conn->session->age); lun[0] = 0xffffffff; lun[1] = 0xffffffff; memcpy(&hdr->lun, lun, sizeof(struct scsi_lun)); QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID, "Freeing tid=0x%x for cid=0x%x\n", cmd->task_id, qedi_conn->iscsi_conn_id); cmd->state = RESPONSE_RECEIVED; spin_lock(&qedi_conn->list_lock); if (likely(cmd->io_cmd_in_list)) { cmd->io_cmd_in_list = false; 
list_del_init(&cmd->io_cmd); qedi_conn->active_cmd_count--; } spin_unlock(&qedi_conn->list_lock); } done: __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, bdq_data, pdu_len); spin_unlock_bh(&session->back_lock); return tgt_async_nop; } static void qedi_process_async_mesg(struct qedi_ctx *qedi, union iscsi_cqe *cqe, struct iscsi_task *task, struct qedi_conn *qedi_conn, u16 que_idx) { struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data; struct iscsi_session *session = conn->session; struct iscsi_async_msg_hdr *cqe_async_msg; struct iscsi_async *resp_hdr; u32 lun[2]; u32 pdu_len, num_bdqs; char bdq_data[QEDI_BDQ_BUF_SIZE]; unsigned long flags; spin_lock_bh(&session->back_lock); cqe_async_msg = &cqe->cqe_common.iscsi_hdr.async_msg; pdu_len = cqe_async_msg->hdr_second_dword & ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_MASK; num_bdqs = pdu_len / QEDI_BDQ_BUF_SIZE; if (cqe->cqe_common.cqe_type == ISCSI_CQE_TYPE_UNSOLICITED) { spin_lock_irqsave(&qedi->hba_lock, flags); qedi_unsol_pdu_adjust_bdq(qedi, &cqe->cqe_unsolicited, pdu_len, num_bdqs, bdq_data); spin_unlock_irqrestore(&qedi->hba_lock, flags); } resp_hdr = (struct iscsi_async *)&qedi_conn->gen_pdu.resp_hdr; memset(resp_hdr, 0, sizeof(struct iscsi_hdr)); resp_hdr->opcode = cqe_async_msg->opcode; resp_hdr->flags = 0x80; lun[0] = cpu_to_be32(cqe_async_msg->lun.lo); lun[1] = cpu_to_be32(cqe_async_msg->lun.hi); memcpy(&resp_hdr->lun, lun, sizeof(struct scsi_lun)); resp_hdr->exp_cmdsn = cpu_to_be32(cqe_async_msg->exp_cmd_sn); resp_hdr->max_cmdsn = cpu_to_be32(cqe_async_msg->max_cmd_sn); resp_hdr->statsn = cpu_to_be32(cqe_async_msg->stat_sn); resp_hdr->async_event = cqe_async_msg->async_event; resp_hdr->async_vcode = cqe_async_msg->async_vcode; resp_hdr->param1 = cpu_to_be16(cqe_async_msg->param1_rsrv); resp_hdr->param2 = cpu_to_be16(cqe_async_msg->param2_rsrv); resp_hdr->param3 = cpu_to_be16(cqe_async_msg->param3_rsrv); __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, bdq_data, pdu_len); spin_unlock_bh(&session->back_lock); } static void qedi_process_reject_mesg(struct qedi_ctx *qedi, union iscsi_cqe *cqe, struct iscsi_task *task, struct qedi_conn *qedi_conn, uint16_t que_idx) { struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data; struct iscsi_session *session = conn->session; struct iscsi_reject_hdr *cqe_reject; struct iscsi_reject *hdr; u32 pld_len, num_bdqs; unsigned long flags; spin_lock_bh(&session->back_lock); cqe_reject = &cqe->cqe_common.iscsi_hdr.reject; pld_len = cqe_reject->hdr_second_dword & ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK; num_bdqs = pld_len / QEDI_BDQ_BUF_SIZE; if (cqe->cqe_common.cqe_type == ISCSI_CQE_TYPE_UNSOLICITED) { spin_lock_irqsave(&qedi->hba_lock, flags); qedi_unsol_pdu_adjust_bdq(qedi, &cqe->cqe_unsolicited, pld_len, num_bdqs, conn->data); spin_unlock_irqrestore(&qedi->hba_lock, flags); } hdr = (struct iscsi_reject *)&qedi_conn->gen_pdu.resp_hdr; memset(hdr, 0, sizeof(struct iscsi_hdr)); hdr->opcode = cqe_reject->opcode; hdr->reason = cqe_reject->hdr_reason; hdr->flags = cqe_reject->hdr_flags; hton24(hdr->dlength, (cqe_reject->hdr_second_dword & ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK)); hdr->max_cmdsn = cpu_to_be32(cqe_reject->max_cmd_sn); hdr->exp_cmdsn = cpu_to_be32(cqe_reject->exp_cmd_sn); hdr->statsn = cpu_to_be32(cqe_reject->stat_sn); hdr->ffffffff = cpu_to_be32(0xffffffff); __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, conn->data, pld_len); spin_unlock_bh(&session->back_lock); } static void qedi_scsi_completion(struct qedi_ctx *qedi, union iscsi_cqe *cqe, struct iscsi_task *task, struct iscsi_conn *conn) { 
struct scsi_cmnd *sc_cmd; struct qedi_cmd *cmd = task->dd_data; struct iscsi_session *session = conn->session; struct iscsi_scsi_rsp *hdr; struct iscsi_data_in_hdr *cqe_data_in; int datalen = 0; struct qedi_conn *qedi_conn; u32 iscsi_cid; u8 cqe_err_bits = 0; iscsi_cid = cqe->cqe_common.conn_id; qedi_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid]; cqe_data_in = &cqe->cqe_common.iscsi_hdr.data_in; cqe_err_bits = cqe->cqe_common.error_bitmap.error_bits.cqe_error_status_bits; spin_lock_bh(&session->back_lock); /* get the scsi command */ sc_cmd = cmd->scsi_cmd; if (!sc_cmd) { QEDI_WARN(&qedi->dbg_ctx, "sc_cmd is NULL!\n"); goto error; } if (!iscsi_cmd(sc_cmd)->task) { QEDI_WARN(&qedi->dbg_ctx, "NULL task pointer, returned in another context.\n"); goto error; } if (!scsi_cmd_to_rq(sc_cmd)->q) { QEDI_WARN(&qedi->dbg_ctx, "request->q is NULL so request is not valid, sc_cmd=%p.\n", sc_cmd); goto error; } qedi_iscsi_unmap_sg_list(cmd); hdr = (struct iscsi_scsi_rsp *)task->hdr; hdr->opcode = cqe_data_in->opcode; hdr->max_cmdsn = cpu_to_be32(cqe_data_in->max_cmd_sn); hdr->exp_cmdsn = cpu_to_be32(cqe_data_in->exp_cmd_sn); hdr->itt = build_itt(cqe->cqe_solicited.itid, conn->session->age); hdr->response = cqe_data_in->reserved1; hdr->cmd_status = cqe_data_in->status_rsvd; hdr->flags = cqe_data_in->flags; hdr->residual_count = cpu_to_be32(cqe_data_in->residual_count); if (hdr->cmd_status == SAM_STAT_CHECK_CONDITION) { datalen = cqe_data_in->reserved2 & ISCSI_COMMON_HDR_DATA_SEG_LEN_MASK; memcpy((char *)conn->data, (char *)cmd->sense_buffer, datalen); } /* If f/w reports data underrun err then set residual to IO transfer * length, set Underrun flag and clear Overrun flag explicitly */ if (unlikely(cqe_err_bits && GET_FIELD(cqe_err_bits, CQE_ERROR_BITMAP_UNDER_RUN_ERR))) { QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "Under flow itt=0x%x proto flags=0x%x tid=0x%x cid 0x%x fw resid 0x%x sc dlen 0x%x\n", hdr->itt, cqe_data_in->flags, cmd->task_id, qedi_conn->iscsi_conn_id, hdr->residual_count, scsi_bufflen(sc_cmd)); hdr->residual_count = cpu_to_be32(scsi_bufflen(sc_cmd)); hdr->flags |= ISCSI_FLAG_CMD_UNDERFLOW; hdr->flags &= (~ISCSI_FLAG_CMD_OVERFLOW); } spin_lock(&qedi_conn->list_lock); if (likely(cmd->io_cmd_in_list)) { cmd->io_cmd_in_list = false; list_del_init(&cmd->io_cmd); qedi_conn->active_cmd_count--; } spin_unlock(&qedi_conn->list_lock); QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID, "Freeing tid=0x%x for cid=0x%x\n", cmd->task_id, qedi_conn->iscsi_conn_id); cmd->state = RESPONSE_RECEIVED; if (qedi_io_tracing) qedi_trace_io(qedi, task, cmd->task_id, QEDI_IO_TRACE_RSP); __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, conn->data, datalen); error: spin_unlock_bh(&session->back_lock); } static void qedi_mtask_completion(struct qedi_ctx *qedi, union iscsi_cqe *cqe, struct iscsi_task *task, struct qedi_conn *conn, uint16_t que_idx) { struct iscsi_conn *iscsi_conn; u32 hdr_opcode; hdr_opcode = cqe->cqe_common.iscsi_hdr.common.hdr_first_byte; iscsi_conn = conn->cls_conn->dd_data; switch (hdr_opcode) { case ISCSI_OPCODE_SCSI_RESPONSE: case ISCSI_OPCODE_DATA_IN: qedi_scsi_completion(qedi, cqe, task, iscsi_conn); break; case ISCSI_OPCODE_LOGIN_RESPONSE: qedi_process_login_resp(qedi, cqe, task, conn); break; case ISCSI_OPCODE_TMF_RESPONSE: qedi_process_tmf_resp(qedi, cqe, task, conn); break; case ISCSI_OPCODE_TEXT_RESPONSE: qedi_process_text_resp(qedi, cqe, task, conn); break; case ISCSI_OPCODE_LOGOUT_RESPONSE: qedi_process_logout_resp(qedi, cqe, task, conn); break; case ISCSI_OPCODE_NOP_IN: 
qedi_process_nopin_mesg(qedi, cqe, task, conn, que_idx); break; default: QEDI_ERR(&qedi->dbg_ctx, "unknown opcode\n"); } } static void qedi_process_nopin_local_cmpl(struct qedi_ctx *qedi, struct iscsi_cqe_solicited *cqe, struct iscsi_task *task, struct qedi_conn *qedi_conn) { struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data; struct iscsi_session *session = conn->session; struct qedi_cmd *cmd = task->dd_data; QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_UNSOL, "itid=0x%x, cmd task id=0x%x\n", cqe->itid, cmd->task_id); cmd->state = RESPONSE_RECEIVED; spin_lock_bh(&session->back_lock); __iscsi_put_task(task); spin_unlock_bh(&session->back_lock); } static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi, struct iscsi_cqe_solicited *cqe, struct iscsi_conn *conn) { struct qedi_work_map *work, *work_tmp; u32 proto_itt = cqe->itid; int found = 0; struct qedi_cmd *qedi_cmd = NULL; u32 iscsi_cid; struct qedi_conn *qedi_conn; struct qedi_cmd *dbg_cmd; struct iscsi_task *mtask, *task; struct iscsi_tm *tmf_hdr = NULL; iscsi_cid = cqe->conn_id; qedi_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid]; if (!qedi_conn) { QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "icid not found 0x%x\n", cqe->conn_id); return; } /* Based on this itt get the corresponding qedi_cmd */ spin_lock_bh(&qedi_conn->tmf_work_lock); list_for_each_entry_safe(work, work_tmp, &qedi_conn->tmf_work_list, list) { if (work->rtid == proto_itt) { /* We found the command */ qedi_cmd = work->qedi_cmd; if (!qedi_cmd->list_tmf_work) { QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, "TMF work not found, cqe->tid=0x%x, cid=0x%x\n", proto_itt, qedi_conn->iscsi_conn_id); WARN_ON(1); } found = 1; mtask = qedi_cmd->task; task = work->ctask; tmf_hdr = (struct iscsi_tm *)mtask->hdr; list_del_init(&work->list); kfree(work); qedi_cmd->list_tmf_work = NULL; } } spin_unlock_bh(&qedi_conn->tmf_work_lock); if (!found) goto check_cleanup_reqs; QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, "TMF work, cqe->tid=0x%x, tmf flags=0x%x, cid=0x%x\n", proto_itt, tmf_hdr->flags, qedi_conn->iscsi_conn_id); spin_lock_bh(&conn->session->back_lock); if (iscsi_task_is_completed(task)) { QEDI_NOTICE(&qedi->dbg_ctx, "IO task completed, tmf rtt=0x%x, cid=0x%x\n", get_itt(tmf_hdr->rtt), qedi_conn->iscsi_conn_id); goto unlock; } dbg_cmd = task->dd_data; QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, "Abort tmf rtt=0x%x, i/o itt=0x%x, i/o tid=0x%x, cid=0x%x\n", get_itt(tmf_hdr->rtt), get_itt(task->itt), dbg_cmd->task_id, qedi_conn->iscsi_conn_id); spin_lock(&qedi_conn->list_lock); if (likely(dbg_cmd->io_cmd_in_list)) { dbg_cmd->io_cmd_in_list = false; list_del_init(&dbg_cmd->io_cmd); qedi_conn->active_cmd_count--; } spin_unlock(&qedi_conn->list_lock); qedi_cmd->state = CLEANUP_RECV; unlock: spin_unlock_bh(&conn->session->back_lock); wake_up_interruptible(&qedi_conn->wait_queue); return; check_cleanup_reqs: if (atomic_inc_return(&qedi_conn->cmd_cleanup_cmpl) == qedi_conn->cmd_cleanup_req) { QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, "Freeing tid=0x%x for cid=0x%x\n", cqe->itid, qedi_conn->iscsi_conn_id); wake_up(&qedi_conn->wait_queue); } } void qedi_fp_process_cqes(struct qedi_work *work) { struct qedi_ctx *qedi = work->qedi; union iscsi_cqe *cqe = &work->cqe; struct iscsi_task *task = NULL; struct iscsi_nopout *nopout_hdr; struct qedi_conn *q_conn; struct iscsi_conn *conn; struct qedi_cmd *qedi_cmd; u32 comp_type; u32 iscsi_cid; u32 hdr_opcode; u16 que_idx = work->que_idx; u8 cqe_err_bits = 0; comp_type = cqe->cqe_common.cqe_type; hdr_opcode = cqe->cqe_common.iscsi_hdr.common.hdr_first_byte; 
cqe_err_bits = cqe->cqe_common.error_bitmap.error_bits.cqe_error_status_bits; QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, "fw_cid=0x%x, cqe type=0x%x, opcode=0x%x\n", cqe->cqe_common.conn_id, comp_type, hdr_opcode); if (comp_type >= MAX_ISCSI_CQES_TYPE) { QEDI_WARN(&qedi->dbg_ctx, "Invalid CqE type\n"); return; } iscsi_cid = cqe->cqe_common.conn_id; q_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid]; if (!q_conn) { QEDI_WARN(&qedi->dbg_ctx, "Session no longer exists for cid=0x%x!!\n", iscsi_cid); return; } conn = q_conn->cls_conn->dd_data; if (unlikely(cqe_err_bits && GET_FIELD(cqe_err_bits, CQE_ERROR_BITMAP_DATA_DIGEST_ERR))) { iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST); return; } switch (comp_type) { case ISCSI_CQE_TYPE_SOLICITED: case ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE: qedi_cmd = container_of(work, struct qedi_cmd, cqe_work); task = qedi_cmd->task; if (!task) { QEDI_WARN(&qedi->dbg_ctx, "task is NULL\n"); return; } /* Process NOPIN local completion */ nopout_hdr = (struct iscsi_nopout *)task->hdr; if ((nopout_hdr->itt == RESERVED_ITT) && (cqe->cqe_solicited.itid != (u16)RESERVED_ITT)) { qedi_process_nopin_local_cmpl(qedi, &cqe->cqe_solicited, task, q_conn); } else { cqe->cqe_solicited.itid = qedi_get_itt(cqe->cqe_solicited); /* Process other solicited responses */ qedi_mtask_completion(qedi, cqe, task, q_conn, que_idx); } break; case ISCSI_CQE_TYPE_UNSOLICITED: switch (hdr_opcode) { case ISCSI_OPCODE_NOP_IN: qedi_process_nopin_mesg(qedi, cqe, task, q_conn, que_idx); break; case ISCSI_OPCODE_ASYNC_MSG: qedi_process_async_mesg(qedi, cqe, task, q_conn, que_idx); break; case ISCSI_OPCODE_REJECT: qedi_process_reject_mesg(qedi, cqe, task, q_conn, que_idx); break; } goto exit_fp_process; case ISCSI_CQE_TYPE_DUMMY: QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, "Dummy CqE\n"); goto exit_fp_process; case ISCSI_CQE_TYPE_TASK_CLEANUP: QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, "CleanUp CqE\n"); qedi_process_cmd_cleanup_resp(qedi, &cqe->cqe_solicited, conn); goto exit_fp_process; default: QEDI_ERR(&qedi->dbg_ctx, "Error cqe.\n"); break; } exit_fp_process: return; } static void qedi_ring_doorbell(struct qedi_conn *qedi_conn) { qedi_conn->ep->db_data.sq_prod = qedi_conn->ep->fw_sq_prod_idx; /* wmb - Make sure fw idx is coherent */ wmb(); writel(*(u32 *)&qedi_conn->ep->db_data, qedi_conn->ep->p_doorbell); /* Make sure fw write idx is coherent, and include both memory barriers * as a failsafe as for some architectures the call is the same but on * others they are two different assembly operations. 
*/ wmb(); QEDI_INFO(&qedi_conn->qedi->dbg_ctx, QEDI_LOG_MP_REQ, "prod_idx=0x%x, fw_prod_idx=0x%x, cid=0x%x\n", qedi_conn->ep->sq_prod_idx, qedi_conn->ep->fw_sq_prod_idx, qedi_conn->iscsi_conn_id); } static u16 qedi_get_wqe_idx(struct qedi_conn *qedi_conn) { struct qedi_endpoint *ep; u16 rval; ep = qedi_conn->ep; rval = ep->sq_prod_idx; /* Increament SQ index */ ep->sq_prod_idx++; ep->fw_sq_prod_idx++; if (ep->sq_prod_idx == QEDI_SQ_SIZE) ep->sq_prod_idx = 0; return rval; } int qedi_send_iscsi_login(struct qedi_conn *qedi_conn, struct iscsi_task *task) { struct iscsi_login_req_hdr login_req_pdu_header; struct scsi_sgl_task_params tx_sgl_task_params; struct scsi_sgl_task_params rx_sgl_task_params; struct iscsi_task_params task_params; struct iscsi_task_context *fw_task_ctx; struct qedi_ctx *qedi = qedi_conn->qedi; struct iscsi_login_req *login_hdr; struct scsi_sge *resp_sge = NULL; struct qedi_cmd *qedi_cmd; struct qedi_endpoint *ep; s16 tid = 0; u16 sq_idx = 0; int rval = 0; resp_sge = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl; qedi_cmd = (struct qedi_cmd *)task->dd_data; ep = qedi_conn->ep; login_hdr = (struct iscsi_login_req *)task->hdr; tid = qedi_get_task_idx(qedi); if (tid == -1) return -ENOMEM; fw_task_ctx = (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid); memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context)); qedi_cmd->task_id = tid; memset(&task_params, 0, sizeof(task_params)); memset(&login_req_pdu_header, 0, sizeof(login_req_pdu_header)); memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params)); memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params)); /* Update header info */ login_req_pdu_header.opcode = login_hdr->opcode; login_req_pdu_header.version_min = login_hdr->min_version; login_req_pdu_header.version_max = login_hdr->max_version; login_req_pdu_header.flags_attr = login_hdr->flags; login_req_pdu_header.isid_tabc = swab32p((u32 *)login_hdr->isid); login_req_pdu_header.isid_d = swab16p((u16 *)&login_hdr->isid[4]); login_req_pdu_header.tsih = login_hdr->tsih; login_req_pdu_header.hdr_second_dword = ntoh24(login_hdr->dlength); qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd); login_req_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt)); login_req_pdu_header.cid = qedi_conn->iscsi_conn_id; login_req_pdu_header.cmd_sn = be32_to_cpu(login_hdr->cmdsn); login_req_pdu_header.exp_stat_sn = be32_to_cpu(login_hdr->exp_statsn); login_req_pdu_header.exp_stat_sn = 0; /* Fill tx AHS and rx buffer */ tx_sgl_task_params.sgl = (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl; tx_sgl_task_params.sgl_phys_addr.lo = (u32)(qedi_conn->gen_pdu.req_dma_addr); tx_sgl_task_params.sgl_phys_addr.hi = (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32); tx_sgl_task_params.total_buffer_size = ntoh24(login_hdr->dlength); tx_sgl_task_params.num_sges = 1; rx_sgl_task_params.sgl = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl; rx_sgl_task_params.sgl_phys_addr.lo = (u32)(qedi_conn->gen_pdu.resp_dma_addr); rx_sgl_task_params.sgl_phys_addr.hi = (u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32); rx_sgl_task_params.total_buffer_size = resp_sge->sge_len; rx_sgl_task_params.num_sges = 1; /* Fill fw input params */ task_params.context = fw_task_ctx; task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id; task_params.itid = tid; task_params.cq_rss_number = 0; task_params.tx_io_size = ntoh24(login_hdr->dlength); task_params.rx_io_size = resp_sge->sge_len; sq_idx = qedi_get_wqe_idx(qedi_conn); task_params.sqe = &ep->sq[sq_idx]; memset(task_params.sqe, 0, sizeof(struct 
iscsi_wqe)); rval = init_initiator_login_request_task(&task_params, &login_req_pdu_header, &tx_sgl_task_params, &rx_sgl_task_params); if (rval) return -1; spin_lock(&qedi_conn->list_lock); list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list); qedi_cmd->io_cmd_in_list = true; qedi_conn->active_cmd_count++; spin_unlock(&qedi_conn->list_lock); qedi_ring_doorbell(qedi_conn); return 0; } int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn, struct iscsi_task *task) { struct iscsi_logout_req_hdr logout_pdu_header; struct scsi_sgl_task_params tx_sgl_task_params; struct scsi_sgl_task_params rx_sgl_task_params; struct iscsi_task_params task_params; struct iscsi_task_context *fw_task_ctx; struct iscsi_logout *logout_hdr = NULL; struct qedi_ctx *qedi = qedi_conn->qedi; struct qedi_cmd *qedi_cmd; struct qedi_endpoint *ep; s16 tid = 0; u16 sq_idx = 0; int rval = 0; qedi_cmd = (struct qedi_cmd *)task->dd_data; logout_hdr = (struct iscsi_logout *)task->hdr; ep = qedi_conn->ep; tid = qedi_get_task_idx(qedi); if (tid == -1) return -ENOMEM; fw_task_ctx = (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid); memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context)); qedi_cmd->task_id = tid; memset(&task_params, 0, sizeof(task_params)); memset(&logout_pdu_header, 0, sizeof(logout_pdu_header)); memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params)); memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params)); /* Update header info */ logout_pdu_header.opcode = logout_hdr->opcode; logout_pdu_header.reason_code = 0x80 | logout_hdr->flags; qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd); logout_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt)); logout_pdu_header.exp_stat_sn = be32_to_cpu(logout_hdr->exp_statsn); logout_pdu_header.cmd_sn = be32_to_cpu(logout_hdr->cmdsn); logout_pdu_header.cid = qedi_conn->iscsi_conn_id; /* Fill fw input params */ task_params.context = fw_task_ctx; task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id; task_params.itid = tid; task_params.cq_rss_number = 0; task_params.tx_io_size = 0; task_params.rx_io_size = 0; sq_idx = qedi_get_wqe_idx(qedi_conn); task_params.sqe = &ep->sq[sq_idx]; memset(task_params.sqe, 0, sizeof(struct iscsi_wqe)); rval = init_initiator_logout_request_task(&task_params, &logout_pdu_header, NULL, NULL); if (rval) return -1; spin_lock(&qedi_conn->list_lock); list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list); qedi_cmd->io_cmd_in_list = true; qedi_conn->active_cmd_count++; spin_unlock(&qedi_conn->list_lock); qedi_ring_doorbell(qedi_conn); return 0; } int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn, struct iscsi_task *task, bool in_recovery) { int rval; struct iscsi_task *ctask; struct qedi_cmd *cmd, *cmd_tmp; struct iscsi_tm *tmf_hdr; unsigned int lun = 0; bool lun_reset = false; struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data; struct iscsi_session *session = conn->session; /* From recovery, task is NULL or from tmf resp valid task */ if (task) { tmf_hdr = (struct iscsi_tm *)task->hdr; if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) == ISCSI_TM_FUNC_LOGICAL_UNIT_RESET) { lun_reset = true; lun = scsilun_to_int(&tmf_hdr->lun); } } qedi_conn->cmd_cleanup_req = 0; atomic_set(&qedi_conn->cmd_cleanup_cmpl, 0); QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, "active_cmd_count=%d, cid=0x%x, in_recovery=%d, lun_reset=%d\n", qedi_conn->active_cmd_count, qedi_conn->iscsi_conn_id, in_recovery, lun_reset); if (lun_reset) spin_lock_bh(&session->back_lock); spin_lock(&qedi_conn->list_lock); 
list_for_each_entry_safe(cmd, cmd_tmp, &qedi_conn->active_cmd_list, io_cmd) { ctask = cmd->task; if (ctask == task) continue; if (lun_reset) { if (cmd->scsi_cmd && cmd->scsi_cmd->device) { QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, "tid=0x%x itt=0x%x scsi_cmd_ptr=%p device=%p task_state=%d cmd_state=0%x cid=0x%x\n", cmd->task_id, get_itt(ctask->itt), cmd->scsi_cmd, cmd->scsi_cmd->device, ctask->state, cmd->state, qedi_conn->iscsi_conn_id); if (cmd->scsi_cmd->device->lun != lun) continue; } } qedi_conn->cmd_cleanup_req++; qedi_iscsi_cleanup_task(ctask, true); cmd->io_cmd_in_list = false; list_del_init(&cmd->io_cmd); qedi_conn->active_cmd_count--; QEDI_WARN(&qedi->dbg_ctx, "Deleted active cmd list node io_cmd=%p, cid=0x%x\n", &cmd->io_cmd, qedi_conn->iscsi_conn_id); } spin_unlock(&qedi_conn->list_lock); if (lun_reset) spin_unlock_bh(&session->back_lock); QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, "cmd_cleanup_req=%d, cid=0x%x\n", qedi_conn->cmd_cleanup_req, qedi_conn->iscsi_conn_id); rval = wait_event_interruptible_timeout(qedi_conn->wait_queue, (qedi_conn->cmd_cleanup_req == atomic_read(&qedi_conn->cmd_cleanup_cmpl)) || test_bit(QEDI_IN_RECOVERY, &qedi->flags), 5 * HZ); if (rval) { QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, "i/o cmd_cleanup_req=%d, equal to cmd_cleanup_cmpl=%d, cid=0x%x\n", qedi_conn->cmd_cleanup_req, atomic_read(&qedi_conn->cmd_cleanup_cmpl), qedi_conn->iscsi_conn_id); return 0; } QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, "i/o cmd_cleanup_req=%d, not equal to cmd_cleanup_cmpl=%d, cid=0x%x\n", qedi_conn->cmd_cleanup_req, atomic_read(&qedi_conn->cmd_cleanup_cmpl), qedi_conn->iscsi_conn_id); iscsi_host_for_each_session(qedi->shost, qedi_mark_device_missing); qedi_ops->common->drain(qedi->cdev); /* Enable IOs for all other sessions except current.*/ if (!wait_event_interruptible_timeout(qedi_conn->wait_queue, (qedi_conn->cmd_cleanup_req == atomic_read(&qedi_conn->cmd_cleanup_cmpl)) || test_bit(QEDI_IN_RECOVERY, &qedi->flags), 5 * HZ)) { iscsi_host_for_each_session(qedi->shost, qedi_mark_device_available); return -1; } iscsi_host_for_each_session(qedi->shost, qedi_mark_device_available); return 0; } void qedi_clearsq(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn, struct iscsi_task *task) { struct qedi_endpoint *qedi_ep; int rval; qedi_ep = qedi_conn->ep; qedi_conn->cmd_cleanup_req = 0; atomic_set(&qedi_conn->cmd_cleanup_cmpl, 0); if (!qedi_ep) { QEDI_WARN(&qedi->dbg_ctx, "Cannot proceed, ep already disconnected, cid=0x%x\n", qedi_conn->iscsi_conn_id); return; } QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "Clearing SQ for cid=0x%x, conn=%p, ep=%p\n", qedi_conn->iscsi_conn_id, qedi_conn, qedi_ep); qedi_ops->clear_sq(qedi->cdev, qedi_ep->handle); rval = qedi_cleanup_all_io(qedi, qedi_conn, task, true); if (rval) { QEDI_ERR(&qedi->dbg_ctx, "fatal error, need hard reset, cid=0x%x\n", qedi_conn->iscsi_conn_id); WARN_ON(1); } } static int qedi_wait_for_cleanup_request(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn, struct iscsi_task *task, struct qedi_cmd *qedi_cmd, struct qedi_work_map *list_work) { struct qedi_cmd *cmd = (struct qedi_cmd *)task->dd_data; int wait; wait = wait_event_interruptible_timeout(qedi_conn->wait_queue, ((qedi_cmd->state == CLEANUP_RECV) || ((qedi_cmd->type == TYPEIO) && (cmd->state == RESPONSE_RECEIVED))), 5 * HZ); if (!wait) { qedi_cmd->state = CLEANUP_WAIT_FAILED; QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, "Cleanup timedout tid=0x%x, issue connection recovery, cid=0x%x\n", cmd->task_id, qedi_conn->iscsi_conn_id); return -1; } return 0; } static 
void qedi_abort_work(struct work_struct *work) { struct qedi_cmd *qedi_cmd = container_of(work, struct qedi_cmd, tmf_work); struct qedi_conn *qedi_conn = qedi_cmd->conn; struct qedi_ctx *qedi = qedi_conn->qedi; struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data; struct qedi_work_map *list_work = NULL; struct iscsi_task *mtask; struct qedi_cmd *cmd; struct iscsi_task *ctask; struct iscsi_tm *tmf_hdr; s16 rval = 0; mtask = qedi_cmd->task; tmf_hdr = (struct iscsi_tm *)mtask->hdr; spin_lock_bh(&conn->session->back_lock); ctask = iscsi_itt_to_ctask(conn, tmf_hdr->rtt); if (!ctask) { spin_unlock_bh(&conn->session->back_lock); QEDI_ERR(&qedi->dbg_ctx, "Invalid RTT. Letting abort timeout.\n"); goto clear_cleanup; } if (iscsi_task_is_completed(ctask)) { spin_unlock_bh(&conn->session->back_lock); QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "Task already completed\n"); /* * We have to still send the TMF because libiscsi needs the * response to avoid a timeout. */ goto send_tmf; } spin_unlock_bh(&conn->session->back_lock); cmd = (struct qedi_cmd *)ctask->dd_data; QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "Abort tmf rtt=0x%x, cmd itt=0x%x, cmd tid=0x%x, cid=0x%x\n", get_itt(tmf_hdr->rtt), get_itt(ctask->itt), cmd->task_id, qedi_conn->iscsi_conn_id); if (qedi_do_not_recover) { QEDI_ERR(&qedi->dbg_ctx, "DONT SEND CLEANUP/ABORT %d\n", qedi_do_not_recover); goto clear_cleanup; } list_work = kzalloc(sizeof(*list_work), GFP_NOIO); if (!list_work) { QEDI_ERR(&qedi->dbg_ctx, "Memory allocation failed\n"); goto clear_cleanup; } qedi_cmd->type = TYPEIO; qedi_cmd->state = CLEANUP_WAIT; list_work->qedi_cmd = qedi_cmd; list_work->rtid = cmd->task_id; list_work->state = QEDI_WORK_SCHEDULED; list_work->ctask = ctask; qedi_cmd->list_tmf_work = list_work; QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, "Queue tmf work=%p, list node=%p, cid=0x%x, tmf flags=0x%x\n", list_work->ptr_tmf_work, list_work, qedi_conn->iscsi_conn_id, tmf_hdr->flags); spin_lock_bh(&qedi_conn->tmf_work_lock); list_add_tail(&list_work->list, &qedi_conn->tmf_work_list); spin_unlock_bh(&qedi_conn->tmf_work_lock); qedi_iscsi_cleanup_task(ctask, false); rval = qedi_wait_for_cleanup_request(qedi, qedi_conn, ctask, qedi_cmd, list_work); if (rval == -1) { QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "FW cleanup got escalated, cid=0x%x\n", qedi_conn->iscsi_conn_id); goto ldel_exit; } send_tmf: send_iscsi_tmf(qedi_conn, qedi_cmd->task, ctask); goto clear_cleanup; ldel_exit: spin_lock_bh(&qedi_conn->tmf_work_lock); if (qedi_cmd->list_tmf_work) { list_del_init(&list_work->list); qedi_cmd->list_tmf_work = NULL; kfree(list_work); } spin_unlock_bh(&qedi_conn->tmf_work_lock); spin_lock(&qedi_conn->list_lock); if (likely(cmd->io_cmd_in_list)) { cmd->io_cmd_in_list = false; list_del_init(&cmd->io_cmd); qedi_conn->active_cmd_count--; } spin_unlock(&qedi_conn->list_lock); clear_cleanup: spin_lock(&qedi_conn->tmf_work_lock); qedi_conn->fw_cleanup_works--; spin_unlock(&qedi_conn->tmf_work_lock); } static int send_iscsi_tmf(struct qedi_conn *qedi_conn, struct iscsi_task *mtask, struct iscsi_task *ctask) { struct iscsi_tmf_request_hdr tmf_pdu_header; struct iscsi_task_params task_params; struct qedi_ctx *qedi = qedi_conn->qedi; struct iscsi_task_context *fw_task_ctx; struct iscsi_tm *tmf_hdr; struct qedi_cmd *qedi_cmd; struct qedi_cmd *cmd; struct qedi_endpoint *ep; u32 scsi_lun[2]; s16 tid = 0; u16 sq_idx = 0; tmf_hdr = (struct iscsi_tm *)mtask->hdr; qedi_cmd = (struct qedi_cmd *)mtask->dd_data; ep = qedi_conn->ep; if (!ep) return -ENODEV; tid = qedi_get_task_idx(qedi); if 
(tid == -1) return -ENOMEM; fw_task_ctx = (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid); memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context)); qedi_cmd->task_id = tid; memset(&task_params, 0, sizeof(task_params)); memset(&tmf_pdu_header, 0, sizeof(tmf_pdu_header)); /* Update header info */ qedi_update_itt_map(qedi, tid, mtask->itt, qedi_cmd); tmf_pdu_header.itt = qedi_set_itt(tid, get_itt(mtask->itt)); tmf_pdu_header.cmd_sn = be32_to_cpu(tmf_hdr->cmdsn); memcpy(scsi_lun, &tmf_hdr->lun, sizeof(struct scsi_lun)); tmf_pdu_header.lun.lo = be32_to_cpu(scsi_lun[0]); tmf_pdu_header.lun.hi = be32_to_cpu(scsi_lun[1]); if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) == ISCSI_TM_FUNC_ABORT_TASK) { cmd = (struct qedi_cmd *)ctask->dd_data; tmf_pdu_header.rtt = qedi_set_itt(cmd->task_id, get_itt(tmf_hdr->rtt)); } else { tmf_pdu_header.rtt = ISCSI_RESERVED_TAG; } tmf_pdu_header.opcode = tmf_hdr->opcode; tmf_pdu_header.function = tmf_hdr->flags; tmf_pdu_header.hdr_second_dword = ntoh24(tmf_hdr->dlength); tmf_pdu_header.ref_cmd_sn = be32_to_cpu(tmf_hdr->refcmdsn); /* Fill fw input params */ task_params.context = fw_task_ctx; task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id; task_params.itid = tid; task_params.cq_rss_number = 0; task_params.tx_io_size = 0; task_params.rx_io_size = 0; sq_idx = qedi_get_wqe_idx(qedi_conn); task_params.sqe = &ep->sq[sq_idx]; memset(task_params.sqe, 0, sizeof(struct iscsi_wqe)); init_initiator_tmf_request_task(&task_params, &tmf_pdu_header); spin_lock(&qedi_conn->list_lock); list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list); qedi_cmd->io_cmd_in_list = true; qedi_conn->active_cmd_count++; spin_unlock(&qedi_conn->list_lock); qedi_ring_doorbell(qedi_conn); return 0; } int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn, struct iscsi_task *mtask) { struct iscsi_tm *tmf_hdr = (struct iscsi_tm *)mtask->hdr; struct qedi_cmd *qedi_cmd = mtask->dd_data; struct qedi_ctx *qedi = qedi_conn->qedi; int rc = 0; switch (tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) { case ISCSI_TM_FUNC_ABORT_TASK: spin_lock(&qedi_conn->tmf_work_lock); qedi_conn->fw_cleanup_works++; spin_unlock(&qedi_conn->tmf_work_lock); INIT_WORK(&qedi_cmd->tmf_work, qedi_abort_work); queue_work(qedi->tmf_thread, &qedi_cmd->tmf_work); break; case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET: case ISCSI_TM_FUNC_TARGET_WARM_RESET: case ISCSI_TM_FUNC_TARGET_COLD_RESET: rc = send_iscsi_tmf(qedi_conn, mtask, NULL); break; default: QEDI_ERR(&qedi->dbg_ctx, "Invalid tmf, cid=0x%x\n", qedi_conn->iscsi_conn_id); return -EINVAL; } return rc; } int qedi_send_iscsi_text(struct qedi_conn *qedi_conn, struct iscsi_task *task) { struct iscsi_text_request_hdr text_request_pdu_header; struct scsi_sgl_task_params tx_sgl_task_params; struct scsi_sgl_task_params rx_sgl_task_params; struct iscsi_task_params task_params; struct iscsi_task_context *fw_task_ctx; struct qedi_ctx *qedi = qedi_conn->qedi; struct iscsi_text *text_hdr; struct scsi_sge *req_sge = NULL; struct scsi_sge *resp_sge = NULL; struct qedi_cmd *qedi_cmd; struct qedi_endpoint *ep; s16 tid = 0; u16 sq_idx = 0; int rval = 0; req_sge = (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl; resp_sge = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl; qedi_cmd = (struct qedi_cmd *)task->dd_data; text_hdr = (struct iscsi_text *)task->hdr; ep = qedi_conn->ep; tid = qedi_get_task_idx(qedi); if (tid == -1) return -ENOMEM; fw_task_ctx = (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid); memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context)); 
qedi_cmd->task_id = tid; memset(&task_params, 0, sizeof(task_params)); memset(&text_request_pdu_header, 0, sizeof(text_request_pdu_header)); memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params)); memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params)); /* Update header info */ text_request_pdu_header.opcode = text_hdr->opcode; text_request_pdu_header.flags_attr = text_hdr->flags; qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd); text_request_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt)); text_request_pdu_header.ttt = text_hdr->ttt; text_request_pdu_header.cmd_sn = be32_to_cpu(text_hdr->cmdsn); text_request_pdu_header.exp_stat_sn = be32_to_cpu(text_hdr->exp_statsn); text_request_pdu_header.hdr_second_dword = ntoh24(text_hdr->dlength); /* Fill tx AHS and rx buffer */ tx_sgl_task_params.sgl = (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl; tx_sgl_task_params.sgl_phys_addr.lo = (u32)(qedi_conn->gen_pdu.req_dma_addr); tx_sgl_task_params.sgl_phys_addr.hi = (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32); tx_sgl_task_params.total_buffer_size = req_sge->sge_len; tx_sgl_task_params.num_sges = 1; rx_sgl_task_params.sgl = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl; rx_sgl_task_params.sgl_phys_addr.lo = (u32)(qedi_conn->gen_pdu.resp_dma_addr); rx_sgl_task_params.sgl_phys_addr.hi = (u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32); rx_sgl_task_params.total_buffer_size = resp_sge->sge_len; rx_sgl_task_params.num_sges = 1; /* Fill fw input params */ task_params.context = fw_task_ctx; task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id; task_params.itid = tid; task_params.cq_rss_number = 0; task_params.tx_io_size = ntoh24(text_hdr->dlength); task_params.rx_io_size = resp_sge->sge_len; sq_idx = qedi_get_wqe_idx(qedi_conn); task_params.sqe = &ep->sq[sq_idx]; memset(task_params.sqe, 0, sizeof(struct iscsi_wqe)); rval = init_initiator_text_request_task(&task_params, &text_request_pdu_header, &tx_sgl_task_params, &rx_sgl_task_params); if (rval) return -1; spin_lock(&qedi_conn->list_lock); list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list); qedi_cmd->io_cmd_in_list = true; qedi_conn->active_cmd_count++; spin_unlock(&qedi_conn->list_lock); qedi_ring_doorbell(qedi_conn); return 0; } int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn, struct iscsi_task *task, char *datap, int data_len, int unsol) { struct iscsi_nop_out_hdr nop_out_pdu_header; struct scsi_sgl_task_params tx_sgl_task_params; struct scsi_sgl_task_params rx_sgl_task_params; struct iscsi_task_params task_params; struct qedi_ctx *qedi = qedi_conn->qedi; struct iscsi_task_context *fw_task_ctx; struct iscsi_nopout *nopout_hdr; struct scsi_sge *resp_sge = NULL; struct qedi_cmd *qedi_cmd; struct qedi_endpoint *ep; u32 scsi_lun[2]; s16 tid = 0; u16 sq_idx = 0; int rval = 0; resp_sge = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl; qedi_cmd = (struct qedi_cmd *)task->dd_data; nopout_hdr = (struct iscsi_nopout *)task->hdr; ep = qedi_conn->ep; tid = qedi_get_task_idx(qedi); if (tid == -1) return -ENOMEM; fw_task_ctx = (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid); memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context)); qedi_cmd->task_id = tid; memset(&task_params, 0, sizeof(task_params)); memset(&nop_out_pdu_header, 0, sizeof(nop_out_pdu_header)); memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params)); memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params)); /* Update header info */ nop_out_pdu_header.opcode = nopout_hdr->opcode; SET_FIELD(nop_out_pdu_header.flags_attr, 
ISCSI_NOP_OUT_HDR_CONST1, 1); SET_FIELD(nop_out_pdu_header.flags_attr, ISCSI_NOP_OUT_HDR_RSRV, 0); memcpy(scsi_lun, &nopout_hdr->lun, sizeof(struct scsi_lun)); nop_out_pdu_header.lun.lo = be32_to_cpu(scsi_lun[0]); nop_out_pdu_header.lun.hi = be32_to_cpu(scsi_lun[1]); nop_out_pdu_header.cmd_sn = be32_to_cpu(nopout_hdr->cmdsn); nop_out_pdu_header.exp_stat_sn = be32_to_cpu(nopout_hdr->exp_statsn); qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd); if (nopout_hdr->ttt != ISCSI_TTT_ALL_ONES) { nop_out_pdu_header.itt = be32_to_cpu(nopout_hdr->itt); nop_out_pdu_header.ttt = be32_to_cpu(nopout_hdr->ttt); } else { nop_out_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt)); nop_out_pdu_header.ttt = ISCSI_TTT_ALL_ONES; spin_lock(&qedi_conn->list_lock); list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list); qedi_cmd->io_cmd_in_list = true; qedi_conn->active_cmd_count++; spin_unlock(&qedi_conn->list_lock); } /* Fill tx AHS and rx buffer */ if (data_len) { tx_sgl_task_params.sgl = (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl; tx_sgl_task_params.sgl_phys_addr.lo = (u32)(qedi_conn->gen_pdu.req_dma_addr); tx_sgl_task_params.sgl_phys_addr.hi = (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32); tx_sgl_task_params.total_buffer_size = data_len; tx_sgl_task_params.num_sges = 1; rx_sgl_task_params.sgl = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl; rx_sgl_task_params.sgl_phys_addr.lo = (u32)(qedi_conn->gen_pdu.resp_dma_addr); rx_sgl_task_params.sgl_phys_addr.hi = (u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32); rx_sgl_task_params.total_buffer_size = resp_sge->sge_len; rx_sgl_task_params.num_sges = 1; } /* Fill fw input params */ task_params.context = fw_task_ctx; task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id; task_params.itid = tid; task_params.cq_rss_number = 0; task_params.tx_io_size = data_len; task_params.rx_io_size = resp_sge->sge_len; sq_idx = qedi_get_wqe_idx(qedi_conn); task_params.sqe = &ep->sq[sq_idx]; memset(task_params.sqe, 0, sizeof(struct iscsi_wqe)); rval = init_initiator_nop_out_task(&task_params, &nop_out_pdu_header, &tx_sgl_task_params, &rx_sgl_task_params); if (rval) return -1; qedi_ring_doorbell(qedi_conn); return 0; } static int qedi_split_bd(struct qedi_cmd *cmd, u64 addr, int sg_len, int bd_index) { struct scsi_sge *bd = cmd->io_tbl.sge_tbl; int frag_size, sg_frags; sg_frags = 0; while (sg_len) { if (addr % QEDI_PAGE_SIZE) frag_size = (QEDI_PAGE_SIZE - (addr % QEDI_PAGE_SIZE)); else frag_size = (sg_len > QEDI_BD_SPLIT_SZ) ? 0 : (sg_len % QEDI_BD_SPLIT_SZ); if (frag_size == 0) frag_size = QEDI_BD_SPLIT_SZ; bd[bd_index + sg_frags].sge_addr.lo = (addr & 0xffffffff); bd[bd_index + sg_frags].sge_addr.hi = (addr >> 32); bd[bd_index + sg_frags].sge_len = (u16)frag_size; QEDI_INFO(&cmd->conn->qedi->dbg_ctx, QEDI_LOG_IO, "split sge %d: addr=%llx, len=%x", (bd_index + sg_frags), addr, frag_size); addr += (u64)frag_size; sg_frags++; sg_len -= frag_size; } return sg_frags; } static int qedi_map_scsi_sg(struct qedi_ctx *qedi, struct qedi_cmd *cmd) { struct scsi_cmnd *sc = cmd->scsi_cmd; struct scsi_sge *bd = cmd->io_tbl.sge_tbl; struct scatterlist *sg; int byte_count = 0; int bd_count = 0; int sg_count; int sg_len; int sg_frags; u64 addr, end_addr; int i; WARN_ON(scsi_sg_count(sc) > QEDI_ISCSI_MAX_BDS_PER_CMD); sg_count = dma_map_sg(&qedi->pdev->dev, scsi_sglist(sc), scsi_sg_count(sc), sc->sc_data_direction); /* * New condition to send single SGE as cached-SGL. * Single SGE with length less than 64K. 
*/ sg = scsi_sglist(sc); if ((sg_count == 1) && (sg_dma_len(sg) <= MAX_SGLEN_FOR_CACHESGL)) { sg_len = sg_dma_len(sg); addr = (u64)sg_dma_address(sg); bd[bd_count].sge_addr.lo = (addr & 0xffffffff); bd[bd_count].sge_addr.hi = (addr >> 32); bd[bd_count].sge_len = (u16)sg_len; QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, "single-cached-sgl: bd_count:%d addr=%llx, len=%x", sg_count, addr, sg_len); return ++bd_count; } scsi_for_each_sg(sc, sg, sg_count, i) { sg_len = sg_dma_len(sg); addr = (u64)sg_dma_address(sg); end_addr = (addr + sg_len); /* * first sg elem in the 'list', * check if end addr is page-aligned. */ if ((i == 0) && (sg_count > 1) && (end_addr % QEDI_PAGE_SIZE)) cmd->use_slowpath = true; /* * last sg elem in the 'list', * check if start addr is page-aligned. */ else if ((i == (sg_count - 1)) && (sg_count > 1) && (addr % QEDI_PAGE_SIZE)) cmd->use_slowpath = true; /* * middle sg elements in list, * check if start and end addr is page-aligned */ else if ((i != 0) && (i != (sg_count - 1)) && ((addr % QEDI_PAGE_SIZE) || (end_addr % QEDI_PAGE_SIZE))) cmd->use_slowpath = true; QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, "sg[%d] size=0x%x", i, sg_len); if (sg_len > QEDI_BD_SPLIT_SZ) { sg_frags = qedi_split_bd(cmd, addr, sg_len, bd_count); } else { sg_frags = 1; bd[bd_count].sge_addr.lo = addr & 0xffffffff; bd[bd_count].sge_addr.hi = addr >> 32; bd[bd_count].sge_len = sg_len; } byte_count += sg_len; bd_count += sg_frags; } if (byte_count != scsi_bufflen(sc)) QEDI_ERR(&qedi->dbg_ctx, "byte_count = %d != scsi_bufflen = %d\n", byte_count, scsi_bufflen(sc)); else QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, "byte_count = %d\n", byte_count); WARN_ON(byte_count != scsi_bufflen(sc)); return bd_count; } static void qedi_iscsi_map_sg_list(struct qedi_cmd *cmd) { int bd_count; struct scsi_cmnd *sc = cmd->scsi_cmd; if (scsi_sg_count(sc)) { bd_count = qedi_map_scsi_sg(cmd->conn->qedi, cmd); if (bd_count == 0) return; } else { struct scsi_sge *bd = cmd->io_tbl.sge_tbl; bd[0].sge_addr.lo = 0; bd[0].sge_addr.hi = 0; bd[0].sge_len = 0; bd_count = 0; } cmd->io_tbl.sge_valid = bd_count; } static void qedi_cpy_scsi_cdb(struct scsi_cmnd *sc, u32 *dstp) { u32 dword; int lpcnt; u8 *srcp; lpcnt = sc->cmd_len / sizeof(dword); srcp = (u8 *)sc->cmnd; while (lpcnt--) { memcpy(&dword, (const void *)srcp, 4); *dstp = cpu_to_be32(dword); srcp += 4; dstp++; } if (sc->cmd_len & 0x3) { dword = (u32)srcp[0] | ((u32)srcp[1] << 8); *dstp = cpu_to_be32(dword); } } void qedi_trace_io(struct qedi_ctx *qedi, struct iscsi_task *task, u16 tid, int8_t direction) { struct qedi_io_log *io_log; struct iscsi_conn *conn = task->conn; struct qedi_conn *qedi_conn = conn->dd_data; struct scsi_cmnd *sc_cmd = task->sc; unsigned long flags; spin_lock_irqsave(&qedi->io_trace_lock, flags); io_log = &qedi->io_trace_buf[qedi->io_trace_idx]; io_log->direction = direction; io_log->task_id = tid; io_log->cid = qedi_conn->iscsi_conn_id; io_log->lun = sc_cmd->device->lun; io_log->op = sc_cmd->cmnd[0]; io_log->lba[0] = sc_cmd->cmnd[2]; io_log->lba[1] = sc_cmd->cmnd[3]; io_log->lba[2] = sc_cmd->cmnd[4]; io_log->lba[3] = sc_cmd->cmnd[5]; io_log->bufflen = scsi_bufflen(sc_cmd); io_log->sg_count = scsi_sg_count(sc_cmd); io_log->fast_sgs = qedi->fast_sgls; io_log->cached_sgs = qedi->cached_sgls; io_log->slow_sgs = qedi->slow_sgls; io_log->cached_sge = qedi->use_cached_sge; io_log->slow_sge = qedi->use_slow_sge; io_log->fast_sge = qedi->use_fast_sge; io_log->result = sc_cmd->result; io_log->jiffies = jiffies; io_log->blk_req_cpu = smp_processor_id(); if (direction == 
QEDI_IO_TRACE_REQ) { /* For requests we only care about the submission CPU */ io_log->req_cpu = smp_processor_id() % qedi->num_queues; io_log->intr_cpu = 0; io_log->blk_rsp_cpu = 0; } else if (direction == QEDI_IO_TRACE_RSP) { io_log->req_cpu = smp_processor_id() % qedi->num_queues; io_log->intr_cpu = qedi->intr_cpu; io_log->blk_rsp_cpu = smp_processor_id(); } qedi->io_trace_idx++; if (qedi->io_trace_idx == QEDI_IO_TRACE_SIZE) qedi->io_trace_idx = 0; qedi->use_cached_sge = false; qedi->use_slow_sge = false; qedi->use_fast_sge = false; spin_unlock_irqrestore(&qedi->io_trace_lock, flags); } int qedi_iscsi_send_ioreq(struct iscsi_task *task) { struct iscsi_conn *conn = task->conn; struct iscsi_session *session = conn->session; struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session); struct qedi_ctx *qedi = iscsi_host_priv(shost); struct qedi_conn *qedi_conn = conn->dd_data; struct qedi_cmd *cmd = task->dd_data; struct scsi_cmnd *sc = task->sc; struct iscsi_cmd_hdr cmd_pdu_header; struct scsi_sgl_task_params tx_sgl_task_params; struct scsi_sgl_task_params rx_sgl_task_params; struct scsi_sgl_task_params *prx_sgl = NULL; struct scsi_sgl_task_params *ptx_sgl = NULL; struct iscsi_task_params task_params; struct iscsi_conn_params conn_params; struct scsi_initiator_cmd_params cmd_params; struct iscsi_task_context *fw_task_ctx; struct iscsi_cls_conn *cls_conn; struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr; enum iscsi_task_type task_type = MAX_ISCSI_TASK_TYPE; struct qedi_endpoint *ep; u32 scsi_lun[2]; s16 tid = 0; u16 sq_idx = 0; u16 cq_idx; int rval = 0; ep = qedi_conn->ep; cls_conn = qedi_conn->cls_conn; conn = cls_conn->dd_data; qedi_iscsi_map_sg_list(cmd); int_to_scsilun(sc->device->lun, (struct scsi_lun *)scsi_lun); tid = qedi_get_task_idx(qedi); if (tid == -1) return -ENOMEM; fw_task_ctx = (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid); memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context)); cmd->task_id = tid; memset(&task_params, 0, sizeof(task_params)); memset(&cmd_pdu_header, 0, sizeof(cmd_pdu_header)); memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params)); memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params)); memset(&conn_params, 0, sizeof(conn_params)); memset(&cmd_params, 0, sizeof(cmd_params)); cq_idx = smp_processor_id() % qedi->num_queues; /* Update header info */ SET_FIELD(cmd_pdu_header.flags_attr, ISCSI_CMD_HDR_ATTR, ISCSI_ATTR_SIMPLE); if (hdr->cdb[0] != TEST_UNIT_READY) { if (sc->sc_data_direction == DMA_TO_DEVICE) { SET_FIELD(cmd_pdu_header.flags_attr, ISCSI_CMD_HDR_WRITE, 1); task_type = ISCSI_TASK_TYPE_INITIATOR_WRITE; } else { SET_FIELD(cmd_pdu_header.flags_attr, ISCSI_CMD_HDR_READ, 1); task_type = ISCSI_TASK_TYPE_INITIATOR_READ; } } cmd_pdu_header.lun.lo = be32_to_cpu(scsi_lun[0]); cmd_pdu_header.lun.hi = be32_to_cpu(scsi_lun[1]); qedi_update_itt_map(qedi, tid, task->itt, cmd); cmd_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt)); cmd_pdu_header.expected_transfer_length = cpu_to_be32(hdr->data_length); cmd_pdu_header.hdr_second_dword = ntoh24(hdr->dlength); cmd_pdu_header.cmd_sn = be32_to_cpu(hdr->cmdsn); cmd_pdu_header.hdr_first_byte = hdr->opcode; qedi_cpy_scsi_cdb(sc, (u32 *)cmd_pdu_header.cdb); /* Fill tx AHS and rx buffer */ if (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) { tx_sgl_task_params.sgl = cmd->io_tbl.sge_tbl; tx_sgl_task_params.sgl_phys_addr.lo = (u32)(cmd->io_tbl.sge_tbl_dma); tx_sgl_task_params.sgl_phys_addr.hi = (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32); 
tx_sgl_task_params.total_buffer_size = scsi_bufflen(sc); tx_sgl_task_params.num_sges = cmd->io_tbl.sge_valid; if (cmd->use_slowpath) tx_sgl_task_params.small_mid_sge = true; } else if (task_type == ISCSI_TASK_TYPE_INITIATOR_READ) { rx_sgl_task_params.sgl = cmd->io_tbl.sge_tbl; rx_sgl_task_params.sgl_phys_addr.lo = (u32)(cmd->io_tbl.sge_tbl_dma); rx_sgl_task_params.sgl_phys_addr.hi = (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32); rx_sgl_task_params.total_buffer_size = scsi_bufflen(sc); rx_sgl_task_params.num_sges = cmd->io_tbl.sge_valid; } /* Add conn param */ conn_params.first_burst_length = conn->session->first_burst; conn_params.max_send_pdu_length = conn->max_xmit_dlength; conn_params.max_burst_length = conn->session->max_burst; if (conn->session->initial_r2t_en) conn_params.initial_r2t = true; if (conn->session->imm_data_en) conn_params.immediate_data = true; /* Add cmd params */ cmd_params.sense_data_buffer_phys_addr.lo = (u32)cmd->sense_buffer_dma; cmd_params.sense_data_buffer_phys_addr.hi = (u32)((u64)cmd->sense_buffer_dma >> 32); /* Fill fw input params */ task_params.context = fw_task_ctx; task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id; task_params.itid = tid; task_params.cq_rss_number = cq_idx; if (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) task_params.tx_io_size = scsi_bufflen(sc); else if (task_type == ISCSI_TASK_TYPE_INITIATOR_READ) task_params.rx_io_size = scsi_bufflen(sc); sq_idx = qedi_get_wqe_idx(qedi_conn); task_params.sqe = &ep->sq[sq_idx]; QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, "%s: %s-SGL: sg_len=0x%x num_sges=0x%x first-sge-lo=0x%x first-sge-hi=0x%x\n", (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) ? "Write " : "Read ", (cmd->io_tbl.sge_valid == 1) ? "Single" : (cmd->use_slowpath ? "SLOW" : "FAST"), (u16)cmd->io_tbl.sge_valid, scsi_bufflen(sc), (u32)(cmd->io_tbl.sge_tbl_dma), (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32)); memset(task_params.sqe, 0, sizeof(struct iscsi_wqe)); if (task_params.tx_io_size != 0) ptx_sgl = &tx_sgl_task_params; if (task_params.rx_io_size != 0) prx_sgl = &rx_sgl_task_params; rval = init_initiator_rw_iscsi_task(&task_params, &conn_params, &cmd_params, &cmd_pdu_header, ptx_sgl, prx_sgl, NULL); if (rval) return -1; spin_lock(&qedi_conn->list_lock); list_add_tail(&cmd->io_cmd, &qedi_conn->active_cmd_list); cmd->io_cmd_in_list = true; qedi_conn->active_cmd_count++; spin_unlock(&qedi_conn->list_lock); qedi_ring_doorbell(qedi_conn); return 0; } int qedi_iscsi_cleanup_task(struct iscsi_task *task, bool mark_cmd_node_deleted) { struct iscsi_task_params task_params; struct qedi_endpoint *ep; struct iscsi_conn *conn = task->conn; struct qedi_conn *qedi_conn = conn->dd_data; struct qedi_cmd *cmd = task->dd_data; u16 sq_idx = 0; int rval = 0; QEDI_INFO(&qedi_conn->qedi->dbg_ctx, QEDI_LOG_SCSI_TM, "issue cleanup tid=0x%x itt=0x%x task_state=%d cmd_state=0%x cid=0x%x\n", cmd->task_id, get_itt(task->itt), task->state, cmd->state, qedi_conn->iscsi_conn_id); memset(&task_params, 0, sizeof(task_params)); ep = qedi_conn->ep; sq_idx = qedi_get_wqe_idx(qedi_conn); task_params.sqe = &ep->sq[sq_idx]; memset(task_params.sqe, 0, sizeof(struct iscsi_wqe)); task_params.itid = cmd->task_id; rval = init_cleanup_task(&task_params); if (rval) return rval; qedi_ring_doorbell(qedi_conn); return 0; }
linux-master
drivers/scsi/qedi/qedi_fw.c
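Editorial note: the qedi_fw.c record above repeatedly uses one small pattern that is easy to miss in the flattened text — qedi_get_wqe_idx() hands out the current send-queue slot, wraps the slot index at the ring size, and keeps a separate free-running counter (fw_sq_prod_idx) that qedi_ring_doorbell() reports to the firmware. The sketch below is an illustrative user-space analogue only, not driver code; SQ_SIZE is an assumed stand-in for QEDI_SQ_SIZE and the struct names are invented for the example.

#include <stdint.h>
#include <stdio.h>

#define SQ_SIZE 16 /* assumed ring size for the example */

struct sq_state {
	uint16_t prod_idx;    /* slot index into the ring, wraps at SQ_SIZE */
	uint16_t fw_prod_idx; /* free-running counter reported to firmware */
};

/* Return the slot to fill, then advance both producer views. */
static uint16_t sq_get_slot(struct sq_state *sq)
{
	uint16_t slot = sq->prod_idx;

	sq->prod_idx++;
	sq->fw_prod_idx++;
	if (sq->prod_idx == SQ_SIZE)
		sq->prod_idx = 0;
	return slot;
}

int main(void)
{
	struct sq_state sq = { 0, 0 };

	for (int i = 0; i < 20; i++)
		printf("wqe %2d -> slot %2u (fw idx %2u)\n",
		       i, sq_get_slot(&sq), sq.fw_prod_idx);
	return 0;
}

The split between a wrapping slot index and a monotonically increasing firmware counter is what lets the hardware detect how far the producer has advanced even across wrap-arounds.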
// SPDX-License-Identifier: GPL-2.0-only /* * QLogic iSCSI Offload Driver * Copyright (c) 2016 Cavium Inc. */ #include "qedi.h" #include "qedi_gbl.h" #include "qedi_iscsi.h" #include "qedi_dbg.h" static inline struct qedi_ctx *qedi_dev_to_hba(struct device *dev) { struct Scsi_Host *shost = class_to_shost(dev); return iscsi_host_priv(shost); } static ssize_t port_state_show(struct device *dev, struct device_attribute *attr, char *buf) { struct qedi_ctx *qedi = qedi_dev_to_hba(dev); if (atomic_read(&qedi->link_state) == QEDI_LINK_UP) return sprintf(buf, "Online\n"); else return sprintf(buf, "Linkdown\n"); } static ssize_t speed_show(struct device *dev, struct device_attribute *attr, char *buf) { struct qedi_ctx *qedi = qedi_dev_to_hba(dev); struct qed_link_output if_link; qedi_ops->common->get_link(qedi->cdev, &if_link); return sprintf(buf, "%d Gbit\n", if_link.speed / 1000); } static DEVICE_ATTR_RO(port_state); static DEVICE_ATTR_RO(speed); static struct attribute *qedi_shost_attrs[] = { &dev_attr_port_state.attr, &dev_attr_speed.attr, NULL }; static const struct attribute_group qedi_shost_attr_group = { .attrs = qedi_shost_attrs }; const struct attribute_group *qedi_shost_groups[] = { &qedi_shost_attr_group, NULL };
linux-master
drivers/scsi/qedi/qedi_sysfs.c
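Editorial note: the short qedi_sysfs.c record above shows the standard sysfs pattern — DEVICE_ATTR_RO() attributes, each backed by a show() callback that formats into a buffer, collected into a NULL-terminated attribute array and exposed as an attribute group. The sketch below is only a user-space imitation of that shape for readers unfamiliar with the pattern; it is not kernel code, and the names and the hard-coded speed value are invented for the example.

#include <stdio.h>

struct fake_attr {
	const char *name;
	int (*show)(char *buf, size_t len);
};

static int port_state_show(char *buf, size_t len)
{
	/* A real driver would query the device's link state here. */
	return snprintf(buf, len, "Online\n");
}

static int speed_show(char *buf, size_t len)
{
	return snprintf(buf, len, "%d Gbit\n", 25);
}

/* NULL-terminated, like the struct attribute *qedi_shost_attrs[] array. */
static const struct fake_attr attrs[] = {
	{ "port_state", port_state_show },
	{ "speed", speed_show },
	{ NULL, NULL }
};

int main(void)
{
	char buf[64];

	for (const struct fake_attr *a = attrs; a->name; a++) {
		a->show(buf, sizeof(buf));
		printf("%s: %s", a->name, buf);
	}
	return 0;
}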
// SPDX-License-Identifier: GPL-2.0-only /* * QLogic iSCSI Offload Driver * Copyright (c) 2016 Cavium Inc. */ #include <linux/module.h> #include <linux/pci.h> #include <linux/kernel.h> #include <linux/if_arp.h> #include <scsi/iscsi_if.h> #include <linux/inet.h> #include <net/arp.h> #include <linux/list.h> #include <linux/kthread.h> #include <linux/mm.h> #include <linux/if_vlan.h> #include <linux/cpu.h> #include <linux/iscsi_boot_sysfs.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_host.h> #include <scsi/scsi.h> #include "qedi.h" #include "qedi_gbl.h" #include "qedi_iscsi.h" static uint qedi_qed_debug; module_param(qedi_qed_debug, uint, 0644); MODULE_PARM_DESC(qedi_qed_debug, " QED debug level 0 (default)"); static uint qedi_fw_debug; module_param(qedi_fw_debug, uint, 0644); MODULE_PARM_DESC(qedi_fw_debug, " Firmware debug level 0(default) to 3"); uint qedi_dbg_log = QEDI_LOG_WARN | QEDI_LOG_SCSI_TM; module_param(qedi_dbg_log, uint, 0644); MODULE_PARM_DESC(qedi_dbg_log, " Default debug level"); uint qedi_io_tracing; module_param(qedi_io_tracing, uint, 0644); MODULE_PARM_DESC(qedi_io_tracing, " Enable logging of SCSI requests/completions into trace buffer. (default off)."); static uint qedi_ll2_buf_size = 0x400; module_param(qedi_ll2_buf_size, uint, 0644); MODULE_PARM_DESC(qedi_ll2_buf_size, "parameter to set ping packet size, default - 0x400, Jumbo packets - 0x2400."); static uint qedi_flags_override; module_param(qedi_flags_override, uint, 0644); MODULE_PARM_DESC(qedi_flags_override, "Disable/Enable MFW error flags bits action."); const struct qed_iscsi_ops *qedi_ops; static struct scsi_transport_template *qedi_scsi_transport; static struct pci_driver qedi_pci_driver; static DEFINE_PER_CPU(struct qedi_percpu_s, qedi_percpu); static LIST_HEAD(qedi_udev_list); /* Static function declaration */ static int qedi_alloc_global_queues(struct qedi_ctx *qedi); static void qedi_free_global_queues(struct qedi_ctx *qedi); static struct qedi_cmd *qedi_get_cmd_from_tid(struct qedi_ctx *qedi, u32 tid); static void qedi_reset_uio_rings(struct qedi_uio_dev *udev); static void qedi_ll2_free_skbs(struct qedi_ctx *qedi); static struct nvm_iscsi_block *qedi_get_nvram_block(struct qedi_ctx *qedi); static void qedi_recovery_handler(struct work_struct *work); static void qedi_schedule_hw_err_handler(void *dev, enum qed_hw_err_type err_type); static int qedi_suspend(struct pci_dev *pdev, pm_message_t state); static int qedi_iscsi_event_cb(void *context, u8 fw_event_code, void *fw_handle) { struct qedi_ctx *qedi; struct qedi_endpoint *qedi_ep; struct iscsi_eqe_data *data; int rval = 0; if (!context || !fw_handle) { QEDI_ERR(NULL, "Recv event with ctx NULL\n"); return -EINVAL; } qedi = (struct qedi_ctx *)context; QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "Recv Event %d fw_handle %p\n", fw_event_code, fw_handle); data = (struct iscsi_eqe_data *)fw_handle; QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "icid=0x%x conn_id=0x%x err-code=0x%x error-pdu-opcode-reserved=0x%x\n", data->icid, data->conn_id, data->error_code, data->error_pdu_opcode_reserved); qedi_ep = qedi->ep_tbl[data->icid]; if (!qedi_ep) { QEDI_WARN(&qedi->dbg_ctx, "Cannot process event, ep already disconnected, cid=0x%x\n", data->icid); WARN_ON(1); return -ENODEV; } switch (fw_event_code) { case ISCSI_EVENT_TYPE_ASYN_CONNECT_COMPLETE: if (qedi_ep->state == EP_STATE_OFLDCONN_START) qedi_ep->state = EP_STATE_OFLDCONN_COMPL; wake_up_interruptible(&qedi_ep->tcp_ofld_wait); break; case 
ISCSI_EVENT_TYPE_ASYN_TERMINATE_DONE: qedi_ep->state = EP_STATE_DISCONN_COMPL; wake_up_interruptible(&qedi_ep->tcp_ofld_wait); break; case ISCSI_EVENT_TYPE_ISCSI_CONN_ERROR: qedi_process_iscsi_error(qedi_ep, data); break; case ISCSI_EVENT_TYPE_ASYN_ABORT_RCVD: case ISCSI_EVENT_TYPE_ASYN_SYN_RCVD: case ISCSI_EVENT_TYPE_ASYN_MAX_RT_TIME: case ISCSI_EVENT_TYPE_ASYN_MAX_RT_CNT: case ISCSI_EVENT_TYPE_ASYN_MAX_KA_PROBES_CNT: case ISCSI_EVENT_TYPE_ASYN_FIN_WAIT2: case ISCSI_EVENT_TYPE_TCP_CONN_ERROR: qedi_process_tcp_error(qedi_ep, data); break; default: QEDI_ERR(&qedi->dbg_ctx, "Recv Unknown Event %u\n", fw_event_code); } return rval; } static int qedi_uio_open(struct uio_info *uinfo, struct inode *inode) { struct qedi_uio_dev *udev = uinfo->priv; struct qedi_ctx *qedi = udev->qedi; if (!capable(CAP_NET_ADMIN)) return -EPERM; if (udev->uio_dev != -1) return -EBUSY; rtnl_lock(); udev->uio_dev = iminor(inode); qedi_reset_uio_rings(udev); set_bit(UIO_DEV_OPENED, &qedi->flags); rtnl_unlock(); return 0; } static int qedi_uio_close(struct uio_info *uinfo, struct inode *inode) { struct qedi_uio_dev *udev = uinfo->priv; struct qedi_ctx *qedi = udev->qedi; udev->uio_dev = -1; clear_bit(UIO_DEV_OPENED, &qedi->flags); qedi_ll2_free_skbs(qedi); return 0; } static void __qedi_free_uio_rings(struct qedi_uio_dev *udev) { if (udev->uctrl) { free_page((unsigned long)udev->uctrl); udev->uctrl = NULL; } if (udev->ll2_ring) { free_page((unsigned long)udev->ll2_ring); udev->ll2_ring = NULL; } if (udev->ll2_buf) { free_pages((unsigned long)udev->ll2_buf, 2); udev->ll2_buf = NULL; } } static void __qedi_free_uio(struct qedi_uio_dev *udev) { uio_unregister_device(&udev->qedi_uinfo); __qedi_free_uio_rings(udev); pci_dev_put(udev->pdev); kfree(udev); } static void qedi_free_uio(struct qedi_uio_dev *udev) { if (!udev) return; list_del_init(&udev->list); __qedi_free_uio(udev); } static void qedi_reset_uio_rings(struct qedi_uio_dev *udev) { struct qedi_ctx *qedi = NULL; struct qedi_uio_ctrl *uctrl = NULL; qedi = udev->qedi; uctrl = udev->uctrl; spin_lock_bh(&qedi->ll2_lock); uctrl->host_rx_cons = 0; uctrl->hw_rx_prod = 0; uctrl->hw_rx_bd_prod = 0; uctrl->host_rx_bd_cons = 0; memset(udev->ll2_ring, 0, udev->ll2_ring_size); memset(udev->ll2_buf, 0, udev->ll2_buf_size); spin_unlock_bh(&qedi->ll2_lock); } static int __qedi_alloc_uio_rings(struct qedi_uio_dev *udev) { int rc = 0; if (udev->ll2_ring || udev->ll2_buf) return rc; /* Memory for control area. 
*/ udev->uctrl = (void *)get_zeroed_page(GFP_KERNEL); if (!udev->uctrl) return -ENOMEM; /* Allocating memory for LL2 ring */ udev->ll2_ring_size = QEDI_PAGE_SIZE; udev->ll2_ring = (void *)get_zeroed_page(GFP_KERNEL | __GFP_COMP); if (!udev->ll2_ring) { rc = -ENOMEM; goto exit_alloc_ring; } /* Allocating memory for Tx/Rx pkt buffer */ udev->ll2_buf_size = TX_RX_RING * qedi_ll2_buf_size; udev->ll2_buf_size = QEDI_PAGE_ALIGN(udev->ll2_buf_size); udev->ll2_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_COMP | __GFP_ZERO, 2); if (!udev->ll2_buf) { rc = -ENOMEM; goto exit_alloc_buf; } return rc; exit_alloc_buf: free_page((unsigned long)udev->ll2_ring); udev->ll2_ring = NULL; exit_alloc_ring: return rc; } static int qedi_alloc_uio_rings(struct qedi_ctx *qedi) { struct qedi_uio_dev *udev = NULL; int rc = 0; list_for_each_entry(udev, &qedi_udev_list, list) { if (udev->pdev == qedi->pdev) { udev->qedi = qedi; if (__qedi_alloc_uio_rings(udev)) { udev->qedi = NULL; return -ENOMEM; } qedi->udev = udev; return 0; } } udev = kzalloc(sizeof(*udev), GFP_KERNEL); if (!udev) goto err_udev; udev->uio_dev = -1; udev->qedi = qedi; udev->pdev = qedi->pdev; rc = __qedi_alloc_uio_rings(udev); if (rc) goto err_uctrl; list_add(&udev->list, &qedi_udev_list); pci_dev_get(udev->pdev); qedi->udev = udev; udev->tx_pkt = udev->ll2_buf; udev->rx_pkt = udev->ll2_buf + qedi_ll2_buf_size; return 0; err_uctrl: kfree(udev); err_udev: return -ENOMEM; } static int qedi_init_uio(struct qedi_ctx *qedi) { struct qedi_uio_dev *udev = qedi->udev; struct uio_info *uinfo; int ret = 0; if (!udev) return -ENOMEM; uinfo = &udev->qedi_uinfo; uinfo->mem[0].addr = (unsigned long)udev->uctrl; uinfo->mem[0].size = sizeof(struct qedi_uio_ctrl); uinfo->mem[0].memtype = UIO_MEM_LOGICAL; uinfo->mem[1].addr = (unsigned long)udev->ll2_ring; uinfo->mem[1].size = udev->ll2_ring_size; uinfo->mem[1].memtype = UIO_MEM_LOGICAL; uinfo->mem[2].addr = (unsigned long)udev->ll2_buf; uinfo->mem[2].size = udev->ll2_buf_size; uinfo->mem[2].memtype = UIO_MEM_LOGICAL; uinfo->name = "qedi_uio"; uinfo->version = QEDI_MODULE_VERSION; uinfo->irq = UIO_IRQ_CUSTOM; uinfo->open = qedi_uio_open; uinfo->release = qedi_uio_close; if (udev->uio_dev == -1) { if (!uinfo->priv) { uinfo->priv = udev; ret = uio_register_device(&udev->pdev->dev, uinfo); if (ret) { QEDI_ERR(&qedi->dbg_ctx, "UIO registration failed\n"); } } } return ret; } static int qedi_alloc_and_init_sb(struct qedi_ctx *qedi, struct qed_sb_info *sb_info, u16 sb_id) { struct status_block *sb_virt; dma_addr_t sb_phys; int ret; sb_virt = dma_alloc_coherent(&qedi->pdev->dev, sizeof(struct status_block), &sb_phys, GFP_KERNEL); if (!sb_virt) { QEDI_ERR(&qedi->dbg_ctx, "Status block allocation failed for id = %d.\n", sb_id); return -ENOMEM; } ret = qedi_ops->common->sb_init(qedi->cdev, sb_info, sb_virt, sb_phys, sb_id, QED_SB_TYPE_STORAGE); if (ret) { QEDI_ERR(&qedi->dbg_ctx, "Status block initialization failed for id = %d.\n", sb_id); return ret; } return 0; } static void qedi_free_sb(struct qedi_ctx *qedi) { struct qed_sb_info *sb_info; int id; for (id = 0; id < MIN_NUM_CPUS_MSIX(qedi); id++) { sb_info = &qedi->sb_array[id]; if (sb_info->sb_virt) dma_free_coherent(&qedi->pdev->dev, sizeof(*sb_info->sb_virt), (void *)sb_info->sb_virt, sb_info->sb_phys); } } static void qedi_free_fp(struct qedi_ctx *qedi) { kfree(qedi->fp_array); kfree(qedi->sb_array); } static void qedi_destroy_fp(struct qedi_ctx *qedi) { qedi_free_sb(qedi); qedi_free_fp(qedi); } static int qedi_alloc_fp(struct qedi_ctx *qedi) { int ret = 0; 
qedi->fp_array = kcalloc(MIN_NUM_CPUS_MSIX(qedi), sizeof(struct qedi_fastpath), GFP_KERNEL); if (!qedi->fp_array) { QEDI_ERR(&qedi->dbg_ctx, "fastpath fp array allocation failed.\n"); return -ENOMEM; } qedi->sb_array = kcalloc(MIN_NUM_CPUS_MSIX(qedi), sizeof(struct qed_sb_info), GFP_KERNEL); if (!qedi->sb_array) { QEDI_ERR(&qedi->dbg_ctx, "fastpath sb array allocation failed.\n"); ret = -ENOMEM; goto free_fp; } return ret; free_fp: qedi_free_fp(qedi); return ret; } static void qedi_int_fp(struct qedi_ctx *qedi) { struct qedi_fastpath *fp; int id; memset(qedi->fp_array, 0, MIN_NUM_CPUS_MSIX(qedi) * sizeof(*qedi->fp_array)); memset(qedi->sb_array, 0, MIN_NUM_CPUS_MSIX(qedi) * sizeof(*qedi->sb_array)); for (id = 0; id < MIN_NUM_CPUS_MSIX(qedi); id++) { fp = &qedi->fp_array[id]; fp->sb_info = &qedi->sb_array[id]; fp->sb_id = id; fp->qedi = qedi; snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", "qedi", id); /* fp_array[i] ---- irq cookie * So init data which is needed in int ctx */ } } static int qedi_prepare_fp(struct qedi_ctx *qedi) { struct qedi_fastpath *fp; int id, ret = 0; ret = qedi_alloc_fp(qedi); if (ret) goto err; qedi_int_fp(qedi); for (id = 0; id < MIN_NUM_CPUS_MSIX(qedi); id++) { fp = &qedi->fp_array[id]; ret = qedi_alloc_and_init_sb(qedi, fp->sb_info, fp->sb_id); if (ret) { QEDI_ERR(&qedi->dbg_ctx, "SB allocation and initialization failed.\n"); ret = -EIO; goto err_init; } } return 0; err_init: qedi_free_sb(qedi); qedi_free_fp(qedi); err: return ret; } static int qedi_setup_cid_que(struct qedi_ctx *qedi) { int i; qedi->cid_que.cid_que_base = kmalloc_array(qedi->max_active_conns, sizeof(u32), GFP_KERNEL); if (!qedi->cid_que.cid_que_base) return -ENOMEM; qedi->cid_que.conn_cid_tbl = kmalloc_array(qedi->max_active_conns, sizeof(struct qedi_conn *), GFP_KERNEL); if (!qedi->cid_que.conn_cid_tbl) { kfree(qedi->cid_que.cid_que_base); qedi->cid_que.cid_que_base = NULL; return -ENOMEM; } qedi->cid_que.cid_que = (u32 *)qedi->cid_que.cid_que_base; qedi->cid_que.cid_q_prod_idx = 0; qedi->cid_que.cid_q_cons_idx = 0; qedi->cid_que.cid_q_max_idx = qedi->max_active_conns; qedi->cid_que.cid_free_cnt = qedi->max_active_conns; for (i = 0; i < qedi->max_active_conns; i++) { qedi->cid_que.cid_que[i] = i; qedi->cid_que.conn_cid_tbl[i] = NULL; } return 0; } static void qedi_release_cid_que(struct qedi_ctx *qedi) { kfree(qedi->cid_que.cid_que_base); qedi->cid_que.cid_que_base = NULL; kfree(qedi->cid_que.conn_cid_tbl); qedi->cid_que.conn_cid_tbl = NULL; } static int qedi_init_id_tbl(struct qedi_portid_tbl *id_tbl, u16 size, u16 start_id, u16 next) { id_tbl->start = start_id; id_tbl->max = size; id_tbl->next = next; spin_lock_init(&id_tbl->lock); id_tbl->table = kcalloc(BITS_TO_LONGS(size), sizeof(long), GFP_KERNEL); if (!id_tbl->table) return -ENOMEM; return 0; } static void qedi_free_id_tbl(struct qedi_portid_tbl *id_tbl) { kfree(id_tbl->table); id_tbl->table = NULL; } int qedi_alloc_id(struct qedi_portid_tbl *id_tbl, u16 id) { int ret = -1; id -= id_tbl->start; if (id >= id_tbl->max) return ret; spin_lock(&id_tbl->lock); if (!test_bit(id, id_tbl->table)) { set_bit(id, id_tbl->table); ret = 0; } spin_unlock(&id_tbl->lock); return ret; } u16 qedi_alloc_new_id(struct qedi_portid_tbl *id_tbl) { u16 id; spin_lock(&id_tbl->lock); id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next); if (id >= id_tbl->max) { id = QEDI_LOCAL_PORT_INVALID; if (id_tbl->next != 0) { id = find_first_zero_bit(id_tbl->table, id_tbl->next); if (id >= id_tbl->next) id = QEDI_LOCAL_PORT_INVALID; } } if (id < id_tbl->max) { 
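/* Claim the free bit and advance the search hint; the wrap below applies a power-of-two mask derived from id_tbl->max. */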
set_bit(id, id_tbl->table); id_tbl->next = (id + 1) & (id_tbl->max - 1); id += id_tbl->start; } spin_unlock(&id_tbl->lock); return id; } void qedi_free_id(struct qedi_portid_tbl *id_tbl, u16 id) { if (id == QEDI_LOCAL_PORT_INVALID) return; id -= id_tbl->start; if (id >= id_tbl->max) return; clear_bit(id, id_tbl->table); } static void qedi_cm_free_mem(struct qedi_ctx *qedi) { kfree(qedi->ep_tbl); qedi->ep_tbl = NULL; qedi_free_id_tbl(&qedi->lcl_port_tbl); } static int qedi_cm_alloc_mem(struct qedi_ctx *qedi) { u16 port_id; qedi->ep_tbl = kzalloc((qedi->max_active_conns * sizeof(struct qedi_endpoint *)), GFP_KERNEL); if (!qedi->ep_tbl) return -ENOMEM; port_id = get_random_u32_below(QEDI_LOCAL_PORT_RANGE); if (qedi_init_id_tbl(&qedi->lcl_port_tbl, QEDI_LOCAL_PORT_RANGE, QEDI_LOCAL_PORT_MIN, port_id)) { qedi_cm_free_mem(qedi); return -ENOMEM; } return 0; } static struct qedi_ctx *qedi_host_alloc(struct pci_dev *pdev) { struct Scsi_Host *shost; struct qedi_ctx *qedi = NULL; shost = iscsi_host_alloc(&qedi_host_template, sizeof(struct qedi_ctx), 0); if (!shost) { QEDI_ERR(NULL, "Could not allocate shost\n"); goto exit_setup_shost; } shost->max_id = QEDI_MAX_ISCSI_CONNS_PER_HBA - 1; shost->max_channel = 0; shost->max_lun = ~0; shost->max_cmd_len = 16; shost->transportt = qedi_scsi_transport; qedi = iscsi_host_priv(shost); memset(qedi, 0, sizeof(*qedi)); qedi->shost = shost; qedi->dbg_ctx.host_no = shost->host_no; qedi->pdev = pdev; qedi->dbg_ctx.pdev = pdev; qedi->max_active_conns = ISCSI_MAX_SESS_PER_HBA; qedi->max_sqes = QEDI_SQ_SIZE; shost->nr_hw_queues = MIN_NUM_CPUS_MSIX(qedi); pci_set_drvdata(pdev, qedi); exit_setup_shost: return qedi; } static int qedi_ll2_rx(void *cookie, struct sk_buff *skb, u32 arg1, u32 arg2) { struct qedi_ctx *qedi = (struct qedi_ctx *)cookie; struct skb_work_list *work; struct ethhdr *eh; if (!qedi) { QEDI_ERR(NULL, "qedi is NULL\n"); return -1; } if (!test_bit(UIO_DEV_OPENED, &qedi->flags)) { QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_UIO, "UIO DEV is not opened\n"); kfree_skb(skb); return 0; } eh = (struct ethhdr *)skb->data; /* Undo VLAN encapsulation */ if (eh->h_proto == htons(ETH_P_8021Q)) { memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2); eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN); skb_reset_mac_header(skb); } /* Filter out non FIP/FCoE frames here to free them faster */ if (eh->h_proto != htons(ETH_P_ARP) && eh->h_proto != htons(ETH_P_IP) && eh->h_proto != htons(ETH_P_IPV6)) { QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_LL2, "Dropping frame ethertype [0x%x] len [0x%x].\n", eh->h_proto, skb->len); kfree_skb(skb); return 0; } QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_LL2, "Allowed frame ethertype [0x%x] len [0x%x].\n", eh->h_proto, skb->len); work = kzalloc(sizeof(*work), GFP_ATOMIC); if (!work) { QEDI_WARN(&qedi->dbg_ctx, "Could not allocate work so dropping frame.\n"); kfree_skb(skb); return 0; } INIT_LIST_HEAD(&work->list); work->skb = skb; if (skb_vlan_tag_present(skb)) work->vlan_id = skb_vlan_tag_get(skb); if (work->vlan_id) __vlan_insert_tag(work->skb, htons(ETH_P_8021Q), work->vlan_id); spin_lock_bh(&qedi->ll2_lock); list_add_tail(&work->list, &qedi->ll2_skb_list); spin_unlock_bh(&qedi->ll2_lock); wake_up_process(qedi->ll2_recv_thread); return 0; } /* map this skb to iscsiuio mmaped region */ static int qedi_ll2_process_skb(struct qedi_ctx *qedi, struct sk_buff *skb, u16 vlan_id) { struct qedi_uio_dev *udev = NULL; struct qedi_uio_ctrl *uctrl = NULL; struct qedi_rx_bd rxbd; struct qedi_rx_bd *p_rxbd; u32 rx_bd_prod; void *pkt; int len = 0; u32 prod; if (!qedi) { 
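/* Defensive check only; the ll2 receive thread always passes the adapter context it was created with. */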
QEDI_ERR(NULL, "qedi is NULL\n"); return -1; } udev = qedi->udev; uctrl = udev->uctrl; ++uctrl->hw_rx_prod_cnt; prod = (uctrl->hw_rx_prod + 1) % RX_RING; pkt = udev->rx_pkt + (prod * qedi_ll2_buf_size); len = min_t(u32, skb->len, (u32)qedi_ll2_buf_size); memcpy(pkt, skb->data, len); memset(&rxbd, 0, sizeof(rxbd)); rxbd.rx_pkt_index = prod; rxbd.rx_pkt_len = len; rxbd.vlan_id = vlan_id; uctrl->hw_rx_bd_prod = (uctrl->hw_rx_bd_prod + 1) % QEDI_NUM_RX_BD; rx_bd_prod = uctrl->hw_rx_bd_prod; p_rxbd = (struct qedi_rx_bd *)udev->ll2_ring; p_rxbd += rx_bd_prod; memcpy(p_rxbd, &rxbd, sizeof(rxbd)); QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_LL2, "hw_rx_prod [%d] prod [%d] hw_rx_bd_prod [%d] rx_pkt_idx [%d] rx_len [%d].\n", uctrl->hw_rx_prod, prod, uctrl->hw_rx_bd_prod, rxbd.rx_pkt_index, rxbd.rx_pkt_len); QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_LL2, "host_rx_cons [%d] hw_rx_bd_cons [%d].\n", uctrl->host_rx_cons, uctrl->host_rx_bd_cons); uctrl->hw_rx_prod = prod; /* notify the iscsiuio about new packet */ uio_event_notify(&udev->qedi_uinfo); return 0; } static void qedi_ll2_free_skbs(struct qedi_ctx *qedi) { struct skb_work_list *work, *work_tmp; spin_lock_bh(&qedi->ll2_lock); list_for_each_entry_safe(work, work_tmp, &qedi->ll2_skb_list, list) { list_del(&work->list); kfree_skb(work->skb); kfree(work); } spin_unlock_bh(&qedi->ll2_lock); } static int qedi_ll2_recv_thread(void *arg) { struct qedi_ctx *qedi = (struct qedi_ctx *)arg; struct skb_work_list *work, *work_tmp; set_user_nice(current, -20); while (!kthread_should_stop()) { spin_lock_bh(&qedi->ll2_lock); list_for_each_entry_safe(work, work_tmp, &qedi->ll2_skb_list, list) { list_del(&work->list); qedi_ll2_process_skb(qedi, work->skb, work->vlan_id); kfree_skb(work->skb); kfree(work); } set_current_state(TASK_INTERRUPTIBLE); spin_unlock_bh(&qedi->ll2_lock); schedule(); } __set_current_state(TASK_RUNNING); return 0; } static int qedi_set_iscsi_pf_param(struct qedi_ctx *qedi) { u8 num_sq_pages; u32 log_page_size; int rval = 0; num_sq_pages = (MAX_OUTSTANDING_TASKS_PER_CON * 8) / QEDI_PAGE_SIZE; qedi->num_queues = MIN_NUM_CPUS_MSIX(qedi); QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "Number of CQ count is %d\n", qedi->num_queues); memset(&qedi->pf_params.iscsi_pf_params, 0, sizeof(qedi->pf_params.iscsi_pf_params)); qedi->p_cpuq = dma_alloc_coherent(&qedi->pdev->dev, qedi->num_queues * sizeof(struct qedi_glbl_q_params), &qedi->hw_p_cpuq, GFP_KERNEL); if (!qedi->p_cpuq) { QEDI_ERR(&qedi->dbg_ctx, "dma_alloc_coherent fail\n"); rval = -1; goto err_alloc_mem; } rval = qedi_alloc_global_queues(qedi); if (rval) { QEDI_ERR(&qedi->dbg_ctx, "Global queue allocation failed.\n"); rval = -1; goto err_alloc_mem; } qedi->pf_params.iscsi_pf_params.num_cons = QEDI_MAX_ISCSI_CONNS_PER_HBA; qedi->pf_params.iscsi_pf_params.num_tasks = QEDI_MAX_ISCSI_TASK; qedi->pf_params.iscsi_pf_params.half_way_close_timeout = 10; qedi->pf_params.iscsi_pf_params.num_sq_pages_in_ring = num_sq_pages; qedi->pf_params.iscsi_pf_params.num_r2tq_pages_in_ring = num_sq_pages; qedi->pf_params.iscsi_pf_params.num_uhq_pages_in_ring = num_sq_pages; qedi->pf_params.iscsi_pf_params.num_queues = qedi->num_queues; qedi->pf_params.iscsi_pf_params.debug_mode = qedi_fw_debug; qedi->pf_params.iscsi_pf_params.two_msl_timer = QED_TWO_MSL_TIMER_DFLT; qedi->pf_params.iscsi_pf_params.tx_sws_timer = QED_TX_SWS_TIMER_DFLT; qedi->pf_params.iscsi_pf_params.max_fin_rt = 2; for (log_page_size = 0 ; log_page_size < 32 ; log_page_size++) { if ((1 << log_page_size) == QEDI_PAGE_SIZE) break; } 
qedi->pf_params.iscsi_pf_params.log_page_size = log_page_size; qedi->pf_params.iscsi_pf_params.glbl_q_params_addr = (u64)qedi->hw_p_cpuq; /* RQ BDQ initializations. * rq_num_entries: suggested value for Initiator is 16 (4KB RQ) * rqe_log_size: 8 for 256B RQE */ qedi->pf_params.iscsi_pf_params.rqe_log_size = 8; /* BDQ address and size */ qedi->pf_params.iscsi_pf_params.bdq_pbl_base_addr[BDQ_ID_RQ] = qedi->bdq_pbl_list_dma; qedi->pf_params.iscsi_pf_params.bdq_pbl_num_entries[BDQ_ID_RQ] = qedi->bdq_pbl_list_num_entries; qedi->pf_params.iscsi_pf_params.rq_buffer_size = QEDI_BDQ_BUF_SIZE; /* cq_num_entries: num_tasks + rq_num_entries */ qedi->pf_params.iscsi_pf_params.cq_num_entries = 2048; qedi->pf_params.iscsi_pf_params.gl_rq_pi = QEDI_PROTO_CQ_PROD_IDX; qedi->pf_params.iscsi_pf_params.gl_cmd_pi = 1; err_alloc_mem: return rval; } /* Free DMA coherent memory for array of queue pointers we pass to qed */ static void qedi_free_iscsi_pf_param(struct qedi_ctx *qedi) { size_t size = 0; if (qedi->p_cpuq) { size = qedi->num_queues * sizeof(struct qedi_glbl_q_params); dma_free_coherent(&qedi->pdev->dev, size, qedi->p_cpuq, qedi->hw_p_cpuq); } qedi_free_global_queues(qedi); kfree(qedi->global_queues); } static void qedi_get_boot_tgt_info(struct nvm_iscsi_block *block, struct qedi_boot_target *tgt, u8 index) { u32 ipv6_en; ipv6_en = !!(block->generic.ctrl_flags & NVM_ISCSI_CFG_GEN_IPV6_ENABLED); snprintf(tgt->iscsi_name, sizeof(tgt->iscsi_name), "%s", block->target[index].target_name.byte); tgt->ipv6_en = ipv6_en; if (ipv6_en) snprintf(tgt->ip_addr, IPV6_LEN, "%pI6\n", block->target[index].ipv6_addr.byte); else snprintf(tgt->ip_addr, IPV4_LEN, "%pI4\n", block->target[index].ipv4_addr.byte); } static int qedi_find_boot_info(struct qedi_ctx *qedi, struct qed_mfw_tlv_iscsi *iscsi, struct nvm_iscsi_block *block) { struct qedi_boot_target *pri_tgt = NULL, *sec_tgt = NULL; u32 pri_ctrl_flags = 0, sec_ctrl_flags = 0, found = 0; struct iscsi_cls_session *cls_sess; struct iscsi_cls_conn *cls_conn; struct qedi_conn *qedi_conn; struct iscsi_session *sess; struct iscsi_conn *conn; char ep_ip_addr[64]; int i, ret = 0; pri_ctrl_flags = !!(block->target[0].ctrl_flags & NVM_ISCSI_CFG_TARGET_ENABLED); if (pri_ctrl_flags) { pri_tgt = kzalloc(sizeof(*pri_tgt), GFP_KERNEL); if (!pri_tgt) return -1; qedi_get_boot_tgt_info(block, pri_tgt, 0); } sec_ctrl_flags = !!(block->target[1].ctrl_flags & NVM_ISCSI_CFG_TARGET_ENABLED); if (sec_ctrl_flags) { sec_tgt = kzalloc(sizeof(*sec_tgt), GFP_KERNEL); if (!sec_tgt) { ret = -1; goto free_tgt; } qedi_get_boot_tgt_info(block, sec_tgt, 1); } for (i = 0; i < qedi->max_active_conns; i++) { qedi_conn = qedi_get_conn_from_id(qedi, i); if (!qedi_conn) continue; if (qedi_conn->ep->ip_type == TCP_IPV4) snprintf(ep_ip_addr, IPV4_LEN, "%pI4\n", qedi_conn->ep->dst_addr); else snprintf(ep_ip_addr, IPV6_LEN, "%pI6\n", qedi_conn->ep->dst_addr); cls_conn = qedi_conn->cls_conn; conn = cls_conn->dd_data; cls_sess = iscsi_conn_to_session(cls_conn); sess = cls_sess->dd_data; if (!iscsi_is_session_online(cls_sess)) continue; if (!sess->targetname) continue; if (pri_ctrl_flags) { if (!strcmp(pri_tgt->iscsi_name, sess->targetname) && !strcmp(pri_tgt->ip_addr, ep_ip_addr)) { found = 1; break; } } if (sec_ctrl_flags) { if (!strcmp(sec_tgt->iscsi_name, sess->targetname) && !strcmp(sec_tgt->ip_addr, ep_ip_addr)) { found = 1; break; } } } if (found) { if (conn->hdrdgst_en) { iscsi->header_digest_set = true; iscsi->header_digest = 1; } if (conn->datadgst_en) { iscsi->data_digest_set = true; iscsi->data_digest = 
1; } iscsi->boot_taget_portal_set = true; iscsi->boot_taget_portal = sess->tpgt; } else { ret = -1; } if (sec_ctrl_flags) kfree(sec_tgt); free_tgt: if (pri_ctrl_flags) kfree(pri_tgt); return ret; } static void qedi_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data) { struct qedi_ctx *qedi; if (!dev) { QEDI_INFO(NULL, QEDI_LOG_EVT, "dev is NULL so ignoring get_generic_tlv_data request.\n"); return; } qedi = (struct qedi_ctx *)dev; memset(data, 0, sizeof(struct qed_generic_tlvs)); ether_addr_copy(data->mac[0], qedi->mac); } /* * Protocol TLV handler */ static void qedi_get_protocol_tlv_data(void *dev, void *data) { struct qed_mfw_tlv_iscsi *iscsi = data; struct qed_iscsi_stats *fw_iscsi_stats; struct nvm_iscsi_block *block = NULL; u32 chap_en = 0, mchap_en = 0; struct qedi_ctx *qedi = dev; int rval = 0; fw_iscsi_stats = kmalloc(sizeof(*fw_iscsi_stats), GFP_KERNEL); if (!fw_iscsi_stats) { QEDI_ERR(&qedi->dbg_ctx, "Could not allocate memory for fw_iscsi_stats.\n"); goto exit_get_data; } mutex_lock(&qedi->stats_lock); /* Query firmware for offload stats */ qedi_ops->get_stats(qedi->cdev, fw_iscsi_stats); mutex_unlock(&qedi->stats_lock); iscsi->rx_frames_set = true; iscsi->rx_frames = fw_iscsi_stats->iscsi_rx_packet_cnt; iscsi->rx_bytes_set = true; iscsi->rx_bytes = fw_iscsi_stats->iscsi_rx_bytes_cnt; iscsi->tx_frames_set = true; iscsi->tx_frames = fw_iscsi_stats->iscsi_tx_packet_cnt; iscsi->tx_bytes_set = true; iscsi->tx_bytes = fw_iscsi_stats->iscsi_tx_bytes_cnt; iscsi->frame_size_set = true; iscsi->frame_size = qedi->ll2_mtu; block = qedi_get_nvram_block(qedi); if (block) { chap_en = !!(block->generic.ctrl_flags & NVM_ISCSI_CFG_GEN_CHAP_ENABLED); mchap_en = !!(block->generic.ctrl_flags & NVM_ISCSI_CFG_GEN_CHAP_MUTUAL_ENABLED); iscsi->auth_method_set = (chap_en || mchap_en) ? 
true : false; iscsi->auth_method = 1; if (chap_en) iscsi->auth_method = 2; if (mchap_en) iscsi->auth_method = 3; iscsi->tx_desc_size_set = true; iscsi->tx_desc_size = QEDI_SQ_SIZE; iscsi->rx_desc_size_set = true; iscsi->rx_desc_size = QEDI_CQ_SIZE; /* tpgt, hdr digest, data digest */ rval = qedi_find_boot_info(qedi, iscsi, block); if (rval) QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "Boot target not set"); } kfree(fw_iscsi_stats); exit_get_data: return; } void qedi_schedule_hw_err_handler(void *dev, enum qed_hw_err_type err_type) { struct qedi_ctx *qedi = (struct qedi_ctx *)dev; unsigned long override_flags = qedi_flags_override; if (override_flags && test_bit(QEDI_ERR_OVERRIDE_EN, &override_flags)) qedi->qedi_err_flags = qedi_flags_override; QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "HW error handler scheduled, err=%d err_flags=0x%x\n", err_type, qedi->qedi_err_flags); switch (err_type) { case QED_HW_ERR_FAN_FAIL: schedule_delayed_work(&qedi->board_disable_work, 0); break; case QED_HW_ERR_MFW_RESP_FAIL: case QED_HW_ERR_HW_ATTN: case QED_HW_ERR_DMAE_FAIL: case QED_HW_ERR_RAMROD_FAIL: case QED_HW_ERR_FW_ASSERT: /* Prevent HW attentions from being reasserted */ if (test_bit(QEDI_ERR_ATTN_CLR_EN, &qedi->qedi_err_flags)) qedi_ops->common->attn_clr_enable(qedi->cdev, true); if (err_type == QED_HW_ERR_RAMROD_FAIL && test_bit(QEDI_ERR_IS_RECOVERABLE, &qedi->qedi_err_flags)) qedi_ops->common->recovery_process(qedi->cdev); break; default: break; } } static void qedi_schedule_recovery_handler(void *dev) { struct qedi_ctx *qedi = dev; QEDI_ERR(&qedi->dbg_ctx, "Recovery handler scheduled.\n"); if (test_and_set_bit(QEDI_IN_RECOVERY, &qedi->flags)) return; atomic_set(&qedi->link_state, QEDI_LINK_DOWN); schedule_delayed_work(&qedi->recovery_work, 0); } static void qedi_set_conn_recovery(struct iscsi_cls_session *cls_session) { struct iscsi_session *session = cls_session->dd_data; struct iscsi_conn *conn = session->leadconn; struct qedi_conn *qedi_conn = conn->dd_data; qedi_start_conn_recovery(qedi_conn->qedi, qedi_conn); } static void qedi_link_update(void *dev, struct qed_link_output *link) { struct qedi_ctx *qedi = (struct qedi_ctx *)dev; if (link->link_up) { QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "Link Up event.\n"); atomic_set(&qedi->link_state, QEDI_LINK_UP); } else { QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "Link Down event.\n"); atomic_set(&qedi->link_state, QEDI_LINK_DOWN); iscsi_host_for_each_session(qedi->shost, qedi_set_conn_recovery); } } static struct qed_iscsi_cb_ops qedi_cb_ops = { { .link_update = qedi_link_update, .schedule_recovery_handler = qedi_schedule_recovery_handler, .schedule_hw_err_handler = qedi_schedule_hw_err_handler, .get_protocol_tlv_data = qedi_get_protocol_tlv_data, .get_generic_tlv_data = qedi_get_generic_tlv_data, } }; static int qedi_queue_cqe(struct qedi_ctx *qedi, union iscsi_cqe *cqe, u16 que_idx, struct qedi_percpu_s *p) { struct qedi_work *qedi_work; struct qedi_conn *q_conn; struct qedi_cmd *qedi_cmd; u32 iscsi_cid; int rc = 0; iscsi_cid = cqe->cqe_common.conn_id; q_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid]; if (!q_conn) { QEDI_WARN(&qedi->dbg_ctx, "Session no longer exists for cid=0x%x!!\n", iscsi_cid); return -1; } switch (cqe->cqe_common.cqe_type) { case ISCSI_CQE_TYPE_SOLICITED: case ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE: qedi_cmd = qedi_get_cmd_from_tid(qedi, cqe->cqe_solicited.itid); if (!qedi_cmd) { rc = -1; break; } INIT_LIST_HEAD(&qedi_cmd->cqe_work.list); qedi_cmd->cqe_work.qedi = qedi; memcpy(&qedi_cmd->cqe_work.cqe, cqe, sizeof(union iscsi_cqe)); 
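/* Solicited completions reuse the work entry embedded in the command, so no allocation is needed on this path (unlike the unsolicited case below). */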
qedi_cmd->cqe_work.que_idx = que_idx; qedi_cmd->cqe_work.is_solicited = true; list_add_tail(&qedi_cmd->cqe_work.list, &p->work_list); break; case ISCSI_CQE_TYPE_UNSOLICITED: case ISCSI_CQE_TYPE_DUMMY: case ISCSI_CQE_TYPE_TASK_CLEANUP: qedi_work = kzalloc(sizeof(*qedi_work), GFP_ATOMIC); if (!qedi_work) { rc = -1; break; } INIT_LIST_HEAD(&qedi_work->list); qedi_work->qedi = qedi; memcpy(&qedi_work->cqe, cqe, sizeof(union iscsi_cqe)); qedi_work->que_idx = que_idx; qedi_work->is_solicited = false; list_add_tail(&qedi_work->list, &p->work_list); break; default: rc = -1; QEDI_ERR(&qedi->dbg_ctx, "FW Error cqe.\n"); } return rc; } static bool qedi_process_completions(struct qedi_fastpath *fp) { struct qedi_ctx *qedi = fp->qedi; struct qed_sb_info *sb_info = fp->sb_info; struct status_block *sb = sb_info->sb_virt; struct qedi_percpu_s *p = NULL; struct global_queue *que; u16 prod_idx; unsigned long flags; union iscsi_cqe *cqe; int cpu; int ret; /* Get the current firmware producer index */ prod_idx = sb->pi_array[QEDI_PROTO_CQ_PROD_IDX]; if (prod_idx >= QEDI_CQ_SIZE) prod_idx = prod_idx % QEDI_CQ_SIZE; que = qedi->global_queues[fp->sb_id]; QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, "Before: global queue=%p prod_idx=%d cons_idx=%d, sb_id=%d\n", que, prod_idx, que->cq_cons_idx, fp->sb_id); qedi->intr_cpu = fp->sb_id; cpu = smp_processor_id(); p = &per_cpu(qedi_percpu, cpu); if (unlikely(!p->iothread)) WARN_ON(1); spin_lock_irqsave(&p->p_work_lock, flags); while (que->cq_cons_idx != prod_idx) { cqe = &que->cq[que->cq_cons_idx]; QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, "cqe=%p prod_idx=%d cons_idx=%d.\n", cqe, prod_idx, que->cq_cons_idx); ret = qedi_queue_cqe(qedi, cqe, fp->sb_id, p); if (ret) QEDI_WARN(&qedi->dbg_ctx, "Dropping CQE 0x%x for cid=0x%x.\n", que->cq_cons_idx, cqe->cqe_common.conn_id); que->cq_cons_idx++; if (que->cq_cons_idx == QEDI_CQ_SIZE) que->cq_cons_idx = 0; } wake_up_process(p->iothread); spin_unlock_irqrestore(&p->p_work_lock, flags); return true; } static bool qedi_fp_has_work(struct qedi_fastpath *fp) { struct qedi_ctx *qedi = fp->qedi; struct global_queue *que; struct qed_sb_info *sb_info = fp->sb_info; struct status_block *sb = sb_info->sb_virt; u16 prod_idx; barrier(); /* Get the current firmware producer index */ prod_idx = sb->pi_array[QEDI_PROTO_CQ_PROD_IDX]; /* Get the pointer to the global CQ this completion is on */ que = qedi->global_queues[fp->sb_id]; /* prod idx wrap around uint16 */ if (prod_idx >= QEDI_CQ_SIZE) prod_idx = prod_idx % QEDI_CQ_SIZE; return (que->cq_cons_idx != prod_idx); } /* MSI-X fastpath handler code */ static irqreturn_t qedi_msix_handler(int irq, void *dev_id) { struct qedi_fastpath *fp = dev_id; struct qedi_ctx *qedi = fp->qedi; bool wake_io_thread = true; qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0); process_again: wake_io_thread = qedi_process_completions(fp); if (wake_io_thread) { QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC, "process already running\n"); } if (!qedi_fp_has_work(fp)) qed_sb_update_sb_idx(fp->sb_info); /* Check for more work */ rmb(); if (!qedi_fp_has_work(fp)) qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1); else goto process_again; return IRQ_HANDLED; } /* simd handler for MSI/INTa */ static void qedi_simd_int_handler(void *cookie) { /* Cookie is qedi_ctx struct */ struct qedi_ctx *qedi = (struct qedi_ctx *)cookie; QEDI_WARN(&qedi->dbg_ctx, "qedi=%p.\n", qedi); } #define QEDI_SIMD_HANDLER_NUM 0 static void qedi_sync_free_irqs(struct qedi_ctx *qedi) { int i; u16 idx; if (qedi->int_info.msix_cnt) { for (i = 0; i < qedi->int_info.used_cnt; 
i++) { idx = i * qedi->dev_info.common.num_hwfns + qedi_ops->common->get_affin_hwfn_idx(qedi->cdev); QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "Freeing IRQ #%d vector_idx=%d.\n", i, idx); synchronize_irq(qedi->int_info.msix[idx].vector); irq_set_affinity_hint(qedi->int_info.msix[idx].vector, NULL); free_irq(qedi->int_info.msix[idx].vector, &qedi->fp_array[i]); } } else { qedi_ops->common->simd_handler_clean(qedi->cdev, QEDI_SIMD_HANDLER_NUM); } qedi->int_info.used_cnt = 0; qedi_ops->common->set_fp_int(qedi->cdev, 0); } static int qedi_request_msix_irq(struct qedi_ctx *qedi) { int i, rc, cpu; u16 idx; cpu = cpumask_first(cpu_online_mask); for (i = 0; i < qedi->msix_count; i++) { idx = i * qedi->dev_info.common.num_hwfns + qedi_ops->common->get_affin_hwfn_idx(qedi->cdev); QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "dev_info: num_hwfns=%d affin_hwfn_idx=%d.\n", qedi->dev_info.common.num_hwfns, qedi_ops->common->get_affin_hwfn_idx(qedi->cdev)); rc = request_irq(qedi->int_info.msix[idx].vector, qedi_msix_handler, 0, "qedi", &qedi->fp_array[i]); if (rc) { QEDI_WARN(&qedi->dbg_ctx, "request_irq failed.\n"); qedi_sync_free_irqs(qedi); return rc; } qedi->int_info.used_cnt++; rc = irq_set_affinity_hint(qedi->int_info.msix[idx].vector, get_cpu_mask(cpu)); cpu = cpumask_next(cpu, cpu_online_mask); } return 0; } static int qedi_setup_int(struct qedi_ctx *qedi) { int rc = 0; rc = qedi_ops->common->set_fp_int(qedi->cdev, qedi->num_queues); if (rc < 0) goto exit_setup_int; qedi->msix_count = rc; rc = qedi_ops->common->get_fp_int(qedi->cdev, &qedi->int_info); if (rc) goto exit_setup_int; QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC, "Number of msix_cnt = 0x%x num of cpus = 0x%x\n", qedi->int_info.msix_cnt, num_online_cpus()); if (qedi->int_info.msix_cnt) { rc = qedi_request_msix_irq(qedi); goto exit_setup_int; } else { qedi_ops->common->simd_handler_config(qedi->cdev, &qedi, QEDI_SIMD_HANDLER_NUM, qedi_simd_int_handler); qedi->int_info.used_cnt = 1; } exit_setup_int: return rc; } static void qedi_free_nvm_iscsi_cfg(struct qedi_ctx *qedi) { if (qedi->iscsi_image) dma_free_coherent(&qedi->pdev->dev, sizeof(struct qedi_nvm_iscsi_image), qedi->iscsi_image, qedi->nvm_buf_dma); } static int qedi_alloc_nvm_iscsi_cfg(struct qedi_ctx *qedi) { qedi->iscsi_image = dma_alloc_coherent(&qedi->pdev->dev, sizeof(struct qedi_nvm_iscsi_image), &qedi->nvm_buf_dma, GFP_KERNEL); if (!qedi->iscsi_image) { QEDI_ERR(&qedi->dbg_ctx, "Could not allocate NVM BUF.\n"); return -ENOMEM; } QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "NVM BUF addr=0x%p dma=0x%llx.\n", qedi->iscsi_image, qedi->nvm_buf_dma); return 0; } static void qedi_free_bdq(struct qedi_ctx *qedi) { int i; if (qedi->bdq_pbl_list) dma_free_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE, qedi->bdq_pbl_list, qedi->bdq_pbl_list_dma); if (qedi->bdq_pbl) dma_free_coherent(&qedi->pdev->dev, qedi->bdq_pbl_mem_size, qedi->bdq_pbl, qedi->bdq_pbl_dma); for (i = 0; i < QEDI_BDQ_NUM; i++) { if (qedi->bdq[i].buf_addr) { dma_free_coherent(&qedi->pdev->dev, QEDI_BDQ_BUF_SIZE, qedi->bdq[i].buf_addr, qedi->bdq[i].buf_dma); } } } static void qedi_free_global_queues(struct qedi_ctx *qedi) { int i; struct global_queue **gl = qedi->global_queues; for (i = 0; i < qedi->num_queues; i++) { if (!gl[i]) continue; if (gl[i]->cq) dma_free_coherent(&qedi->pdev->dev, gl[i]->cq_mem_size, gl[i]->cq, gl[i]->cq_dma); if (gl[i]->cq_pbl) dma_free_coherent(&qedi->pdev->dev, gl[i]->cq_pbl_size, gl[i]->cq_pbl, gl[i]->cq_pbl_dma); kfree(gl[i]); } qedi_free_bdq(qedi); qedi_free_nvm_iscsi_cfg(qedi); } static int 
qedi_alloc_bdq(struct qedi_ctx *qedi) { int i; struct scsi_bd *pbl; u64 *list; /* Alloc dma memory for BDQ buffers */ for (i = 0; i < QEDI_BDQ_NUM; i++) { qedi->bdq[i].buf_addr = dma_alloc_coherent(&qedi->pdev->dev, QEDI_BDQ_BUF_SIZE, &qedi->bdq[i].buf_dma, GFP_KERNEL); if (!qedi->bdq[i].buf_addr) { QEDI_ERR(&qedi->dbg_ctx, "Could not allocate BDQ buffer %d.\n", i); return -ENOMEM; } } /* Alloc dma memory for BDQ page buffer list */ qedi->bdq_pbl_mem_size = QEDI_BDQ_NUM * sizeof(struct scsi_bd); qedi->bdq_pbl_mem_size = ALIGN(qedi->bdq_pbl_mem_size, QEDI_PAGE_SIZE); qedi->rq_num_entries = qedi->bdq_pbl_mem_size / sizeof(struct scsi_bd); QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, "rq_num_entries = %d.\n", qedi->rq_num_entries); qedi->bdq_pbl = dma_alloc_coherent(&qedi->pdev->dev, qedi->bdq_pbl_mem_size, &qedi->bdq_pbl_dma, GFP_KERNEL); if (!qedi->bdq_pbl) { QEDI_ERR(&qedi->dbg_ctx, "Could not allocate BDQ PBL.\n"); return -ENOMEM; } /* * Populate BDQ PBL with physical and virtual address of individual * BDQ buffers */ pbl = (struct scsi_bd *)qedi->bdq_pbl; for (i = 0; i < QEDI_BDQ_NUM; i++) { pbl->address.hi = cpu_to_le32(QEDI_U64_HI(qedi->bdq[i].buf_dma)); pbl->address.lo = cpu_to_le32(QEDI_U64_LO(qedi->bdq[i].buf_dma)); QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, "pbl [0x%p] pbl->address hi [0x%llx] lo [0x%llx], idx [%d]\n", pbl, pbl->address.hi, pbl->address.lo, i); pbl->opaque.iscsi_opaque.reserved_zero[0] = 0; pbl->opaque.iscsi_opaque.reserved_zero[1] = 0; pbl->opaque.iscsi_opaque.reserved_zero[2] = 0; pbl->opaque.iscsi_opaque.opaque = cpu_to_le16(i); pbl++; } /* Allocate list of PBL pages */ qedi->bdq_pbl_list = dma_alloc_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE, &qedi->bdq_pbl_list_dma, GFP_KERNEL); if (!qedi->bdq_pbl_list) { QEDI_ERR(&qedi->dbg_ctx, "Could not allocate list of PBL pages.\n"); return -ENOMEM; } /* * Now populate PBL list with pages that contain pointers to the * individual buffers. */ qedi->bdq_pbl_list_num_entries = qedi->bdq_pbl_mem_size / QEDI_PAGE_SIZE; list = (u64 *)qedi->bdq_pbl_list; for (i = 0; i < qedi->bdq_pbl_list_num_entries; i++) { *list = qedi->bdq_pbl_dma; list++; } return 0; } static int qedi_alloc_global_queues(struct qedi_ctx *qedi) { u32 *list; int i; int status; u32 *pbl; dma_addr_t page; int num_pages; /* * Number of global queues (CQ / RQ). This should * be <= number of available MSIX vectors for the PF */ if (!qedi->num_queues) { QEDI_ERR(&qedi->dbg_ctx, "No MSI-X vectors available!\n"); return -ENOMEM; } /* Make sure we allocated the PBL that will contain the physical * addresses of our queues */ if (!qedi->p_cpuq) { status = -EINVAL; goto mem_alloc_failure; } qedi->global_queues = kzalloc((sizeof(struct global_queue *) * qedi->num_queues), GFP_KERNEL); if (!qedi->global_queues) { QEDI_ERR(&qedi->dbg_ctx, "Unable to allocate global queues array ptr memory\n"); return -ENOMEM; } QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC, "qedi->global_queues=%p.\n", qedi->global_queues); /* Allocate DMA coherent buffers for BDQ */ status = qedi_alloc_bdq(qedi); if (status) goto mem_alloc_failure; /* Allocate DMA coherent buffers for NVM_ISCSI_CFG */ status = qedi_alloc_nvm_iscsi_cfg(qedi); if (status) goto mem_alloc_failure; /* Allocate a CQ and an associated PBL for each MSI-X * vector. 
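* Each CQ is sized for QEDI_CQ_SIZE entries plus a small cushion and padded by QEDI_PAGE_SIZE - 1 bytes; its PBL stores every CQ page's physical address as two 32-bit words.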
*/ for (i = 0; i < qedi->num_queues; i++) { qedi->global_queues[i] = kzalloc(sizeof(*qedi->global_queues[0]), GFP_KERNEL); if (!qedi->global_queues[i]) { QEDI_ERR(&qedi->dbg_ctx, "Unable to allocation global queue %d.\n", i); status = -ENOMEM; goto mem_alloc_failure; } qedi->global_queues[i]->cq_mem_size = (QEDI_CQ_SIZE + 8) * sizeof(union iscsi_cqe); qedi->global_queues[i]->cq_mem_size = (qedi->global_queues[i]->cq_mem_size + (QEDI_PAGE_SIZE - 1)); qedi->global_queues[i]->cq_pbl_size = (qedi->global_queues[i]->cq_mem_size / QEDI_PAGE_SIZE) * sizeof(void *); qedi->global_queues[i]->cq_pbl_size = (qedi->global_queues[i]->cq_pbl_size + (QEDI_PAGE_SIZE - 1)); qedi->global_queues[i]->cq = dma_alloc_coherent(&qedi->pdev->dev, qedi->global_queues[i]->cq_mem_size, &qedi->global_queues[i]->cq_dma, GFP_KERNEL); if (!qedi->global_queues[i]->cq) { QEDI_WARN(&qedi->dbg_ctx, "Could not allocate cq.\n"); status = -ENOMEM; goto mem_alloc_failure; } qedi->global_queues[i]->cq_pbl = dma_alloc_coherent(&qedi->pdev->dev, qedi->global_queues[i]->cq_pbl_size, &qedi->global_queues[i]->cq_pbl_dma, GFP_KERNEL); if (!qedi->global_queues[i]->cq_pbl) { QEDI_WARN(&qedi->dbg_ctx, "Could not allocate cq PBL.\n"); status = -ENOMEM; goto mem_alloc_failure; } /* Create PBL */ num_pages = qedi->global_queues[i]->cq_mem_size / QEDI_PAGE_SIZE; page = qedi->global_queues[i]->cq_dma; pbl = (u32 *)qedi->global_queues[i]->cq_pbl; while (num_pages--) { *pbl = (u32)page; pbl++; *pbl = (u32)((u64)page >> 32); pbl++; page += QEDI_PAGE_SIZE; } } list = (u32 *)qedi->p_cpuq; /* * The list is built as follows: CQ#0 PBL pointer, RQ#0 PBL pointer, * CQ#1 PBL pointer, RQ#1 PBL pointer, etc. Each PBL pointer points * to the physical address which contains an array of pointers to the * physical addresses of the specific queue pages. 
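* Only the CQ PBL addresses are populated here; the RQ PBL pointer slots in this list are written as zero.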
*/ for (i = 0; i < qedi->num_queues; i++) { *list = (u32)qedi->global_queues[i]->cq_pbl_dma; list++; *list = (u32)((u64)qedi->global_queues[i]->cq_pbl_dma >> 32); list++; *list = (u32)0; list++; *list = (u32)((u64)0 >> 32); list++; } return 0; mem_alloc_failure: qedi_free_global_queues(qedi); return status; } int qedi_alloc_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep) { int rval = 0; u32 *pbl; dma_addr_t page; int num_pages; if (!ep) return -EIO; /* Calculate appropriate queue and PBL sizes */ ep->sq_mem_size = QEDI_SQ_SIZE * sizeof(struct iscsi_wqe); ep->sq_mem_size += QEDI_PAGE_SIZE - 1; ep->sq_pbl_size = (ep->sq_mem_size / QEDI_PAGE_SIZE) * sizeof(void *); ep->sq_pbl_size = ep->sq_pbl_size + QEDI_PAGE_SIZE; ep->sq = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_mem_size, &ep->sq_dma, GFP_KERNEL); if (!ep->sq) { QEDI_WARN(&qedi->dbg_ctx, "Could not allocate send queue.\n"); rval = -ENOMEM; goto out; } ep->sq_pbl = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_pbl_size, &ep->sq_pbl_dma, GFP_KERNEL); if (!ep->sq_pbl) { QEDI_WARN(&qedi->dbg_ctx, "Could not allocate send queue PBL.\n"); rval = -ENOMEM; goto out_free_sq; } /* Create PBL */ num_pages = ep->sq_mem_size / QEDI_PAGE_SIZE; page = ep->sq_dma; pbl = (u32 *)ep->sq_pbl; while (num_pages--) { *pbl = (u32)page; pbl++; *pbl = (u32)((u64)page >> 32); pbl++; page += QEDI_PAGE_SIZE; } return rval; out_free_sq: dma_free_coherent(&qedi->pdev->dev, ep->sq_mem_size, ep->sq, ep->sq_dma); out: return rval; } void qedi_free_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep) { if (ep->sq_pbl) dma_free_coherent(&qedi->pdev->dev, ep->sq_pbl_size, ep->sq_pbl, ep->sq_pbl_dma); if (ep->sq) dma_free_coherent(&qedi->pdev->dev, ep->sq_mem_size, ep->sq, ep->sq_dma); } int qedi_get_task_idx(struct qedi_ctx *qedi) { s16 tmp_idx; again: tmp_idx = find_first_zero_bit(qedi->task_idx_map, MAX_ISCSI_TASK_ENTRIES); if (tmp_idx >= MAX_ISCSI_TASK_ENTRIES) { QEDI_ERR(&qedi->dbg_ctx, "FW task context pool is full.\n"); tmp_idx = -1; goto err_idx; } if (test_and_set_bit(tmp_idx, qedi->task_idx_map)) goto again; err_idx: return tmp_idx; } void qedi_clear_task_idx(struct qedi_ctx *qedi, int idx) { if (!test_and_clear_bit(idx, qedi->task_idx_map)) QEDI_ERR(&qedi->dbg_ctx, "FW task context, already cleared, tid=0x%x\n", idx); } void qedi_update_itt_map(struct qedi_ctx *qedi, u32 tid, u32 proto_itt, struct qedi_cmd *cmd) { qedi->itt_map[tid].itt = proto_itt; qedi->itt_map[tid].p_cmd = cmd; QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, "update itt map tid=0x%x, with proto itt=0x%x\n", tid, qedi->itt_map[tid].itt); } void qedi_get_task_tid(struct qedi_ctx *qedi, u32 itt, s16 *tid) { u16 i; for (i = 0; i < MAX_ISCSI_TASK_ENTRIES; i++) { if (qedi->itt_map[i].itt == itt) { *tid = i; QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, "Ref itt=0x%x, found at tid=0x%x\n", itt, *tid); return; } } WARN_ON(1); } void qedi_get_proto_itt(struct qedi_ctx *qedi, u32 tid, u32 *proto_itt) { *proto_itt = qedi->itt_map[tid].itt; QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, "Get itt map tid [0x%x with proto itt[0x%x]", tid, *proto_itt); } struct qedi_cmd *qedi_get_cmd_from_tid(struct qedi_ctx *qedi, u32 tid) { struct qedi_cmd *cmd = NULL; if (tid >= MAX_ISCSI_TASK_ENTRIES) return NULL; cmd = qedi->itt_map[tid].p_cmd; if (cmd->task_id != tid) return NULL; qedi->itt_map[tid].p_cmd = NULL; return cmd; } static int qedi_alloc_itt(struct qedi_ctx *qedi) { qedi->itt_map = kcalloc(MAX_ISCSI_TASK_ENTRIES, sizeof(struct qedi_itt_map), GFP_KERNEL); if (!qedi->itt_map) { QEDI_ERR(&qedi->dbg_ctx, "Unable to allocate itt 
map array memory\n"); return -ENOMEM; } return 0; } static void qedi_free_itt(struct qedi_ctx *qedi) { kfree(qedi->itt_map); } static struct qed_ll2_cb_ops qedi_ll2_cb_ops = { .rx_cb = qedi_ll2_rx, .tx_cb = NULL, }; static int qedi_percpu_io_thread(void *arg) { struct qedi_percpu_s *p = arg; struct qedi_work *work, *tmp; unsigned long flags; LIST_HEAD(work_list); set_user_nice(current, -20); while (!kthread_should_stop()) { spin_lock_irqsave(&p->p_work_lock, flags); while (!list_empty(&p->work_list)) { list_splice_init(&p->work_list, &work_list); spin_unlock_irqrestore(&p->p_work_lock, flags); list_for_each_entry_safe(work, tmp, &work_list, list) { list_del_init(&work->list); qedi_fp_process_cqes(work); if (!work->is_solicited) kfree(work); } cond_resched(); spin_lock_irqsave(&p->p_work_lock, flags); } set_current_state(TASK_INTERRUPTIBLE); spin_unlock_irqrestore(&p->p_work_lock, flags); schedule(); } __set_current_state(TASK_RUNNING); return 0; } static int qedi_cpu_online(unsigned int cpu) { struct qedi_percpu_s *p = this_cpu_ptr(&qedi_percpu); struct task_struct *thread; thread = kthread_create_on_node(qedi_percpu_io_thread, (void *)p, cpu_to_node(cpu), "qedi_thread/%d", cpu); if (IS_ERR(thread)) return PTR_ERR(thread); kthread_bind(thread, cpu); p->iothread = thread; wake_up_process(thread); return 0; } static int qedi_cpu_offline(unsigned int cpu) { struct qedi_percpu_s *p = this_cpu_ptr(&qedi_percpu); struct qedi_work *work, *tmp; struct task_struct *thread; unsigned long flags; spin_lock_irqsave(&p->p_work_lock, flags); thread = p->iothread; p->iothread = NULL; list_for_each_entry_safe(work, tmp, &p->work_list, list) { list_del_init(&work->list); qedi_fp_process_cqes(work); if (!work->is_solicited) kfree(work); } spin_unlock_irqrestore(&p->p_work_lock, flags); if (thread) kthread_stop(thread); return 0; } void qedi_reset_host_mtu(struct qedi_ctx *qedi, u16 mtu) { struct qed_ll2_params params; qedi_recover_all_conns(qedi); qedi_ops->ll2->stop(qedi->cdev); qedi_ll2_free_skbs(qedi); QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "old MTU %u, new MTU %u\n", qedi->ll2_mtu, mtu); memset(&params, 0, sizeof(params)); qedi->ll2_mtu = mtu; params.mtu = qedi->ll2_mtu + IPV6_HDR_LEN + TCP_HDR_LEN; params.drop_ttl0_packets = 0; params.rx_vlan_stripping = 1; ether_addr_copy(params.ll2_mac_address, qedi->dev_info.common.hw_mac); qedi_ops->ll2->start(qedi->cdev, &params); } /* * qedi_get_nvram_block: - Scan through the iSCSI NVRAM block (while accounting * for gaps) for the matching absolute-pf-id of the QEDI device. 
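* Returns NULL when no non-empty, PF-mapped block matches the device's absolute PF id.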
*/ static struct nvm_iscsi_block * qedi_get_nvram_block(struct qedi_ctx *qedi) { int i; u8 pf; u32 flags; struct nvm_iscsi_block *block; pf = qedi->dev_info.common.abs_pf_id; block = &qedi->iscsi_image->iscsi_cfg.block[0]; for (i = 0; i < NUM_OF_ISCSI_PF_SUPPORTED; i++, block++) { flags = ((block->id) & NVM_ISCSI_CFG_BLK_CTRL_FLAG_MASK) >> NVM_ISCSI_CFG_BLK_CTRL_FLAG_OFFSET; if (flags & (NVM_ISCSI_CFG_BLK_CTRL_FLAG_IS_NOT_EMPTY | NVM_ISCSI_CFG_BLK_CTRL_FLAG_PF_MAPPED) && (pf == (block->id & NVM_ISCSI_CFG_BLK_MAPPED_PF_ID_MASK) >> NVM_ISCSI_CFG_BLK_MAPPED_PF_ID_OFFSET)) return block; } return NULL; } static ssize_t qedi_show_boot_eth_info(void *data, int type, char *buf) { struct qedi_ctx *qedi = data; struct nvm_iscsi_initiator *initiator; int rc = 1; u32 ipv6_en, dhcp_en, ip_len; struct nvm_iscsi_block *block; char *fmt, *ip, *sub, *gw; block = qedi_get_nvram_block(qedi); if (!block) return 0; initiator = &block->initiator; ipv6_en = block->generic.ctrl_flags & NVM_ISCSI_CFG_GEN_IPV6_ENABLED; dhcp_en = block->generic.ctrl_flags & NVM_ISCSI_CFG_GEN_DHCP_TCPIP_CONFIG_ENABLED; /* Static IP assignments. */ fmt = ipv6_en ? "%pI6\n" : "%pI4\n"; ip = ipv6_en ? initiator->ipv6.addr.byte : initiator->ipv4.addr.byte; ip_len = ipv6_en ? IPV6_LEN : IPV4_LEN; sub = ipv6_en ? initiator->ipv6.subnet_mask.byte : initiator->ipv4.subnet_mask.byte; gw = ipv6_en ? initiator->ipv6.gateway.byte : initiator->ipv4.gateway.byte; /* DHCP IP adjustments. */ fmt = dhcp_en ? "%s\n" : fmt; if (dhcp_en) { ip = ipv6_en ? "0::0" : "0.0.0.0"; sub = ip; gw = ip; ip_len = ipv6_en ? 5 : 8; } switch (type) { case ISCSI_BOOT_ETH_IP_ADDR: rc = snprintf(buf, ip_len, fmt, ip); break; case ISCSI_BOOT_ETH_SUBNET_MASK: rc = snprintf(buf, ip_len, fmt, sub); break; case ISCSI_BOOT_ETH_GATEWAY: rc = snprintf(buf, ip_len, fmt, gw); break; case ISCSI_BOOT_ETH_FLAGS: rc = snprintf(buf, 3, "%d\n", (char)SYSFS_FLAG_FW_SEL_BOOT); break; case ISCSI_BOOT_ETH_INDEX: rc = snprintf(buf, 3, "0\n"); break; case ISCSI_BOOT_ETH_MAC: rc = sysfs_format_mac(buf, qedi->mac, ETH_ALEN); break; case ISCSI_BOOT_ETH_VLAN: rc = snprintf(buf, 12, "%d\n", GET_FIELD2(initiator->generic_cont0, NVM_ISCSI_CFG_INITIATOR_VLAN)); break; case ISCSI_BOOT_ETH_ORIGIN: if (dhcp_en) rc = snprintf(buf, 3, "3\n"); break; default: rc = 0; break; } return rc; } static umode_t qedi_eth_get_attr_visibility(void *data, int type) { int rc = 1; switch (type) { case ISCSI_BOOT_ETH_FLAGS: case ISCSI_BOOT_ETH_MAC: case ISCSI_BOOT_ETH_INDEX: case ISCSI_BOOT_ETH_IP_ADDR: case ISCSI_BOOT_ETH_SUBNET_MASK: case ISCSI_BOOT_ETH_GATEWAY: case ISCSI_BOOT_ETH_ORIGIN: case ISCSI_BOOT_ETH_VLAN: rc = 0444; break; default: rc = 0; break; } return rc; } static ssize_t qedi_show_boot_ini_info(void *data, int type, char *buf) { struct qedi_ctx *qedi = data; struct nvm_iscsi_initiator *initiator; int rc; struct nvm_iscsi_block *block; block = qedi_get_nvram_block(qedi); if (!block) return 0; initiator = &block->initiator; switch (type) { case ISCSI_BOOT_INI_INITIATOR_NAME: rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN, initiator->initiator_name.byte); break; default: rc = 0; break; } return rc; } static umode_t qedi_ini_get_attr_visibility(void *data, int type) { int rc; switch (type) { case ISCSI_BOOT_INI_INITIATOR_NAME: rc = 0444; break; default: rc = 0; break; } return rc; } static ssize_t qedi_show_boot_tgt_info(struct qedi_ctx *qedi, int type, char *buf, enum qedi_nvm_tgts idx) { int rc = 1; u32 ctrl_flags, ipv6_en, chap_en, mchap_en, ip_len; struct nvm_iscsi_block *block; char 
*chap_name, *chap_secret; char *mchap_name, *mchap_secret; block = qedi_get_nvram_block(qedi); if (!block) goto exit_show_tgt_info; QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_EVT, "Port:%d, tgt_idx:%d\n", GET_FIELD2(block->id, NVM_ISCSI_CFG_BLK_MAPPED_PF_ID), idx); ctrl_flags = block->target[idx].ctrl_flags & NVM_ISCSI_CFG_TARGET_ENABLED; if (!ctrl_flags) { QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_EVT, "Target disabled\n"); goto exit_show_tgt_info; } ipv6_en = block->generic.ctrl_flags & NVM_ISCSI_CFG_GEN_IPV6_ENABLED; ip_len = ipv6_en ? IPV6_LEN : IPV4_LEN; chap_en = block->generic.ctrl_flags & NVM_ISCSI_CFG_GEN_CHAP_ENABLED; chap_name = chap_en ? block->initiator.chap_name.byte : NULL; chap_secret = chap_en ? block->initiator.chap_password.byte : NULL; mchap_en = block->generic.ctrl_flags & NVM_ISCSI_CFG_GEN_CHAP_MUTUAL_ENABLED; mchap_name = mchap_en ? block->target[idx].chap_name.byte : NULL; mchap_secret = mchap_en ? block->target[idx].chap_password.byte : NULL; switch (type) { case ISCSI_BOOT_TGT_NAME: rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN, block->target[idx].target_name.byte); break; case ISCSI_BOOT_TGT_IP_ADDR: if (ipv6_en) rc = snprintf(buf, ip_len, "%pI6\n", block->target[idx].ipv6_addr.byte); else rc = snprintf(buf, ip_len, "%pI4\n", block->target[idx].ipv4_addr.byte); break; case ISCSI_BOOT_TGT_PORT: rc = snprintf(buf, 12, "%d\n", GET_FIELD2(block->target[idx].generic_cont0, NVM_ISCSI_CFG_TARGET_TCP_PORT)); break; case ISCSI_BOOT_TGT_LUN: rc = snprintf(buf, 22, "%.*d\n", block->target[idx].lun.value[1], block->target[idx].lun.value[0]); break; case ISCSI_BOOT_TGT_CHAP_NAME: rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, chap_name); break; case ISCSI_BOOT_TGT_CHAP_SECRET: rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN, chap_secret); break; case ISCSI_BOOT_TGT_REV_CHAP_NAME: rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, mchap_name); break; case ISCSI_BOOT_TGT_REV_CHAP_SECRET: rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN, mchap_secret); break; case ISCSI_BOOT_TGT_FLAGS: rc = snprintf(buf, 3, "%d\n", (char)SYSFS_FLAG_FW_SEL_BOOT); break; case ISCSI_BOOT_TGT_NIC_ASSOC: rc = snprintf(buf, 3, "0\n"); break; default: rc = 0; break; } exit_show_tgt_info: return rc; } static ssize_t qedi_show_boot_tgt_pri_info(void *data, int type, char *buf) { struct qedi_ctx *qedi = data; return qedi_show_boot_tgt_info(qedi, type, buf, QEDI_NVM_TGT_PRI); } static ssize_t qedi_show_boot_tgt_sec_info(void *data, int type, char *buf) { struct qedi_ctx *qedi = data; return qedi_show_boot_tgt_info(qedi, type, buf, QEDI_NVM_TGT_SEC); } static umode_t qedi_tgt_get_attr_visibility(void *data, int type) { int rc; switch (type) { case ISCSI_BOOT_TGT_NAME: case ISCSI_BOOT_TGT_IP_ADDR: case ISCSI_BOOT_TGT_PORT: case ISCSI_BOOT_TGT_LUN: case ISCSI_BOOT_TGT_CHAP_NAME: case ISCSI_BOOT_TGT_CHAP_SECRET: case ISCSI_BOOT_TGT_REV_CHAP_NAME: case ISCSI_BOOT_TGT_REV_CHAP_SECRET: case ISCSI_BOOT_TGT_NIC_ASSOC: case ISCSI_BOOT_TGT_FLAGS: rc = 0444; break; default: rc = 0; break; } return rc; } static void qedi_boot_release(void *data) { struct qedi_ctx *qedi = data; scsi_host_put(qedi->shost); } static int qedi_get_boot_info(struct qedi_ctx *qedi) { int ret = 1; QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "Get NVM iSCSI CFG image\n"); ret = qedi_ops->common->nvm_get_image(qedi->cdev, QED_NVM_IMAGE_ISCSI_CFG, (char *)qedi->iscsi_image, sizeof(struct qedi_nvm_iscsi_image)); if (ret) QEDI_ERR(&qedi->dbg_ctx, "Could not get NVM image. 
ret = %d\n", ret); return ret; } static int qedi_setup_boot_info(struct qedi_ctx *qedi) { struct iscsi_boot_kobj *boot_kobj; if (qedi_get_boot_info(qedi)) return -EPERM; qedi->boot_kset = iscsi_boot_create_host_kset(qedi->shost->host_no); if (!qedi->boot_kset) goto kset_free; if (!scsi_host_get(qedi->shost)) goto kset_free; boot_kobj = iscsi_boot_create_target(qedi->boot_kset, 0, qedi, qedi_show_boot_tgt_pri_info, qedi_tgt_get_attr_visibility, qedi_boot_release); if (!boot_kobj) goto put_host; if (!scsi_host_get(qedi->shost)) goto kset_free; boot_kobj = iscsi_boot_create_target(qedi->boot_kset, 1, qedi, qedi_show_boot_tgt_sec_info, qedi_tgt_get_attr_visibility, qedi_boot_release); if (!boot_kobj) goto put_host; if (!scsi_host_get(qedi->shost)) goto kset_free; boot_kobj = iscsi_boot_create_initiator(qedi->boot_kset, 0, qedi, qedi_show_boot_ini_info, qedi_ini_get_attr_visibility, qedi_boot_release); if (!boot_kobj) goto put_host; if (!scsi_host_get(qedi->shost)) goto kset_free; boot_kobj = iscsi_boot_create_ethernet(qedi->boot_kset, 0, qedi, qedi_show_boot_eth_info, qedi_eth_get_attr_visibility, qedi_boot_release); if (!boot_kobj) goto put_host; return 0; put_host: scsi_host_put(qedi->shost); kset_free: iscsi_boot_destroy_kset(qedi->boot_kset); return -ENOMEM; } static pci_ers_result_t qedi_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) { struct qedi_ctx *qedi = pci_get_drvdata(pdev); QEDI_ERR(&qedi->dbg_ctx, "%s: PCI error detected [%d]\n", __func__, state); if (test_and_set_bit(QEDI_IN_RECOVERY, &qedi->flags)) { QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "Recovery already in progress.\n"); return PCI_ERS_RESULT_NONE; } qedi_ops->common->recovery_process(qedi->cdev); return PCI_ERS_RESULT_CAN_RECOVER; } static void __qedi_remove(struct pci_dev *pdev, int mode) { struct qedi_ctx *qedi = pci_get_drvdata(pdev); int rval; u16 retry = 10; if (mode == QEDI_MODE_NORMAL) iscsi_host_remove(qedi->shost, false); else if (mode == QEDI_MODE_SHUTDOWN) iscsi_host_remove(qedi->shost, true); if (mode == QEDI_MODE_NORMAL || mode == QEDI_MODE_SHUTDOWN) { if (qedi->tmf_thread) { destroy_workqueue(qedi->tmf_thread); qedi->tmf_thread = NULL; } if (qedi->offload_thread) { destroy_workqueue(qedi->offload_thread); qedi->offload_thread = NULL; } } #ifdef CONFIG_DEBUG_FS qedi_dbg_host_exit(&qedi->dbg_ctx); #endif if (!test_bit(QEDI_IN_OFFLINE, &qedi->flags)) qedi_ops->common->set_power_state(qedi->cdev, PCI_D0); qedi_sync_free_irqs(qedi); if (!test_bit(QEDI_IN_OFFLINE, &qedi->flags)) { while (retry--) { rval = qedi_ops->stop(qedi->cdev); if (rval < 0) msleep(1000); else break; } qedi_ops->ll2->stop(qedi->cdev); } cancel_delayed_work_sync(&qedi->recovery_work); cancel_delayed_work_sync(&qedi->board_disable_work); qedi_free_iscsi_pf_param(qedi); rval = qedi_ops->common->update_drv_state(qedi->cdev, false); if (rval) QEDI_ERR(&qedi->dbg_ctx, "Failed to send drv state to MFW\n"); if (!test_bit(QEDI_IN_OFFLINE, &qedi->flags)) { qedi_ops->common->slowpath_stop(qedi->cdev); qedi_ops->common->remove(qedi->cdev); } qedi_destroy_fp(qedi); if (mode == QEDI_MODE_NORMAL || mode == QEDI_MODE_SHUTDOWN) { qedi_release_cid_que(qedi); qedi_cm_free_mem(qedi); qedi_free_uio(qedi->udev); qedi_free_itt(qedi); if (qedi->ll2_recv_thread) { kthread_stop(qedi->ll2_recv_thread); qedi->ll2_recv_thread = NULL; } qedi_ll2_free_skbs(qedi); if (qedi->boot_kset) iscsi_boot_destroy_kset(qedi->boot_kset); iscsi_host_free(qedi->shost); } } static void qedi_board_disable_work(struct work_struct *work) { struct qedi_ctx *qedi = 
container_of(work, struct qedi_ctx, board_disable_work.work); QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "Fan failure, Unloading firmware context.\n"); if (test_and_set_bit(QEDI_IN_SHUTDOWN, &qedi->flags)) return; __qedi_remove(qedi->pdev, QEDI_MODE_NORMAL); } static void qedi_shutdown(struct pci_dev *pdev) { struct qedi_ctx *qedi = pci_get_drvdata(pdev); QEDI_ERR(&qedi->dbg_ctx, "%s: Shutdown qedi\n", __func__); if (test_and_set_bit(QEDI_IN_SHUTDOWN, &qedi->flags)) return; __qedi_remove(pdev, QEDI_MODE_SHUTDOWN); } static int qedi_suspend(struct pci_dev *pdev, pm_message_t state) { struct qedi_ctx *qedi; if (!pdev) { QEDI_ERR(NULL, "pdev is NULL.\n"); return -ENODEV; } qedi = pci_get_drvdata(pdev); QEDI_ERR(&qedi->dbg_ctx, "%s: Device does not support suspend operation\n", __func__); return -EPERM; } static int __qedi_probe(struct pci_dev *pdev, int mode) { struct qedi_ctx *qedi; struct qed_ll2_params params; u8 dp_level = 0; bool is_vf = false; char host_buf[16]; struct qed_link_params link_params; struct qed_slowpath_params sp_params; struct qed_probe_params qed_params; void *task_start, *task_end; int rc; u16 retry = 10; if (mode != QEDI_MODE_RECOVERY) { qedi = qedi_host_alloc(pdev); if (!qedi) { rc = -ENOMEM; goto exit_probe; } } else { qedi = pci_get_drvdata(pdev); } retry_probe: if (mode == QEDI_MODE_RECOVERY) msleep(2000); memset(&qed_params, 0, sizeof(qed_params)); qed_params.protocol = QED_PROTOCOL_ISCSI; qed_params.dp_module = qedi_qed_debug; qed_params.dp_level = dp_level; qed_params.is_vf = is_vf; qedi->cdev = qedi_ops->common->probe(pdev, &qed_params); if (!qedi->cdev) { if (mode == QEDI_MODE_RECOVERY && retry) { QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "Retry %d initialize hardware\n", retry); retry--; goto retry_probe; } rc = -ENODEV; QEDI_ERR(&qedi->dbg_ctx, "Cannot initialize hardware\n"); goto free_host; } set_bit(QEDI_ERR_ATTN_CLR_EN, &qedi->qedi_err_flags); set_bit(QEDI_ERR_IS_RECOVERABLE, &qedi->qedi_err_flags); atomic_set(&qedi->link_state, QEDI_LINK_DOWN); rc = qedi_ops->fill_dev_info(qedi->cdev, &qedi->dev_info); if (rc) goto free_host; QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "dev_info: num_hwfns=%d affin_hwfn_idx=%d.\n", qedi->dev_info.common.num_hwfns, qedi_ops->common->get_affin_hwfn_idx(qedi->cdev)); rc = qedi_set_iscsi_pf_param(qedi); if (rc) { rc = -ENOMEM; QEDI_ERR(&qedi->dbg_ctx, "Set iSCSI pf param fail\n"); goto free_host; } qedi_ops->common->update_pf_params(qedi->cdev, &qedi->pf_params); rc = qedi_prepare_fp(qedi); if (rc) { QEDI_ERR(&qedi->dbg_ctx, "Cannot start slowpath.\n"); goto free_pf_params; } /* Start the Slowpath-process */ memset(&sp_params, 0, sizeof(struct qed_slowpath_params)); sp_params.int_mode = QED_INT_MODE_MSIX; sp_params.drv_major = QEDI_DRIVER_MAJOR_VER; sp_params.drv_minor = QEDI_DRIVER_MINOR_VER; sp_params.drv_rev = QEDI_DRIVER_REV_VER; sp_params.drv_eng = QEDI_DRIVER_ENG_VER; strscpy(sp_params.name, "qedi iSCSI", QED_DRV_VER_STR_SIZE); rc = qedi_ops->common->slowpath_start(qedi->cdev, &sp_params); if (rc) { QEDI_ERR(&qedi->dbg_ctx, "Cannot start slowpath\n"); goto stop_hw; } /* update_pf_params needs to be called before and after slowpath * start */ qedi_ops->common->update_pf_params(qedi->cdev, &qedi->pf_params); rc = qedi_setup_int(qedi); if (rc) goto stop_iscsi_func; qedi_ops->common->set_power_state(qedi->cdev, PCI_D0); /* Learn information crucial for qedi to progress */ rc = qedi_ops->fill_dev_info(qedi->cdev, &qedi->dev_info); if (rc) goto stop_iscsi_func; /* Record BDQ producer doorbell addresses */ qedi->bdq_primary_prod = 
qedi->dev_info.primary_dbq_rq_addr; qedi->bdq_secondary_prod = qedi->dev_info.secondary_bdq_rq_addr; QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC, "BDQ primary_prod=%p secondary_prod=%p.\n", qedi->bdq_primary_prod, qedi->bdq_secondary_prod); /* * We need to write the number of BDs in the BDQ we've preallocated so * the f/w will do a prefetch and we'll get an unsolicited CQE when a * packet arrives. */ qedi->bdq_prod_idx = QEDI_BDQ_NUM; QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC, "Writing %d to primary and secondary BDQ doorbell registers.\n", qedi->bdq_prod_idx); writew(qedi->bdq_prod_idx, qedi->bdq_primary_prod); readw(qedi->bdq_primary_prod); writew(qedi->bdq_prod_idx, qedi->bdq_secondary_prod); readw(qedi->bdq_secondary_prod); ether_addr_copy(qedi->mac, qedi->dev_info.common.hw_mac); QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC, "MAC address is %pM.\n", qedi->mac); snprintf(host_buf, sizeof(host_buf), "host_%d", qedi->shost->host_no); qedi_ops->common->set_name(qedi->cdev, host_buf); qedi_ops->register_ops(qedi->cdev, &qedi_cb_ops, qedi); memset(&params, 0, sizeof(params)); params.mtu = DEF_PATH_MTU + IPV6_HDR_LEN + TCP_HDR_LEN; qedi->ll2_mtu = DEF_PATH_MTU; params.drop_ttl0_packets = 0; params.rx_vlan_stripping = 1; ether_addr_copy(params.ll2_mac_address, qedi->dev_info.common.hw_mac); if (mode != QEDI_MODE_RECOVERY) { /* set up rx path */ INIT_LIST_HEAD(&qedi->ll2_skb_list); spin_lock_init(&qedi->ll2_lock); /* start qedi context */ spin_lock_init(&qedi->hba_lock); spin_lock_init(&qedi->task_idx_lock); mutex_init(&qedi->stats_lock); } qedi_ops->ll2->register_cb_ops(qedi->cdev, &qedi_ll2_cb_ops, qedi); qedi_ops->ll2->start(qedi->cdev, &params); if (mode != QEDI_MODE_RECOVERY) { qedi->ll2_recv_thread = kthread_run(qedi_ll2_recv_thread, (void *)qedi, "qedi_ll2_thread"); } rc = qedi_ops->start(qedi->cdev, &qedi->tasks, qedi, qedi_iscsi_event_cb); if (rc) { rc = -ENODEV; QEDI_ERR(&qedi->dbg_ctx, "Cannot start iSCSI function\n"); goto stop_slowpath; } task_start = qedi_get_task_mem(&qedi->tasks, 0); task_end = qedi_get_task_mem(&qedi->tasks, MAX_TID_BLOCKS_ISCSI - 1); QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC, "Task context start=%p, end=%p block_size=%u.\n", task_start, task_end, qedi->tasks.size); memset(&link_params, 0, sizeof(link_params)); link_params.link_up = true; rc = qedi_ops->common->set_link(qedi->cdev, &link_params); if (rc) { QEDI_WARN(&qedi->dbg_ctx, "Link set up failed.\n"); atomic_set(&qedi->link_state, QEDI_LINK_DOWN); } #ifdef CONFIG_DEBUG_FS qedi_dbg_host_init(&qedi->dbg_ctx, qedi_debugfs_ops, qedi_dbg_fops); #endif QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "QLogic FastLinQ iSCSI Module qedi %s, FW %d.%d.%d.%d\n", QEDI_MODULE_VERSION, FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION, FW_ENGINEERING_VERSION); if (mode == QEDI_MODE_NORMAL) { if (iscsi_host_add(qedi->shost, &pdev->dev)) { QEDI_ERR(&qedi->dbg_ctx, "Could not add iscsi host\n"); rc = -ENOMEM; goto remove_host; } /* Allocate uio buffers */ rc = qedi_alloc_uio_rings(qedi); if (rc) { QEDI_ERR(&qedi->dbg_ctx, "UIO alloc ring failed err=%d\n", rc); goto remove_host; } rc = qedi_init_uio(qedi); if (rc) { QEDI_ERR(&qedi->dbg_ctx, "UIO init failed, err=%d\n", rc); goto free_uio; } /* host the array on iscsi_conn */ rc = qedi_setup_cid_que(qedi); if (rc) { QEDI_ERR(&qedi->dbg_ctx, "Could not setup cid que\n"); goto free_uio; } rc = qedi_cm_alloc_mem(qedi); if (rc) { QEDI_ERR(&qedi->dbg_ctx, "Could not alloc cm memory\n"); goto free_cid_que; } rc = qedi_alloc_itt(qedi); if (rc) { QEDI_ERR(&qedi->dbg_ctx, "Could not alloc itt 
memory\n"); goto free_cid_que; } sprintf(host_buf, "host_%d", qedi->shost->host_no); qedi->tmf_thread = create_singlethread_workqueue(host_buf); if (!qedi->tmf_thread) { QEDI_ERR(&qedi->dbg_ctx, "Unable to start tmf thread!\n"); rc = -ENODEV; goto free_cid_que; } sprintf(host_buf, "qedi_ofld%d", qedi->shost->host_no); qedi->offload_thread = create_workqueue(host_buf); if (!qedi->offload_thread) { QEDI_ERR(&qedi->dbg_ctx, "Unable to start offload thread!\n"); rc = -ENODEV; goto free_tmf_thread; } INIT_DELAYED_WORK(&qedi->recovery_work, qedi_recovery_handler); INIT_DELAYED_WORK(&qedi->board_disable_work, qedi_board_disable_work); /* F/w needs 1st task context memory entry for performance */ set_bit(QEDI_RESERVE_TASK_ID, qedi->task_idx_map); atomic_set(&qedi->num_offloads, 0); if (qedi_setup_boot_info(qedi)) QEDI_ERR(&qedi->dbg_ctx, "No iSCSI boot target configured\n"); rc = qedi_ops->common->update_drv_state(qedi->cdev, true); if (rc) QEDI_ERR(&qedi->dbg_ctx, "Failed to send drv state to MFW\n"); } return 0; free_tmf_thread: destroy_workqueue(qedi->tmf_thread); free_cid_que: qedi_release_cid_que(qedi); free_uio: qedi_free_uio(qedi->udev); remove_host: #ifdef CONFIG_DEBUG_FS qedi_dbg_host_exit(&qedi->dbg_ctx); #endif iscsi_host_remove(qedi->shost, false); stop_iscsi_func: qedi_ops->stop(qedi->cdev); stop_slowpath: qedi_ops->common->slowpath_stop(qedi->cdev); stop_hw: qedi_ops->common->remove(qedi->cdev); free_pf_params: qedi_free_iscsi_pf_param(qedi); free_host: iscsi_host_free(qedi->shost); exit_probe: return rc; } static void qedi_mark_conn_recovery(struct iscsi_cls_session *cls_session) { struct iscsi_session *session = cls_session->dd_data; struct iscsi_conn *conn = session->leadconn; struct qedi_conn *qedi_conn = conn->dd_data; iscsi_conn_failure(qedi_conn->cls_conn->dd_data, ISCSI_ERR_CONN_FAILED); } static void qedi_recovery_handler(struct work_struct *work) { struct qedi_ctx *qedi = container_of(work, struct qedi_ctx, recovery_work.work); iscsi_host_for_each_session(qedi->shost, qedi_mark_conn_recovery); /* Call common_ops->recovery_prolog to allow the MFW to quiesce * any PCI transactions. 
 */
	qedi_ops->common->recovery_prolog(qedi->cdev);

	__qedi_remove(qedi->pdev, QEDI_MODE_RECOVERY);
	__qedi_probe(qedi->pdev, QEDI_MODE_RECOVERY);
	clear_bit(QEDI_IN_RECOVERY, &qedi->flags);
}

static int qedi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	return __qedi_probe(pdev, QEDI_MODE_NORMAL);
}

static void qedi_remove(struct pci_dev *pdev)
{
	__qedi_remove(pdev, QEDI_MODE_NORMAL);
}

static struct pci_device_id qedi_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x165E) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x8084) },
	{ 0 },
};
MODULE_DEVICE_TABLE(pci, qedi_pci_tbl);

static enum cpuhp_state qedi_cpuhp_state;

static struct pci_error_handlers qedi_err_handler = {
	.error_detected = qedi_io_error_detected,
};

static struct pci_driver qedi_pci_driver = {
	.name = QEDI_MODULE_NAME,
	.id_table = qedi_pci_tbl,
	.probe = qedi_probe,
	.remove = qedi_remove,
	.shutdown = qedi_shutdown,
	.err_handler = &qedi_err_handler,
	.suspend = qedi_suspend,
};

static int __init qedi_init(void)
{
	struct qedi_percpu_s *p;
	int cpu, rc = 0;

	qedi_ops = qed_get_iscsi_ops();
	if (!qedi_ops) {
		QEDI_ERR(NULL, "Failed to get qed iSCSI operations\n");
		return -EINVAL;
	}

#ifdef CONFIG_DEBUG_FS
	qedi_dbg_init("qedi");
#endif

	qedi_scsi_transport = iscsi_register_transport(&qedi_iscsi_transport);
	if (!qedi_scsi_transport) {
		QEDI_ERR(NULL, "Could not register qedi transport");
		rc = -ENOMEM;
		goto exit_qedi_init_1;
	}

	for_each_possible_cpu(cpu) {
		p = &per_cpu(qedi_percpu, cpu);
		INIT_LIST_HEAD(&p->work_list);
		spin_lock_init(&p->p_work_lock);
		p->iothread = NULL;
	}

	rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "scsi/qedi:online",
			       qedi_cpu_online, qedi_cpu_offline);
	if (rc < 0)
		goto exit_qedi_init_2;
	qedi_cpuhp_state = rc;

	rc = pci_register_driver(&qedi_pci_driver);
	if (rc) {
		QEDI_ERR(NULL, "Failed to register driver\n");
		goto exit_qedi_hp;
	}

	return 0;

exit_qedi_hp:
	cpuhp_remove_state(qedi_cpuhp_state);
exit_qedi_init_2:
	iscsi_unregister_transport(&qedi_iscsi_transport);
exit_qedi_init_1:
#ifdef CONFIG_DEBUG_FS
	qedi_dbg_exit();
#endif
	qed_put_iscsi_ops();
	return rc;
}

static void __exit qedi_cleanup(void)
{
	pci_unregister_driver(&qedi_pci_driver);
	cpuhp_remove_state(qedi_cpuhp_state);
	iscsi_unregister_transport(&qedi_iscsi_transport);
#ifdef CONFIG_DEBUG_FS
	qedi_dbg_exit();
#endif
	qed_put_iscsi_ops();
}

MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx iSCSI Module");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("QLogic Corporation");
MODULE_VERSION(QEDI_MODULE_VERSION);
module_init(qedi_init);
module_exit(qedi_cleanup);
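/*
 * Minimal standalone sketch (not part of qedi_main.c) of the goto-ladder
 * error unwind that __qedi_probe() above uses: each acquisition that can
 * fail jumps to the label releasing everything acquired before it, and the
 * labels fall through in reverse order.  The alloc_a/alloc_b/alloc_c names
 * are hypothetical placeholders, not driver calls.
 */
#include <stdio.h>
#include <stdlib.h>

static void *alloc_a(void) { return malloc(16); }	/* e.g. host allocation   */
static void *alloc_b(void) { return malloc(16); }	/* e.g. ring allocation   */
static void *alloc_c(void) { return NULL; }		/* forced failure to show the unwind */

static int probe_sketch(void)
{
	void *a, *b, *c;
	int rc = 0;

	a = alloc_a();
	if (!a) {
		rc = -1;
		goto exit_probe;
	}

	b = alloc_b();
	if (!b) {
		rc = -1;
		goto free_a;
	}

	c = alloc_c();
	if (!c) {
		rc = -1;
		goto free_b;	/* release b, then fall through and release a */
	}

	/* On success the resources stay allocated, as in a real probe. */
	return 0;

free_b:
	free(b);
free_a:
	free(a);
exit_probe:
	return rc;
}

int main(void)
{
	printf("probe_sketch() = %d\n", probe_sketch());	/* prints -1 here */
	return 0;
}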
linux-master
drivers/scsi/qedi/qedi_main.c
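/*
 * The next file (qedi_iscsi.c) plugs the driver into libiscsi through a
 * table of callbacks (struct iscsi_transport qedi_iscsi_transport).  Below
 * is a minimal standalone sketch of that designated-initializer "ops table"
 * pattern; the struct and function names here are made up for illustration
 * and are not the libiscsi API.
 */
#include <stdio.h>

struct demo_transport_ops {
	const char *name;
	int  (*connect)(int cid);
	void (*disconnect)(int cid);
};

static int demo_connect(int cid)
{
	printf("connect cid=%d\n", cid);
	return 0;
}

static void demo_disconnect(int cid)
{
	printf("disconnect cid=%d\n", cid);
}

/* Designated initializers: unset callbacks stay NULL, field order is irrelevant. */
static const struct demo_transport_ops demo_ops = {
	.name       = "demo",
	.connect    = demo_connect,
	.disconnect = demo_disconnect,
};

int main(void)
{
	/* Callers check each pointer before use, as the midlayer does. */
	if (demo_ops.connect)
		demo_ops.connect(1);
	if (demo_ops.disconnect)
		demo_ops.disconnect(1);
	return 0;
}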
// SPDX-License-Identifier: GPL-2.0-only /* * QLogic iSCSI Offload Driver * Copyright (c) 2016 Cavium Inc. */ #include <linux/blkdev.h> #include <linux/etherdevice.h> #include <linux/if_ether.h> #include <linux/if_vlan.h> #include <scsi/scsi_tcq.h> #include "qedi.h" #include "qedi_iscsi.h" #include "qedi_gbl.h" int qedi_recover_all_conns(struct qedi_ctx *qedi) { struct qedi_conn *qedi_conn; int i; for (i = 0; i < qedi->max_active_conns; i++) { qedi_conn = qedi_get_conn_from_id(qedi, i); if (!qedi_conn) continue; qedi_start_conn_recovery(qedi, qedi_conn); } return SUCCESS; } static int qedi_eh_host_reset(struct scsi_cmnd *cmd) { struct Scsi_Host *shost = cmd->device->host; struct qedi_ctx *qedi; qedi = iscsi_host_priv(shost); return qedi_recover_all_conns(qedi); } const struct scsi_host_template qedi_host_template = { .module = THIS_MODULE, .name = "QLogic QEDI 25/40/100Gb iSCSI Initiator Driver", .proc_name = QEDI_MODULE_NAME, .queuecommand = iscsi_queuecommand, .eh_timed_out = iscsi_eh_cmd_timed_out, .eh_abort_handler = iscsi_eh_abort, .eh_device_reset_handler = iscsi_eh_device_reset, .eh_target_reset_handler = iscsi_eh_recover_target, .eh_host_reset_handler = qedi_eh_host_reset, .target_alloc = iscsi_target_alloc, .change_queue_depth = scsi_change_queue_depth, .can_queue = QEDI_MAX_ISCSI_TASK, .this_id = -1, .sg_tablesize = QEDI_ISCSI_MAX_BDS_PER_CMD, .max_sectors = 0xffff, .dma_boundary = QEDI_HW_DMA_BOUNDARY, .cmd_per_lun = 128, .shost_groups = qedi_shost_groups, .cmd_size = sizeof(struct iscsi_cmd), }; static void qedi_conn_free_login_resources(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn) { if (qedi_conn->gen_pdu.resp_bd_tbl) { dma_free_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE, qedi_conn->gen_pdu.resp_bd_tbl, qedi_conn->gen_pdu.resp_bd_dma); qedi_conn->gen_pdu.resp_bd_tbl = NULL; } if (qedi_conn->gen_pdu.req_bd_tbl) { dma_free_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE, qedi_conn->gen_pdu.req_bd_tbl, qedi_conn->gen_pdu.req_bd_dma); qedi_conn->gen_pdu.req_bd_tbl = NULL; } if (qedi_conn->gen_pdu.resp_buf) { dma_free_coherent(&qedi->pdev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN, qedi_conn->gen_pdu.resp_buf, qedi_conn->gen_pdu.resp_dma_addr); qedi_conn->gen_pdu.resp_buf = NULL; } if (qedi_conn->gen_pdu.req_buf) { dma_free_coherent(&qedi->pdev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN, qedi_conn->gen_pdu.req_buf, qedi_conn->gen_pdu.req_dma_addr); qedi_conn->gen_pdu.req_buf = NULL; } } static int qedi_conn_alloc_login_resources(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn) { qedi_conn->gen_pdu.req_buf = dma_alloc_coherent(&qedi->pdev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN, &qedi_conn->gen_pdu.req_dma_addr, GFP_KERNEL); if (!qedi_conn->gen_pdu.req_buf) goto login_req_buf_failure; qedi_conn->gen_pdu.req_buf_size = 0; qedi_conn->gen_pdu.req_wr_ptr = qedi_conn->gen_pdu.req_buf; qedi_conn->gen_pdu.resp_buf = dma_alloc_coherent(&qedi->pdev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN, &qedi_conn->gen_pdu.resp_dma_addr, GFP_KERNEL); if (!qedi_conn->gen_pdu.resp_buf) goto login_resp_buf_failure; qedi_conn->gen_pdu.resp_buf_size = ISCSI_DEF_MAX_RECV_SEG_LEN; qedi_conn->gen_pdu.resp_wr_ptr = qedi_conn->gen_pdu.resp_buf; qedi_conn->gen_pdu.req_bd_tbl = dma_alloc_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE, &qedi_conn->gen_pdu.req_bd_dma, GFP_KERNEL); if (!qedi_conn->gen_pdu.req_bd_tbl) goto login_req_bd_tbl_failure; qedi_conn->gen_pdu.resp_bd_tbl = dma_alloc_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE, &qedi_conn->gen_pdu.resp_bd_dma, GFP_KERNEL); if (!qedi_conn->gen_pdu.resp_bd_tbl) goto login_resp_bd_tbl_failure; 
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SESS, "Allocation successful, cid=0x%x\n", qedi_conn->iscsi_conn_id); return 0; login_resp_bd_tbl_failure: dma_free_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE, qedi_conn->gen_pdu.req_bd_tbl, qedi_conn->gen_pdu.req_bd_dma); qedi_conn->gen_pdu.req_bd_tbl = NULL; login_req_bd_tbl_failure: dma_free_coherent(&qedi->pdev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN, qedi_conn->gen_pdu.resp_buf, qedi_conn->gen_pdu.resp_dma_addr); qedi_conn->gen_pdu.resp_buf = NULL; login_resp_buf_failure: dma_free_coherent(&qedi->pdev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN, qedi_conn->gen_pdu.req_buf, qedi_conn->gen_pdu.req_dma_addr); qedi_conn->gen_pdu.req_buf = NULL; login_req_buf_failure: iscsi_conn_printk(KERN_ERR, qedi_conn->cls_conn->dd_data, "login resource alloc failed!!\n"); return -ENOMEM; } static void qedi_destroy_cmd_pool(struct qedi_ctx *qedi, struct iscsi_session *session) { int i; for (i = 0; i < session->cmds_max; i++) { struct iscsi_task *task = session->cmds[i]; struct qedi_cmd *cmd = task->dd_data; if (cmd->io_tbl.sge_tbl) dma_free_coherent(&qedi->pdev->dev, QEDI_ISCSI_MAX_BDS_PER_CMD * sizeof(struct scsi_sge), cmd->io_tbl.sge_tbl, cmd->io_tbl.sge_tbl_dma); if (cmd->sense_buffer) dma_free_coherent(&qedi->pdev->dev, SCSI_SENSE_BUFFERSIZE, cmd->sense_buffer, cmd->sense_buffer_dma); } } static int qedi_alloc_sget(struct qedi_ctx *qedi, struct iscsi_session *session, struct qedi_cmd *cmd) { struct qedi_io_bdt *io = &cmd->io_tbl; struct scsi_sge *sge; io->sge_tbl = dma_alloc_coherent(&qedi->pdev->dev, QEDI_ISCSI_MAX_BDS_PER_CMD * sizeof(*sge), &io->sge_tbl_dma, GFP_KERNEL); if (!io->sge_tbl) { iscsi_session_printk(KERN_ERR, session, "Could not allocate BD table.\n"); return -ENOMEM; } io->sge_valid = 0; return 0; } static int qedi_setup_cmd_pool(struct qedi_ctx *qedi, struct iscsi_session *session) { int i; for (i = 0; i < session->cmds_max; i++) { struct iscsi_task *task = session->cmds[i]; struct qedi_cmd *cmd = task->dd_data; task->hdr = &cmd->hdr; task->hdr_max = sizeof(struct iscsi_hdr); if (qedi_alloc_sget(qedi, session, cmd)) goto free_sgets; cmd->sense_buffer = dma_alloc_coherent(&qedi->pdev->dev, SCSI_SENSE_BUFFERSIZE, &cmd->sense_buffer_dma, GFP_KERNEL); if (!cmd->sense_buffer) goto free_sgets; } return 0; free_sgets: qedi_destroy_cmd_pool(qedi, session); return -ENOMEM; } static struct iscsi_cls_session * qedi_session_create(struct iscsi_endpoint *ep, u16 cmds_max, u16 qdepth, uint32_t initial_cmdsn) { struct Scsi_Host *shost; struct iscsi_cls_session *cls_session; struct qedi_ctx *qedi; struct qedi_endpoint *qedi_ep; if (!ep) return NULL; qedi_ep = ep->dd_data; shost = qedi_ep->qedi->shost; qedi = iscsi_host_priv(shost); if (cmds_max > qedi->max_sqes) cmds_max = qedi->max_sqes; else if (cmds_max < QEDI_SQ_WQES_MIN) cmds_max = QEDI_SQ_WQES_MIN; cls_session = iscsi_session_setup(&qedi_iscsi_transport, shost, cmds_max, 0, sizeof(struct qedi_cmd), initial_cmdsn, ISCSI_MAX_TARGET); if (!cls_session) { QEDI_ERR(&qedi->dbg_ctx, "Failed to setup session for ep=%p\n", qedi_ep); return NULL; } if (qedi_setup_cmd_pool(qedi, cls_session->dd_data)) { QEDI_ERR(&qedi->dbg_ctx, "Failed to setup cmd pool for ep=%p\n", qedi_ep); goto session_teardown; } return cls_session; session_teardown: iscsi_session_teardown(cls_session); return NULL; } static void qedi_session_destroy(struct iscsi_cls_session *cls_session) { struct iscsi_session *session = cls_session->dd_data; struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); struct qedi_ctx *qedi = iscsi_host_priv(shost); 
qedi_destroy_cmd_pool(qedi, session); iscsi_session_teardown(cls_session); } static struct iscsi_cls_conn * qedi_conn_create(struct iscsi_cls_session *cls_session, uint32_t cid) { struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); struct qedi_ctx *qedi = iscsi_host_priv(shost); struct iscsi_cls_conn *cls_conn; struct qedi_conn *qedi_conn; struct iscsi_conn *conn; cls_conn = iscsi_conn_setup(cls_session, sizeof(*qedi_conn), cid); if (!cls_conn) { QEDI_ERR(&qedi->dbg_ctx, "conn_new: iscsi conn setup failed, cid=0x%x, cls_sess=%p!\n", cid, cls_session); return NULL; } conn = cls_conn->dd_data; qedi_conn = conn->dd_data; qedi_conn->cls_conn = cls_conn; qedi_conn->qedi = qedi; qedi_conn->ep = NULL; qedi_conn->active_cmd_count = 0; INIT_LIST_HEAD(&qedi_conn->active_cmd_list); spin_lock_init(&qedi_conn->list_lock); if (qedi_conn_alloc_login_resources(qedi, qedi_conn)) { iscsi_conn_printk(KERN_ALERT, conn, "conn_new: login resc alloc failed, cid=0x%x, cls_sess=%p!!\n", cid, cls_session); goto free_conn; } return cls_conn; free_conn: iscsi_conn_teardown(cls_conn); return NULL; } void qedi_mark_device_missing(struct iscsi_cls_session *cls_session) { struct iscsi_session *session = cls_session->dd_data; struct qedi_conn *qedi_conn = session->leadconn->dd_data; spin_lock_bh(&session->frwd_lock); set_bit(QEDI_BLOCK_IO, &qedi_conn->qedi->flags); spin_unlock_bh(&session->frwd_lock); } void qedi_mark_device_available(struct iscsi_cls_session *cls_session) { struct iscsi_session *session = cls_session->dd_data; struct qedi_conn *qedi_conn = session->leadconn->dd_data; spin_lock_bh(&session->frwd_lock); clear_bit(QEDI_BLOCK_IO, &qedi_conn->qedi->flags); spin_unlock_bh(&session->frwd_lock); } static int qedi_bind_conn_to_iscsi_cid(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn) { u32 iscsi_cid = qedi_conn->iscsi_conn_id; if (qedi->cid_que.conn_cid_tbl[iscsi_cid]) { iscsi_conn_printk(KERN_ALERT, qedi_conn->cls_conn->dd_data, "conn bind - entry #%d not free\n", iscsi_cid); return -EBUSY; } qedi->cid_que.conn_cid_tbl[iscsi_cid] = qedi_conn; return 0; } struct qedi_conn *qedi_get_conn_from_id(struct qedi_ctx *qedi, u32 iscsi_cid) { if (!qedi->cid_que.conn_cid_tbl) { QEDI_ERR(&qedi->dbg_ctx, "missing conn<->cid table\n"); return NULL; } else if (iscsi_cid >= qedi->max_active_conns) { QEDI_ERR(&qedi->dbg_ctx, "wrong cid #%d\n", iscsi_cid); return NULL; } return qedi->cid_que.conn_cid_tbl[iscsi_cid]; } static int qedi_conn_bind(struct iscsi_cls_session *cls_session, struct iscsi_cls_conn *cls_conn, u64 transport_fd, int is_leading) { struct iscsi_conn *conn = cls_conn->dd_data; struct qedi_conn *qedi_conn = conn->dd_data; struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); struct qedi_ctx *qedi = iscsi_host_priv(shost); struct qedi_endpoint *qedi_ep; struct iscsi_endpoint *ep; int rc = 0; ep = iscsi_lookup_endpoint(transport_fd); if (!ep) return -EINVAL; qedi_ep = ep->dd_data; if ((qedi_ep->state == EP_STATE_TCP_FIN_RCVD) || (qedi_ep->state == EP_STATE_TCP_RST_RCVD)) { rc = -EINVAL; goto put_ep; } if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) { rc = -EINVAL; goto put_ep; } qedi_ep->conn = qedi_conn; qedi_conn->ep = qedi_ep; qedi_conn->iscsi_ep = ep; qedi_conn->iscsi_conn_id = qedi_ep->iscsi_cid; qedi_conn->fw_cid = qedi_ep->fw_cid; qedi_conn->cmd_cleanup_req = 0; atomic_set(&qedi_conn->cmd_cleanup_cmpl, 0); if (qedi_bind_conn_to_iscsi_cid(qedi, qedi_conn)) { rc = -EINVAL; goto put_ep; } spin_lock_init(&qedi_conn->tmf_work_lock); INIT_LIST_HEAD(&qedi_conn->tmf_work_list); 
init_waitqueue_head(&qedi_conn->wait_queue); put_ep: iscsi_put_endpoint(ep); return rc; } static int qedi_iscsi_update_conn(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn) { struct qed_iscsi_params_update *conn_info; struct iscsi_cls_conn *cls_conn = qedi_conn->cls_conn; struct iscsi_conn *conn = cls_conn->dd_data; struct qedi_endpoint *qedi_ep; int rval; qedi_ep = qedi_conn->ep; conn_info = kzalloc(sizeof(*conn_info), GFP_KERNEL); if (!conn_info) { QEDI_ERR(&qedi->dbg_ctx, "memory alloc failed\n"); return -ENOMEM; } conn_info->update_flag = 0; if (conn->hdrdgst_en) SET_FIELD(conn_info->update_flag, ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN, true); if (conn->datadgst_en) SET_FIELD(conn_info->update_flag, ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN, true); if (conn->session->initial_r2t_en) SET_FIELD(conn_info->update_flag, ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T, true); if (conn->session->imm_data_en) SET_FIELD(conn_info->update_flag, ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA, true); conn_info->max_seq_size = conn->session->max_burst; conn_info->max_recv_pdu_length = conn->max_recv_dlength; conn_info->max_send_pdu_length = conn->max_xmit_dlength; conn_info->first_seq_length = conn->session->first_burst; conn_info->exp_stat_sn = conn->exp_statsn; rval = qedi_ops->update_conn(qedi->cdev, qedi_ep->handle, conn_info); if (rval) { rval = -ENXIO; QEDI_ERR(&qedi->dbg_ctx, "Could not update connection\n"); } kfree(conn_info); return rval; } static u16 qedi_calc_mss(u16 pmtu, u8 is_ipv6, u8 tcp_ts_en, u8 vlan_en) { u16 mss = 0; u16 hdrs = TCP_HDR_LEN; if (is_ipv6) hdrs += IPV6_HDR_LEN; else hdrs += IPV4_HDR_LEN; mss = pmtu - hdrs; if (!mss) mss = DEF_MSS; return mss; } static int qedi_iscsi_offload_conn(struct qedi_endpoint *qedi_ep) { struct qed_iscsi_params_offload *conn_info; struct qedi_ctx *qedi = qedi_ep->qedi; int rval; int i; conn_info = kzalloc(sizeof(*conn_info), GFP_KERNEL); if (!conn_info) { QEDI_ERR(&qedi->dbg_ctx, "Failed to allocate memory ep=%p\n", qedi_ep); return -ENOMEM; } ether_addr_copy(conn_info->src.mac, qedi_ep->src_mac); ether_addr_copy(conn_info->dst.mac, qedi_ep->dst_mac); conn_info->src.ip[0] = ntohl(qedi_ep->src_addr[0]); conn_info->dst.ip[0] = ntohl(qedi_ep->dst_addr[0]); if (qedi_ep->ip_type == TCP_IPV4) { conn_info->ip_version = 0; QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, "After ntohl: src_addr=%pI4, dst_addr=%pI4\n", qedi_ep->src_addr, qedi_ep->dst_addr); } else { for (i = 1; i < 4; i++) { conn_info->src.ip[i] = ntohl(qedi_ep->src_addr[i]); conn_info->dst.ip[i] = ntohl(qedi_ep->dst_addr[i]); } conn_info->ip_version = 1; QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, "After ntohl: src_addr=%pI6, dst_addr=%pI6\n", qedi_ep->src_addr, qedi_ep->dst_addr); } conn_info->src.port = qedi_ep->src_port; conn_info->dst.port = qedi_ep->dst_port; conn_info->layer_code = ISCSI_SLOW_PATH_LAYER_CODE; conn_info->sq_pbl_addr = qedi_ep->sq_pbl_dma; conn_info->vlan_id = qedi_ep->vlan_id; SET_FIELD(conn_info->tcp_flags, TCP_OFFLOAD_PARAMS_TS_EN, 1); SET_FIELD(conn_info->tcp_flags, TCP_OFFLOAD_PARAMS_DA_EN, 1); SET_FIELD(conn_info->tcp_flags, TCP_OFFLOAD_PARAMS_DA_CNT_EN, 1); SET_FIELD(conn_info->tcp_flags, TCP_OFFLOAD_PARAMS_KA_EN, 1); conn_info->default_cq = (qedi_ep->fw_cid % qedi->num_queues); conn_info->ka_max_probe_cnt = DEF_KA_MAX_PROBE_COUNT; conn_info->dup_ack_theshold = 3; conn_info->rcv_wnd = 65535; conn_info->ss_thresh = 65535; conn_info->srtt = 300; conn_info->rtt_var = 150; conn_info->flow_label = 0; conn_info->ka_timeout = DEF_KA_TIMEOUT; conn_info->ka_interval = 
DEF_KA_INTERVAL; conn_info->max_rt_time = DEF_MAX_RT_TIME; conn_info->ttl = DEF_TTL; conn_info->tos_or_tc = DEF_TOS; conn_info->remote_port = qedi_ep->dst_port; conn_info->local_port = qedi_ep->src_port; conn_info->mss = qedi_calc_mss(qedi_ep->pmtu, (qedi_ep->ip_type == TCP_IPV6), 1, (qedi_ep->vlan_id != 0)); conn_info->cwnd = DEF_MAX_CWND * conn_info->mss; conn_info->rcv_wnd_scale = 4; conn_info->da_timeout_value = 200; conn_info->ack_frequency = 2; QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "Default cq index [%d], mss [%d]\n", conn_info->default_cq, conn_info->mss); /* Prepare the doorbell parameters */ qedi_ep->db_data.agg_flags = 0; qedi_ep->db_data.params = 0; SET_FIELD(qedi_ep->db_data.params, ISCSI_DB_DATA_DEST, DB_DEST_XCM); SET_FIELD(qedi_ep->db_data.params, ISCSI_DB_DATA_AGG_CMD, DB_AGG_CMD_MAX); SET_FIELD(qedi_ep->db_data.params, ISCSI_DB_DATA_AGG_VAL_SEL, DQ_XCM_ISCSI_SQ_PROD_CMD); SET_FIELD(qedi_ep->db_data.params, ISCSI_DB_DATA_BYPASS_EN, 1); /* Register doorbell with doorbell recovery mechanism */ rval = qedi_ops->common->db_recovery_add(qedi->cdev, qedi_ep->p_doorbell, &qedi_ep->db_data, DB_REC_WIDTH_32B, DB_REC_KERNEL); if (rval) { kfree(conn_info); return rval; } rval = qedi_ops->offload_conn(qedi->cdev, qedi_ep->handle, conn_info); if (rval) { /* delete doorbell from doorbell recovery mechanism */ rval = qedi_ops->common->db_recovery_del(qedi->cdev, qedi_ep->p_doorbell, &qedi_ep->db_data); QEDI_ERR(&qedi->dbg_ctx, "offload_conn returned %d, ep=%p\n", rval, qedi_ep); } kfree(conn_info); return rval; } static int qedi_conn_start(struct iscsi_cls_conn *cls_conn) { struct iscsi_conn *conn = cls_conn->dd_data; struct qedi_conn *qedi_conn = conn->dd_data; struct qedi_ctx *qedi; int rval; qedi = qedi_conn->qedi; rval = qedi_iscsi_update_conn(qedi, qedi_conn); if (rval) { iscsi_conn_printk(KERN_ALERT, conn, "conn_start: FW offload conn failed.\n"); rval = -EINVAL; goto start_err; } spin_lock(&qedi_conn->tmf_work_lock); qedi_conn->fw_cleanup_works = 0; qedi_conn->ep_disconnect_starting = false; spin_unlock(&qedi_conn->tmf_work_lock); qedi_conn->abrt_conn = 0; rval = iscsi_conn_start(cls_conn); if (rval) { iscsi_conn_printk(KERN_ALERT, conn, "iscsi_conn_start: FW offload conn failed!!\n"); } start_err: return rval; } static void qedi_conn_destroy(struct iscsi_cls_conn *cls_conn) { struct iscsi_conn *conn = cls_conn->dd_data; struct qedi_conn *qedi_conn = conn->dd_data; struct Scsi_Host *shost; struct qedi_ctx *qedi; shost = iscsi_session_to_shost(iscsi_conn_to_session(cls_conn)); qedi = iscsi_host_priv(shost); qedi_conn_free_login_resources(qedi, qedi_conn); iscsi_conn_teardown(cls_conn); } static int qedi_ep_get_param(struct iscsi_endpoint *ep, enum iscsi_param param, char *buf) { struct qedi_endpoint *qedi_ep = ep->dd_data; int len; if (!qedi_ep) return -ENOTCONN; switch (param) { case ISCSI_PARAM_CONN_PORT: len = sprintf(buf, "%hu\n", qedi_ep->dst_port); break; case ISCSI_PARAM_CONN_ADDRESS: if (qedi_ep->ip_type == TCP_IPV4) len = sprintf(buf, "%pI4\n", qedi_ep->dst_addr); else len = sprintf(buf, "%pI6\n", qedi_ep->dst_addr); break; default: return -ENOTCONN; } return len; } static int qedi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param, char *buf) { struct qedi_ctx *qedi; int len; qedi = iscsi_host_priv(shost); switch (param) { case ISCSI_HOST_PARAM_HWADDRESS: len = sysfs_format_mac(buf, qedi->mac, 6); break; case ISCSI_HOST_PARAM_NETDEV_NAME: len = sprintf(buf, "host%d\n", shost->host_no); break; case ISCSI_HOST_PARAM_IPADDRESS: if (qedi->ip_type == 
TCP_IPV4) len = sprintf(buf, "%pI4\n", qedi->src_ip); else len = sprintf(buf, "%pI6\n", qedi->src_ip); break; default: return iscsi_host_get_param(shost, param, buf); } return len; } static void qedi_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats) { struct iscsi_conn *conn = cls_conn->dd_data; struct qed_iscsi_stats iscsi_stats; struct Scsi_Host *shost; struct qedi_ctx *qedi; shost = iscsi_session_to_shost(iscsi_conn_to_session(cls_conn)); qedi = iscsi_host_priv(shost); qedi_ops->get_stats(qedi->cdev, &iscsi_stats); conn->txdata_octets = iscsi_stats.iscsi_tx_bytes_cnt; conn->rxdata_octets = iscsi_stats.iscsi_rx_bytes_cnt; conn->dataout_pdus_cnt = (uint32_t)iscsi_stats.iscsi_tx_data_pdu_cnt; conn->datain_pdus_cnt = (uint32_t)iscsi_stats.iscsi_rx_data_pdu_cnt; conn->r2t_pdus_cnt = (uint32_t)iscsi_stats.iscsi_rx_r2t_pdu_cnt; stats->txdata_octets = conn->txdata_octets; stats->rxdata_octets = conn->rxdata_octets; stats->scsicmd_pdus = conn->scsicmd_pdus_cnt; stats->dataout_pdus = conn->dataout_pdus_cnt; stats->scsirsp_pdus = conn->scsirsp_pdus_cnt; stats->datain_pdus = conn->datain_pdus_cnt; stats->r2t_pdus = conn->r2t_pdus_cnt; stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt; stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt; stats->digest_err = 0; stats->timeout_err = 0; strcpy(stats->custom[0].desc, "eh_abort_cnt"); stats->custom[0].value = conn->eh_abort_cnt; stats->custom_length = 1; } static void qedi_iscsi_prep_generic_pdu_bd(struct qedi_conn *qedi_conn) { struct scsi_sge *bd_tbl; bd_tbl = (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl; bd_tbl->sge_addr.hi = (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32); bd_tbl->sge_addr.lo = (u32)qedi_conn->gen_pdu.req_dma_addr; bd_tbl->sge_len = qedi_conn->gen_pdu.req_wr_ptr - qedi_conn->gen_pdu.req_buf; bd_tbl = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl; bd_tbl->sge_addr.hi = (u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32); bd_tbl->sge_addr.lo = (u32)qedi_conn->gen_pdu.resp_dma_addr; bd_tbl->sge_len = ISCSI_DEF_MAX_RECV_SEG_LEN; } static int qedi_iscsi_send_generic_request(struct iscsi_task *task) { struct qedi_cmd *cmd = task->dd_data; struct qedi_conn *qedi_conn = cmd->conn; char *buf; int data_len; int rc = 0; qedi_iscsi_prep_generic_pdu_bd(qedi_conn); switch (task->hdr->opcode & ISCSI_OPCODE_MASK) { case ISCSI_OP_LOGIN: qedi_send_iscsi_login(qedi_conn, task); break; case ISCSI_OP_NOOP_OUT: data_len = qedi_conn->gen_pdu.req_buf_size; buf = qedi_conn->gen_pdu.req_buf; if (data_len) rc = qedi_send_iscsi_nopout(qedi_conn, task, buf, data_len, 1); else rc = qedi_send_iscsi_nopout(qedi_conn, task, NULL, 0, 1); break; case ISCSI_OP_LOGOUT: rc = qedi_send_iscsi_logout(qedi_conn, task); break; case ISCSI_OP_SCSI_TMFUNC: rc = qedi_send_iscsi_tmf(qedi_conn, task); break; case ISCSI_OP_TEXT: rc = qedi_send_iscsi_text(qedi_conn, task); break; default: iscsi_conn_printk(KERN_ALERT, qedi_conn->cls_conn->dd_data, "unsupported op 0x%x\n", task->hdr->opcode); } return rc; } static int qedi_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task) { struct qedi_conn *qedi_conn = conn->dd_data; struct qedi_cmd *cmd = task->dd_data; memset(qedi_conn->gen_pdu.req_buf, 0, ISCSI_DEF_MAX_RECV_SEG_LEN); qedi_conn->gen_pdu.req_buf_size = task->data_count; if (task->data_count) { memcpy(qedi_conn->gen_pdu.req_buf, task->data, task->data_count); qedi_conn->gen_pdu.req_wr_ptr = qedi_conn->gen_pdu.req_buf + task->data_count; } cmd->conn = conn->dd_data; return qedi_iscsi_send_generic_request(task); } static int qedi_task_xmit(struct iscsi_task 
*task) { struct iscsi_conn *conn = task->conn; struct qedi_conn *qedi_conn = conn->dd_data; struct qedi_cmd *cmd = task->dd_data; struct scsi_cmnd *sc = task->sc; /* Clear now so in cleanup_task we know it didn't make it */ cmd->scsi_cmd = NULL; cmd->task_id = U16_MAX; if (test_bit(QEDI_IN_SHUTDOWN, &qedi_conn->qedi->flags)) return -ENODEV; if (test_bit(QEDI_BLOCK_IO, &qedi_conn->qedi->flags)) return -EACCES; cmd->state = 0; cmd->task = NULL; cmd->use_slowpath = false; cmd->conn = qedi_conn; cmd->task = task; cmd->io_cmd_in_list = false; INIT_LIST_HEAD(&cmd->io_cmd); if (!sc) return qedi_mtask_xmit(conn, task); cmd->scsi_cmd = sc; return qedi_iscsi_send_ioreq(task); } static void qedi_offload_work(struct work_struct *work) { struct qedi_endpoint *qedi_ep = container_of(work, struct qedi_endpoint, offload_work); struct qedi_ctx *qedi; int wait_delay = 5 * HZ; int ret; qedi = qedi_ep->qedi; ret = qedi_iscsi_offload_conn(qedi_ep); if (ret) { QEDI_ERR(&qedi->dbg_ctx, "offload error: iscsi_cid=%u, qedi_ep=%p, ret=%d\n", qedi_ep->iscsi_cid, qedi_ep, ret); qedi_ep->state = EP_STATE_OFLDCONN_FAILED; return; } ret = wait_event_interruptible_timeout(qedi_ep->tcp_ofld_wait, (qedi_ep->state == EP_STATE_OFLDCONN_COMPL), wait_delay); if (ret <= 0 || qedi_ep->state != EP_STATE_OFLDCONN_COMPL) { qedi_ep->state = EP_STATE_OFLDCONN_FAILED; QEDI_ERR(&qedi->dbg_ctx, "Offload conn TIMEOUT iscsi_cid=%u, qedi_ep=%p\n", qedi_ep->iscsi_cid, qedi_ep); } } static struct iscsi_endpoint * qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, int non_blocking) { struct qedi_ctx *qedi; struct iscsi_endpoint *ep; struct qedi_endpoint *qedi_ep; struct sockaddr_in *addr; struct sockaddr_in6 *addr6; struct iscsi_path path_req; u32 msg_type = ISCSI_KEVENT_IF_DOWN; u32 iscsi_cid = QEDI_CID_RESERVED; u16 len = 0; char *buf = NULL; int ret, tmp; if (!shost) { ret = -ENXIO; QEDI_ERR(NULL, "shost is NULL\n"); return ERR_PTR(ret); } if (qedi_do_not_recover) { ret = -ENOMEM; return ERR_PTR(ret); } qedi = iscsi_host_priv(shost); if (test_bit(QEDI_IN_OFFLINE, &qedi->flags) || test_bit(QEDI_IN_RECOVERY, &qedi->flags)) { ret = -ENOMEM; return ERR_PTR(ret); } if (atomic_read(&qedi->link_state) != QEDI_LINK_UP) { QEDI_WARN(&qedi->dbg_ctx, "qedi link down\n"); return ERR_PTR(-ENXIO); } ep = iscsi_create_endpoint(sizeof(struct qedi_endpoint)); if (!ep) { QEDI_ERR(&qedi->dbg_ctx, "endpoint create fail\n"); ret = -ENOMEM; return ERR_PTR(ret); } qedi_ep = ep->dd_data; memset(qedi_ep, 0, sizeof(struct qedi_endpoint)); INIT_WORK(&qedi_ep->offload_work, qedi_offload_work); qedi_ep->state = EP_STATE_IDLE; qedi_ep->iscsi_cid = (u32)-1; qedi_ep->qedi = qedi; if (dst_addr->sa_family == AF_INET) { addr = (struct sockaddr_in *)dst_addr; memcpy(qedi_ep->dst_addr, &addr->sin_addr.s_addr, sizeof(struct in_addr)); qedi_ep->dst_port = ntohs(addr->sin_port); qedi_ep->ip_type = TCP_IPV4; QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, "dst_addr=%pI4, dst_port=%u\n", qedi_ep->dst_addr, qedi_ep->dst_port); } else if (dst_addr->sa_family == AF_INET6) { addr6 = (struct sockaddr_in6 *)dst_addr; memcpy(qedi_ep->dst_addr, &addr6->sin6_addr, sizeof(struct in6_addr)); qedi_ep->dst_port = ntohs(addr6->sin6_port); qedi_ep->ip_type = TCP_IPV6; QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, "dst_addr=%pI6, dst_port=%u\n", qedi_ep->dst_addr, qedi_ep->dst_port); } else { QEDI_ERR(&qedi->dbg_ctx, "Invalid endpoint\n"); } ret = qedi_alloc_sq(qedi, qedi_ep); if (ret) goto ep_conn_exit; ret = qedi_ops->acquire_conn(qedi->cdev, &qedi_ep->handle, &qedi_ep->fw_cid, 
&qedi_ep->p_doorbell); if (ret) { QEDI_ERR(&qedi->dbg_ctx, "Could not acquire connection\n"); ret = -ENXIO; goto ep_free_sq; } iscsi_cid = qedi_ep->handle; qedi_ep->iscsi_cid = iscsi_cid; init_waitqueue_head(&qedi_ep->ofld_wait); init_waitqueue_head(&qedi_ep->tcp_ofld_wait); qedi_ep->state = EP_STATE_OFLDCONN_START; qedi->ep_tbl[iscsi_cid] = qedi_ep; buf = (char *)&path_req; len = sizeof(path_req); memset(&path_req, 0, len); msg_type = ISCSI_KEVENT_PATH_REQ; path_req.handle = (u64)qedi_ep->iscsi_cid; path_req.pmtu = qedi->ll2_mtu; qedi_ep->pmtu = qedi->ll2_mtu; if (qedi_ep->ip_type == TCP_IPV4) { memcpy(&path_req.dst.v4_addr, &qedi_ep->dst_addr, sizeof(struct in_addr)); path_req.ip_addr_len = 4; } else { memcpy(&path_req.dst.v6_addr, &qedi_ep->dst_addr, sizeof(struct in6_addr)); path_req.ip_addr_len = 16; } ret = iscsi_offload_mesg(shost, &qedi_iscsi_transport, msg_type, buf, len); if (ret) { QEDI_ERR(&qedi->dbg_ctx, "iscsi_offload_mesg() failed for cid=0x%x ret=%d\n", iscsi_cid, ret); goto ep_rel_conn; } atomic_inc(&qedi->num_offloads); return ep; ep_rel_conn: qedi->ep_tbl[iscsi_cid] = NULL; tmp = qedi_ops->release_conn(qedi->cdev, qedi_ep->handle); if (tmp) QEDI_WARN(&qedi->dbg_ctx, "release_conn returned %d\n", tmp); ep_free_sq: qedi_free_sq(qedi, qedi_ep); ep_conn_exit: iscsi_destroy_endpoint(ep); return ERR_PTR(ret); } static int qedi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) { struct qedi_endpoint *qedi_ep; int ret = 0; if (qedi_do_not_recover) return 1; qedi_ep = ep->dd_data; if (qedi_ep->state == EP_STATE_IDLE || qedi_ep->state == EP_STATE_OFLDCONN_NONE || qedi_ep->state == EP_STATE_OFLDCONN_FAILED) return -1; if (qedi_ep->state == EP_STATE_OFLDCONN_COMPL) ret = 1; ret = wait_event_interruptible_timeout(qedi_ep->ofld_wait, QEDI_OFLD_WAIT_STATE(qedi_ep), msecs_to_jiffies(timeout_ms)); if (qedi_ep->state == EP_STATE_OFLDCONN_FAILED) ret = -1; if (ret > 0) return 1; else if (!ret) return 0; else return ret; } static void qedi_cleanup_active_cmd_list(struct qedi_conn *qedi_conn) { struct qedi_cmd *cmd, *cmd_tmp; spin_lock(&qedi_conn->list_lock); list_for_each_entry_safe(cmd, cmd_tmp, &qedi_conn->active_cmd_list, io_cmd) { list_del_init(&cmd->io_cmd); qedi_conn->active_cmd_count--; } spin_unlock(&qedi_conn->list_lock); } static void qedi_ep_disconnect(struct iscsi_endpoint *ep) { struct qedi_endpoint *qedi_ep; struct qedi_conn *qedi_conn = NULL; struct qedi_ctx *qedi; int ret = 0; int wait_delay; int abrt_conn = 0; wait_delay = 60 * HZ + DEF_MAX_RT_TIME; qedi_ep = ep->dd_data; qedi = qedi_ep->qedi; flush_work(&qedi_ep->offload_work); if (qedi_ep->state == EP_STATE_OFLDCONN_START) goto ep_exit_recover; if (qedi_ep->conn) { qedi_conn = qedi_ep->conn; abrt_conn = qedi_conn->abrt_conn; QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "cid=0x%x qedi_ep=%p waiting for %d tmfs\n", qedi_ep->iscsi_cid, qedi_ep, qedi_conn->fw_cleanup_works); spin_lock(&qedi_conn->tmf_work_lock); qedi_conn->ep_disconnect_starting = true; while (qedi_conn->fw_cleanup_works > 0) { spin_unlock(&qedi_conn->tmf_work_lock); msleep(1000); spin_lock(&qedi_conn->tmf_work_lock); } spin_unlock(&qedi_conn->tmf_work_lock); if (test_bit(QEDI_IN_RECOVERY, &qedi->flags)) { if (qedi_do_not_recover) { QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "Do not recover cid=0x%x\n", qedi_ep->iscsi_cid); goto ep_exit_recover; } QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "Reset recovery cid=0x%x, qedi_ep=%p, state=0x%x\n", qedi_ep->iscsi_cid, qedi_ep, qedi_ep->state); qedi_cleanup_active_cmd_list(qedi_conn); goto ep_release_conn; } } if 
(qedi_do_not_recover) goto ep_exit_recover; switch (qedi_ep->state) { case EP_STATE_OFLDCONN_START: case EP_STATE_OFLDCONN_NONE: goto ep_release_conn; case EP_STATE_OFLDCONN_FAILED: break; case EP_STATE_OFLDCONN_COMPL: if (unlikely(!qedi_conn)) break; QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "Active cmd count=%d, abrt_conn=%d, ep state=0x%x, cid=0x%x, qedi_conn=%p\n", qedi_conn->active_cmd_count, abrt_conn, qedi_ep->state, qedi_ep->iscsi_cid, qedi_ep->conn ); if (!qedi_conn->active_cmd_count) abrt_conn = 0; else abrt_conn = 1; if (abrt_conn) qedi_clearsq(qedi, qedi_conn, NULL); break; default: break; } if (!abrt_conn) wait_delay += qedi->pf_params.iscsi_pf_params.two_msl_timer; qedi_ep->state = EP_STATE_DISCONN_START; if (test_bit(QEDI_IN_SHUTDOWN, &qedi->flags) || test_bit(QEDI_IN_RECOVERY, &qedi->flags)) goto ep_release_conn; /* Delete doorbell from doorbell recovery mechanism */ ret = qedi_ops->common->db_recovery_del(qedi->cdev, qedi_ep->p_doorbell, &qedi_ep->db_data); ret = qedi_ops->destroy_conn(qedi->cdev, qedi_ep->handle, abrt_conn); if (ret) { QEDI_WARN(&qedi->dbg_ctx, "destroy_conn failed returned %d\n", ret); } else { ret = wait_event_interruptible_timeout( qedi_ep->tcp_ofld_wait, (qedi_ep->state != EP_STATE_DISCONN_START), wait_delay); if ((ret <= 0) || (qedi_ep->state == EP_STATE_DISCONN_START)) { QEDI_WARN(&qedi->dbg_ctx, "Destroy conn timedout or interrupted, ret=%d, delay=%d, cid=0x%x\n", ret, wait_delay, qedi_ep->iscsi_cid); } } ep_release_conn: ret = qedi_ops->release_conn(qedi->cdev, qedi_ep->handle); if (ret) QEDI_WARN(&qedi->dbg_ctx, "release_conn returned %d, cid=0x%x\n", ret, qedi_ep->iscsi_cid); ep_exit_recover: qedi_ep->state = EP_STATE_IDLE; qedi->ep_tbl[qedi_ep->iscsi_cid] = NULL; qedi->cid_que.conn_cid_tbl[qedi_ep->iscsi_cid] = NULL; qedi_free_id(&qedi->lcl_port_tbl, qedi_ep->src_port); qedi_free_sq(qedi, qedi_ep); if (qedi_conn) qedi_conn->ep = NULL; qedi_ep->conn = NULL; qedi_ep->qedi = NULL; atomic_dec(&qedi->num_offloads); iscsi_destroy_endpoint(ep); } static int qedi_data_avail(struct qedi_ctx *qedi, u16 vlanid) { struct qed_dev *cdev = qedi->cdev; struct qedi_uio_dev *udev; struct qedi_uio_ctrl *uctrl; struct sk_buff *skb; u32 len; int rc = 0; udev = qedi->udev; if (!udev) { QEDI_ERR(&qedi->dbg_ctx, "udev is NULL.\n"); return -EINVAL; } uctrl = (struct qedi_uio_ctrl *)udev->uctrl; if (!uctrl) { QEDI_ERR(&qedi->dbg_ctx, "uctlr is NULL.\n"); return -EINVAL; } len = uctrl->host_tx_pkt_len; if (!len) { QEDI_ERR(&qedi->dbg_ctx, "Invalid len %u\n", len); return -EINVAL; } skb = alloc_skb(len, GFP_ATOMIC); if (!skb) { QEDI_ERR(&qedi->dbg_ctx, "alloc_skb failed\n"); return -EINVAL; } skb_put(skb, len); memcpy(skb->data, udev->tx_pkt, len); skb->ip_summed = CHECKSUM_NONE; if (vlanid) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid); rc = qedi_ops->ll2->start_xmit(cdev, skb, 0); if (rc) { QEDI_ERR(&qedi->dbg_ctx, "ll2 start_xmit returned %d\n", rc); kfree_skb(skb); } uctrl->host_tx_pkt_len = 0; uctrl->hw_tx_cons++; return rc; } static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data) { struct qedi_ctx *qedi; struct qedi_endpoint *qedi_ep; int ret = 0; u32 iscsi_cid; u16 port_id = 0; if (!shost) { ret = -ENXIO; QEDI_ERR(NULL, "shost is NULL\n"); return ret; } if (strcmp(shost->hostt->proc_name, "qedi")) { ret = -ENXIO; QEDI_ERR(NULL, "shost %s is invalid\n", shost->hostt->proc_name); return ret; } qedi = iscsi_host_priv(shost); if (path_data->handle == QEDI_PATH_HANDLE) { ret = qedi_data_avail(qedi, path_data->vlan_id); goto 
set_path_exit; } iscsi_cid = (u32)path_data->handle; if (iscsi_cid >= qedi->max_active_conns) { ret = -EINVAL; goto set_path_exit; } qedi_ep = qedi->ep_tbl[iscsi_cid]; QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "iscsi_cid=0x%x, qedi_ep=%p\n", iscsi_cid, qedi_ep); if (!qedi_ep) { ret = -EINVAL; goto set_path_exit; } if (!is_valid_ether_addr(&path_data->mac_addr[0])) { QEDI_NOTICE(&qedi->dbg_ctx, "dst mac NOT VALID\n"); qedi_ep->state = EP_STATE_OFLDCONN_NONE; ret = -EIO; goto set_path_exit; } ether_addr_copy(&qedi_ep->src_mac[0], &qedi->mac[0]); ether_addr_copy(&qedi_ep->dst_mac[0], &path_data->mac_addr[0]); qedi_ep->vlan_id = path_data->vlan_id; if (path_data->pmtu < DEF_PATH_MTU) { qedi_ep->pmtu = qedi->ll2_mtu; QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "MTU cannot be %u, using default MTU %u\n", path_data->pmtu, qedi_ep->pmtu); } if (path_data->pmtu != qedi->ll2_mtu) { if (path_data->pmtu > JUMBO_MTU) { ret = -EINVAL; QEDI_ERR(NULL, "Invalid MTU %u\n", path_data->pmtu); goto set_path_exit; } qedi_reset_host_mtu(qedi, path_data->pmtu); qedi_ep->pmtu = qedi->ll2_mtu; } port_id = qedi_ep->src_port; if (port_id >= QEDI_LOCAL_PORT_MIN && port_id < QEDI_LOCAL_PORT_MAX) { if (qedi_alloc_id(&qedi->lcl_port_tbl, port_id)) port_id = 0; } else { port_id = 0; } if (!port_id) { port_id = qedi_alloc_new_id(&qedi->lcl_port_tbl); if (port_id == QEDI_LOCAL_PORT_INVALID) { QEDI_ERR(&qedi->dbg_ctx, "Failed to allocate port id for iscsi_cid=0x%x\n", iscsi_cid); ret = -ENOMEM; goto set_path_exit; } } qedi_ep->src_port = port_id; if (qedi_ep->ip_type == TCP_IPV4) { memcpy(&qedi_ep->src_addr[0], &path_data->src.v4_addr, sizeof(struct in_addr)); memcpy(&qedi->src_ip[0], &path_data->src.v4_addr, sizeof(struct in_addr)); qedi->ip_type = TCP_IPV4; QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, "src addr:port=%pI4:%u, dst addr:port=%pI4:%u\n", qedi_ep->src_addr, qedi_ep->src_port, qedi_ep->dst_addr, qedi_ep->dst_port); } else { memcpy(&qedi_ep->src_addr[0], &path_data->src.v6_addr, sizeof(struct in6_addr)); memcpy(&qedi->src_ip[0], &path_data->src.v6_addr, sizeof(struct in6_addr)); qedi->ip_type = TCP_IPV6; QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, "src addr:port=%pI6:%u, dst addr:port=%pI6:%u\n", qedi_ep->src_addr, qedi_ep->src_port, qedi_ep->dst_addr, qedi_ep->dst_port); } queue_work(qedi->offload_thread, &qedi_ep->offload_work); ret = 0; set_path_exit: return ret; } static umode_t qedi_attr_is_visible(int param_type, int param) { switch (param_type) { case ISCSI_HOST_PARAM: switch (param) { case ISCSI_HOST_PARAM_NETDEV_NAME: case ISCSI_HOST_PARAM_HWADDRESS: case ISCSI_HOST_PARAM_IPADDRESS: return 0444; default: return 0; } case ISCSI_PARAM: switch (param) { case ISCSI_PARAM_MAX_RECV_DLENGTH: case ISCSI_PARAM_MAX_XMIT_DLENGTH: case ISCSI_PARAM_HDRDGST_EN: case ISCSI_PARAM_DATADGST_EN: case ISCSI_PARAM_CONN_ADDRESS: case ISCSI_PARAM_CONN_PORT: case ISCSI_PARAM_EXP_STATSN: case ISCSI_PARAM_PERSISTENT_ADDRESS: case ISCSI_PARAM_PERSISTENT_PORT: case ISCSI_PARAM_PING_TMO: case ISCSI_PARAM_RECV_TMO: case ISCSI_PARAM_INITIAL_R2T_EN: case ISCSI_PARAM_MAX_R2T: case ISCSI_PARAM_IMM_DATA_EN: case ISCSI_PARAM_FIRST_BURST: case ISCSI_PARAM_MAX_BURST: case ISCSI_PARAM_PDU_INORDER_EN: case ISCSI_PARAM_DATASEQ_INORDER_EN: case ISCSI_PARAM_ERL: case ISCSI_PARAM_TARGET_NAME: case ISCSI_PARAM_TPGT: case ISCSI_PARAM_USERNAME: case ISCSI_PARAM_PASSWORD: case ISCSI_PARAM_USERNAME_IN: case ISCSI_PARAM_PASSWORD_IN: case ISCSI_PARAM_FAST_ABORT: case ISCSI_PARAM_ABORT_TMO: case ISCSI_PARAM_LU_RESET_TMO: case ISCSI_PARAM_TGT_RESET_TMO: case 
ISCSI_PARAM_IFACE_NAME: case ISCSI_PARAM_INITIATOR_NAME: case ISCSI_PARAM_BOOT_ROOT: case ISCSI_PARAM_BOOT_NIC: case ISCSI_PARAM_BOOT_TARGET: return 0444; default: return 0; } } return 0; } static void qedi_cleanup_task(struct iscsi_task *task) { struct qedi_cmd *cmd; if (task->state == ISCSI_TASK_PENDING) { QEDI_INFO(NULL, QEDI_LOG_IO, "Returning ref_cnt=%d\n", refcount_read(&task->refcount)); return; } if (task->sc) qedi_iscsi_unmap_sg_list(task->dd_data); cmd = task->dd_data; if (cmd->task_id != U16_MAX) qedi_clear_task_idx(iscsi_host_priv(task->conn->session->host), cmd->task_id); cmd->task_id = U16_MAX; cmd->scsi_cmd = NULL; } struct iscsi_transport qedi_iscsi_transport = { .owner = THIS_MODULE, .name = QEDI_MODULE_NAME, .caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD | CAP_TEXT_NEGO, .create_session = qedi_session_create, .destroy_session = qedi_session_destroy, .create_conn = qedi_conn_create, .bind_conn = qedi_conn_bind, .unbind_conn = iscsi_conn_unbind, .start_conn = qedi_conn_start, .stop_conn = iscsi_conn_stop, .destroy_conn = qedi_conn_destroy, .set_param = iscsi_set_param, .get_ep_param = qedi_ep_get_param, .get_conn_param = iscsi_conn_get_param, .get_session_param = iscsi_session_get_param, .get_host_param = qedi_host_get_param, .send_pdu = iscsi_conn_send_pdu, .get_stats = qedi_conn_get_stats, .xmit_task = qedi_task_xmit, .cleanup_task = qedi_cleanup_task, .session_recovery_timedout = iscsi_session_recovery_timedout, .ep_connect = qedi_ep_connect, .ep_poll = qedi_ep_poll, .ep_disconnect = qedi_ep_disconnect, .set_path = qedi_set_path, .attr_is_visible = qedi_attr_is_visible, }; void qedi_start_conn_recovery(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn) { struct iscsi_cls_session *cls_sess; struct iscsi_cls_conn *cls_conn; struct iscsi_conn *conn; cls_conn = qedi_conn->cls_conn; conn = cls_conn->dd_data; cls_sess = iscsi_conn_to_session(cls_conn); if (iscsi_is_session_online(cls_sess)) { qedi_conn->abrt_conn = 1; QEDI_ERR(&qedi->dbg_ctx, "Failing connection, state=0x%x, cid=0x%x\n", conn->session->state, qedi_conn->iscsi_conn_id); iscsi_conn_failure(qedi_conn->cls_conn->dd_data, ISCSI_ERR_CONN_FAILED); } } static const struct { enum iscsi_error_types error_code; char *err_string; } qedi_iscsi_error[] = { { ISCSI_STATUS_NONE, "tcp_error none" }, { ISCSI_CONN_ERROR_TASK_CID_MISMATCH, "task cid mismatch" }, { ISCSI_CONN_ERROR_TASK_NOT_VALID, "invalid task" }, { ISCSI_CONN_ERROR_RQ_RING_IS_FULL, "rq ring full" }, { ISCSI_CONN_ERROR_CMDQ_RING_IS_FULL, "cmdq ring full" }, { ISCSI_CONN_ERROR_HQE_CACHING_FAILED, "sge caching failed" }, { ISCSI_CONN_ERROR_HEADER_DIGEST_ERROR, "hdr digest error" }, { ISCSI_CONN_ERROR_LOCAL_COMPLETION_ERROR, "local cmpl error" }, { ISCSI_CONN_ERROR_DATA_OVERRUN, "invalid task" }, { ISCSI_CONN_ERROR_OUT_OF_SGES_ERROR, "out of sge error" }, { ISCSI_CONN_ERROR_TCP_IP_FRAGMENT_ERROR, "tcp ip fragment error" }, { ISCSI_CONN_ERROR_PROTOCOL_ERR_AHS_LEN, "AHS len protocol error" }, { ISCSI_CONN_ERROR_PROTOCOL_ERR_ITT_OUT_OF_RANGE, "itt out of range error" }, { ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SEG_LEN_EXCEEDS_PDU_SIZE, "data seg more than pdu size" }, { ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_OPCODE, "invalid opcode" }, { ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_OPCODE_BEFORE_UPDATE, "invalid opcode before update" }, { ISCSI_CONN_ERROR_UNVALID_NOPIN_DSL, "unexpected opcode" }, { ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_CARRIES_NO_DATA, "r2t carries no data" }, { ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SN, "data sn error" }, { 
ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_IN_TTT, "data TTT error" }, { ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_TTT, "r2t TTT error" }, { ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_BUFFER_OFFSET, "buffer offset error" }, { ISCSI_CONN_ERROR_PROTOCOL_ERR_BUFFER_OFFSET_OOO, "buffer offset ooo" }, { ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_SN, "data seg len 0" }, { ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0, "data xer len error" }, { ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1, "data xer len1 error" }, { ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_2, "data xer len2 error" }, { ISCSI_CONN_ERROR_PROTOCOL_ERR_LUN, "protocol lun error" }, { ISCSI_CONN_ERROR_PROTOCOL_ERR_F_BIT_ZERO, "f bit zero error" }, { ISCSI_CONN_ERROR_PROTOCOL_ERR_EXP_STAT_SN, "exp stat sn error" }, { ISCSI_CONN_ERROR_PROTOCOL_ERR_DSL_NOT_ZERO, "dsl not zero error" }, { ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_DSL, "invalid dsl" }, { ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SEG_LEN_TOO_BIG, "data seg len too big" }, { ISCSI_CONN_ERROR_PROTOCOL_ERR_OUTSTANDING_R2T_COUNT, "outstanding r2t count error" }, { ISCSI_CONN_ERROR_SENSE_DATA_LENGTH, "sense datalen error" }, }; static char *qedi_get_iscsi_error(enum iscsi_error_types err_code) { int i; char *msg = NULL; for (i = 0; i < ARRAY_SIZE(qedi_iscsi_error); i++) { if (qedi_iscsi_error[i].error_code == err_code) { msg = qedi_iscsi_error[i].err_string; break; } } return msg; } void qedi_process_iscsi_error(struct qedi_endpoint *ep, struct iscsi_eqe_data *data) { struct qedi_conn *qedi_conn; struct qedi_ctx *qedi; char warn_notice[] = "iscsi_warning"; char error_notice[] = "iscsi_error"; char unknown_msg[] = "Unknown error"; char *message; int need_recovery = 0; u32 err_mask = 0; char *msg; if (!ep) return; qedi_conn = ep->conn; if (!qedi_conn) return; qedi = ep->qedi; QEDI_ERR(&qedi->dbg_ctx, "async event iscsi error:0x%x\n", data->error_code); if (err_mask) { need_recovery = 0; message = warn_notice; } else { need_recovery = 1; message = error_notice; } msg = qedi_get_iscsi_error(data->error_code); if (!msg) { need_recovery = 0; msg = unknown_msg; } iscsi_conn_printk(KERN_ALERT, qedi_conn->cls_conn->dd_data, "qedi: %s - %s\n", message, msg); if (need_recovery) qedi_start_conn_recovery(qedi_conn->qedi, qedi_conn); } void qedi_process_tcp_error(struct qedi_endpoint *ep, struct iscsi_eqe_data *data) { struct qedi_conn *qedi_conn; if (!ep) return; qedi_conn = ep->conn; if (!qedi_conn) return; QEDI_ERR(&ep->qedi->dbg_ctx, "async event TCP error:0x%x\n", data->error_code); qedi_start_conn_recovery(qedi_conn->qedi, qedi_conn); }
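/*
 * Standalone sketch of the MSS arithmetic done by qedi_calc_mss() above,
 * assuming the conventional fixed header sizes (20-byte TCP header,
 * 20-byte IPv4 / 40-byte IPv6 header) that the driver's TCP_HDR_LEN,
 * IPV4_HDR_LEN and IPV6_HDR_LEN constants stand for.  Not driver code.
 */
#include <stdio.h>

#define SKETCH_TCP_HDR_LEN  20u
#define SKETCH_IPV4_HDR_LEN 20u
#define SKETCH_IPV6_HDR_LEN 40u

static unsigned int mss_sketch(unsigned int pmtu, int is_ipv6)
{
	unsigned int hdrs = SKETCH_TCP_HDR_LEN +
			    (is_ipv6 ? SKETCH_IPV6_HDR_LEN : SKETCH_IPV4_HDR_LEN);

	/* MSS is simply the path MTU minus the fixed L3+L4 header overhead. */
	return pmtu - hdrs;
}

int main(void)
{
	printf("IPv4 MSS for MTU 1500: %u\n", mss_sketch(1500, 0));	/* 1460 */
	printf("IPv6 MSS for MTU 1500: %u\n", mss_sketch(1500, 1));	/* 1440 */
	return 0;
}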
linux-master
drivers/scsi/qedi/qedi_iscsi.c
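/*
 * The firmware task-context code that follows builds its contexts almost
 * entirely with the qed SET_FIELD()/GET_FIELD() helpers.  This is a
 * simplified standalone sketch of that mask-and-shift idiom; the DEMO_*
 * names and macro bodies are illustrative only and are not the qed
 * definitions.
 */
#include <stdio.h>

/* Each field is described by a _MASK (width) and a _SHIFT (bit position). */
#define DEMO_WQE_TYPE_MASK   0x7u
#define DEMO_WQE_TYPE_SHIFT  0
#define DEMO_NUM_SGES_MASK   0xFu
#define DEMO_NUM_SGES_SHIFT  3

#define DEMO_SET_FIELD(var, field, val)					\
	((var) = ((var) & ~((field##_MASK) << (field##_SHIFT))) |	\
		 (((unsigned int)(val) & (field##_MASK)) << (field##_SHIFT)))

#define DEMO_GET_FIELD(var, field)					\
	(((var) >> (field##_SHIFT)) & (field##_MASK))

int main(void)
{
	unsigned int flags = 0;

	DEMO_SET_FIELD(flags, DEMO_WQE_TYPE, 2);	/* e.g. a "normal" WQE */
	DEMO_SET_FIELD(flags, DEMO_NUM_SGES, 5);	/* five cached SGEs    */

	/* Prints flags=0x2a type=2 sges=5 */
	printf("flags=0x%x type=%u sges=%u\n", flags,
	       DEMO_GET_FIELD(flags, DEMO_WQE_TYPE),
	       DEMO_GET_FIELD(flags, DEMO_NUM_SGES));
	return 0;
}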
// SPDX-License-Identifier: GPL-2.0-only /* QLogic iSCSI Offload Driver * Copyright (c) 2016 Cavium Inc. */ #include <linux/types.h> #include <asm/byteorder.h> #include "qedi_hsi.h" #include <linux/qed/qed_if.h> #include "qedi_fw_iscsi.h" #include "qedi_fw_scsi.h" #define SCSI_NUM_SGES_IN_CACHE 0x4 static bool scsi_is_slow_sgl(u16 num_sges, bool small_mid_sge) { return (num_sges > SCSI_NUM_SGES_SLOW_SGL_THR && small_mid_sge); } static void init_scsi_sgl_context(struct scsi_sgl_params *ctx_sgl_params, struct scsi_cached_sges *ctx_data_desc, struct scsi_sgl_task_params *sgl_task_params) { u8 sge_index; u8 num_sges; u32 val; num_sges = (sgl_task_params->num_sges > SCSI_NUM_SGES_IN_CACHE) ? SCSI_NUM_SGES_IN_CACHE : sgl_task_params->num_sges; /* sgl params */ val = cpu_to_le32(sgl_task_params->sgl_phys_addr.lo); ctx_sgl_params->sgl_addr.lo = val; val = cpu_to_le32(sgl_task_params->sgl_phys_addr.hi); ctx_sgl_params->sgl_addr.hi = val; val = cpu_to_le32(sgl_task_params->total_buffer_size); ctx_sgl_params->sgl_total_length = val; ctx_sgl_params->sgl_num_sges = cpu_to_le16(sgl_task_params->num_sges); for (sge_index = 0; sge_index < num_sges; sge_index++) { val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_addr.lo); ctx_data_desc->sge[sge_index].sge_addr.lo = val; val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_addr.hi); ctx_data_desc->sge[sge_index].sge_addr.hi = val; val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_len); ctx_data_desc->sge[sge_index].sge_len = val; } } static u32 calc_rw_task_size(struct iscsi_task_params *task_params, enum iscsi_task_type task_type, struct scsi_sgl_task_params *sgl_task_params, struct scsi_dif_task_params *dif_task_params) { u32 io_size; if (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE || task_type == ISCSI_TASK_TYPE_TARGET_READ) io_size = task_params->tx_io_size; else io_size = task_params->rx_io_size; if (!io_size) return 0; if (!dif_task_params) return io_size; return !dif_task_params->dif_on_network ? io_size : sgl_task_params->total_buffer_size; } static void init_dif_context_flags(struct iscsi_dif_flags *ctx_dif_flags, struct scsi_dif_task_params *dif_task_params) { if (!dif_task_params) return; SET_FIELD(ctx_dif_flags->flags, ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG, dif_task_params->dif_block_size_log); SET_FIELD(ctx_dif_flags->flags, ISCSI_DIF_FLAGS_DIF_TO_PEER, dif_task_params->dif_on_network ? 1 : 0); SET_FIELD(ctx_dif_flags->flags, ISCSI_DIF_FLAGS_HOST_INTERFACE, dif_task_params->dif_on_host ? 
1 : 0); } static void init_sqe(struct iscsi_task_params *task_params, struct scsi_sgl_task_params *sgl_task_params, struct scsi_dif_task_params *dif_task_params, struct iscsi_common_hdr *pdu_header, struct scsi_initiator_cmd_params *cmd_params, enum iscsi_task_type task_type, bool is_cleanup) { if (!task_params->sqe) return; memset(task_params->sqe, 0, sizeof(*task_params->sqe)); task_params->sqe->task_id = cpu_to_le16(task_params->itid); if (is_cleanup) { SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE, ISCSI_WQE_TYPE_TASK_CLEANUP); return; } switch (task_type) { case ISCSI_TASK_TYPE_INITIATOR_WRITE: { u32 buf_size = 0; u32 num_sges = 0; init_dif_context_flags(&task_params->sqe->prot_flags, dif_task_params); SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE, ISCSI_WQE_TYPE_NORMAL); if (task_params->tx_io_size) { buf_size = calc_rw_task_size(task_params, task_type, sgl_task_params, dif_task_params); if (scsi_is_slow_sgl(sgl_task_params->num_sges, sgl_task_params->small_mid_sge)) num_sges = ISCSI_WQE_NUM_SGES_SLOWIO; else num_sges = min(sgl_task_params->num_sges, (u16)SCSI_NUM_SGES_SLOW_SGL_THR); } SET_FIELD(task_params->sqe->flags, ISCSI_WQE_NUM_SGES, num_sges); SET_FIELD(task_params->sqe->contlen_cdbsize, ISCSI_WQE_CONT_LEN, buf_size); if (GET_FIELD(pdu_header->hdr_second_dword, ISCSI_CMD_HDR_TOTAL_AHS_LEN)) SET_FIELD(task_params->sqe->contlen_cdbsize, ISCSI_WQE_CDB_SIZE, cmd_params->extended_cdb_sge.sge_len); } break; case ISCSI_TASK_TYPE_INITIATOR_READ: SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE, ISCSI_WQE_TYPE_NORMAL); if (GET_FIELD(pdu_header->hdr_second_dword, ISCSI_CMD_HDR_TOTAL_AHS_LEN)) SET_FIELD(task_params->sqe->contlen_cdbsize, ISCSI_WQE_CDB_SIZE, cmd_params->extended_cdb_sge.sge_len); break; case ISCSI_TASK_TYPE_LOGIN_RESPONSE: case ISCSI_TASK_TYPE_MIDPATH: { bool advance_statsn = true; if (task_type == ISCSI_TASK_TYPE_LOGIN_RESPONSE) SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE, ISCSI_WQE_TYPE_LOGIN); else SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE, ISCSI_WQE_TYPE_MIDDLE_PATH); if (task_type == ISCSI_TASK_TYPE_MIDPATH) { u8 opcode = GET_FIELD(pdu_header->hdr_first_byte, ISCSI_COMMON_HDR_OPCODE); if (opcode != ISCSI_OPCODE_TEXT_RESPONSE && (opcode != ISCSI_OPCODE_NOP_IN || pdu_header->itt == ISCSI_TTT_ALL_ONES)) advance_statsn = false; } SET_FIELD(task_params->sqe->flags, ISCSI_WQE_RESPONSE, advance_statsn ? 
1 : 0); if (task_params->tx_io_size) { SET_FIELD(task_params->sqe->contlen_cdbsize, ISCSI_WQE_CONT_LEN, task_params->tx_io_size); if (scsi_is_slow_sgl(sgl_task_params->num_sges, sgl_task_params->small_mid_sge)) SET_FIELD(task_params->sqe->flags, ISCSI_WQE_NUM_SGES, ISCSI_WQE_NUM_SGES_SLOWIO); else SET_FIELD(task_params->sqe->flags, ISCSI_WQE_NUM_SGES, min(sgl_task_params->num_sges, (u16)SCSI_NUM_SGES_SLOW_SGL_THR)); } } break; default: break; } } static void init_default_iscsi_task(struct iscsi_task_params *task_params, struct data_hdr *pdu_header, enum iscsi_task_type task_type) { struct iscsi_task_context *context; u32 val; u16 index; u8 val_byte; context = task_params->context; val_byte = context->mstorm_ag_context.cdu_validation; memset(context, 0, sizeof(*context)); context->mstorm_ag_context.cdu_validation = val_byte; for (index = 0; index < ARRAY_SIZE(context->ystorm_st_context.pdu_hdr.data.data); index++) { val = cpu_to_le32(pdu_header->data[index]); context->ystorm_st_context.pdu_hdr.data.data[index] = val; } context->mstorm_st_context.task_type = task_type; context->mstorm_ag_context.task_cid = cpu_to_le16(task_params->conn_icid); SET_FIELD(context->ustorm_ag_context.flags1, USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1); context->ustorm_st_context.task_type = task_type; context->ustorm_st_context.cq_rss_number = task_params->cq_rss_number; context->ustorm_ag_context.icid = cpu_to_le16(task_params->conn_icid); } static void init_initiator_rw_cdb_ystorm_context(struct ystorm_iscsi_task_st_ctx *ystc, struct scsi_initiator_cmd_params *cmd) { union iscsi_task_hdr *ctx_pdu_hdr = &ystc->pdu_hdr; u32 val; if (!cmd->extended_cdb_sge.sge_len) return; SET_FIELD(ctx_pdu_hdr->ext_cdb_cmd.hdr_second_dword, ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE, cmd->extended_cdb_sge.sge_len); val = cpu_to_le32(cmd->extended_cdb_sge.sge_addr.lo); ctx_pdu_hdr->ext_cdb_cmd.cdb_sge.sge_addr.lo = val; val = cpu_to_le32(cmd->extended_cdb_sge.sge_addr.hi); ctx_pdu_hdr->ext_cdb_cmd.cdb_sge.sge_addr.hi = val; val = cpu_to_le32(cmd->extended_cdb_sge.sge_len); ctx_pdu_hdr->ext_cdb_cmd.cdb_sge.sge_len = val; } static void init_ustorm_task_contexts(struct ustorm_iscsi_task_st_ctx *ustorm_st_cxt, struct ustorm_iscsi_task_ag_ctx *ustorm_ag_cxt, u32 remaining_recv_len, u32 expected_data_transfer_len, u8 num_sges, bool tx_dif_conn_err_en) { u32 val; ustorm_st_cxt->rem_rcv_len = cpu_to_le32(remaining_recv_len); ustorm_ag_cxt->exp_data_acked = cpu_to_le32(expected_data_transfer_len); val = cpu_to_le32(expected_data_transfer_len); ustorm_st_cxt->exp_data_transfer_len = val; SET_FIELD(ustorm_st_cxt->reg1.reg1_map, ISCSI_REG1_NUM_SGES, num_sges); SET_FIELD(ustorm_ag_cxt->flags2, USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN, tx_dif_conn_err_en ? 
1 : 0); } static void set_rw_exp_data_acked_and_cont_len(struct iscsi_task_context *context, struct iscsi_conn_params *conn_params, enum iscsi_task_type task_type, u32 task_size, u32 exp_data_transfer_len, u8 total_ahs_length) { u32 max_unsolicited_data = 0, val; if (total_ahs_length && (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE || task_type == ISCSI_TASK_TYPE_INITIATOR_READ)) SET_FIELD(context->ustorm_st_context.flags2, USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST, 1); switch (task_type) { case ISCSI_TASK_TYPE_INITIATOR_WRITE: if (!conn_params->initial_r2t) max_unsolicited_data = conn_params->first_burst_length; else if (conn_params->immediate_data) max_unsolicited_data = min(conn_params->first_burst_length, conn_params->max_send_pdu_length); context->ustorm_ag_context.exp_data_acked = cpu_to_le32(total_ahs_length == 0 ? min(exp_data_transfer_len, max_unsolicited_data) : ((u32)(total_ahs_length + ISCSI_AHS_CNTL_SIZE))); break; case ISCSI_TASK_TYPE_TARGET_READ: val = cpu_to_le32(exp_data_transfer_len); context->ustorm_ag_context.exp_data_acked = val; break; case ISCSI_TASK_TYPE_INITIATOR_READ: context->ustorm_ag_context.exp_data_acked = cpu_to_le32((total_ahs_length == 0 ? 0 : total_ahs_length + ISCSI_AHS_CNTL_SIZE)); break; case ISCSI_TASK_TYPE_TARGET_WRITE: val = cpu_to_le32(task_size); context->ustorm_ag_context.exp_cont_len = val; break; default: break; } } static void init_rtdif_task_context(struct rdif_task_context *rdif_context, struct tdif_task_context *tdif_context, struct scsi_dif_task_params *dif_task_params, enum iscsi_task_type task_type) { u32 val; if (!dif_task_params->dif_on_network || !dif_task_params->dif_on_host) return; if (task_type == ISCSI_TASK_TYPE_TARGET_WRITE || task_type == ISCSI_TASK_TYPE_INITIATOR_READ) { rdif_context->app_tag_value = cpu_to_le16(dif_task_params->application_tag); rdif_context->partial_crc_value = cpu_to_le16(0xffff); val = cpu_to_le32(dif_task_params->initial_ref_tag); rdif_context->initial_ref_tag = val; rdif_context->app_tag_mask = cpu_to_le16(dif_task_params->application_tag_mask); SET_FIELD(rdif_context->flags0, RDIF_TASK_CONTEXT_CRC_SEED, dif_task_params->crc_seed ? 1 : 0); SET_FIELD(rdif_context->flags0, RDIF_TASK_CONTEXT_HOST_GUARD_TYPE, dif_task_params->host_guard_type); SET_FIELD(rdif_context->flags0, RDIF_TASK_CONTEXT_PROTECTION_TYPE, dif_task_params->protection_type); SET_FIELD(rdif_context->flags0, RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID, 1); SET_FIELD(rdif_context->flags0, RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST, dif_task_params->keep_ref_tag_const ? 1 : 0); SET_FIELD(rdif_context->flags1, RDIF_TASK_CONTEXT_VALIDATE_APP_TAG, (dif_task_params->validate_app_tag && dif_task_params->dif_on_network) ? 1 : 0); SET_FIELD(rdif_context->flags1, RDIF_TASK_CONTEXT_VALIDATE_GUARD, (dif_task_params->validate_guard && dif_task_params->dif_on_network) ? 1 : 0); SET_FIELD(rdif_context->flags1, RDIF_TASK_CONTEXT_VALIDATE_REF_TAG, (dif_task_params->validate_ref_tag && dif_task_params->dif_on_network) ? 1 : 0); SET_FIELD(rdif_context->flags1, RDIF_TASK_CONTEXT_HOST_INTERFACE, dif_task_params->dif_on_host ? 1 : 0); SET_FIELD(rdif_context->flags1, RDIF_TASK_CONTEXT_NETWORK_INTERFACE, dif_task_params->dif_on_network ? 1 : 0); SET_FIELD(rdif_context->flags1, RDIF_TASK_CONTEXT_FORWARD_GUARD, dif_task_params->forward_guard ? 1 : 0); SET_FIELD(rdif_context->flags1, RDIF_TASK_CONTEXT_FORWARD_APP_TAG, dif_task_params->forward_app_tag ? 1 : 0); SET_FIELD(rdif_context->flags1, RDIF_TASK_CONTEXT_FORWARD_REF_TAG, dif_task_params->forward_ref_tag ? 
1 : 0); SET_FIELD(rdif_context->flags1, RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK, dif_task_params->forward_app_tag_with_mask ? 1 : 0); SET_FIELD(rdif_context->flags1, RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK, dif_task_params->forward_ref_tag_with_mask ? 1 : 0); SET_FIELD(rdif_context->flags1, RDIF_TASK_CONTEXT_INTERVAL_SIZE, dif_task_params->dif_block_size_log - 9); SET_FIELD(rdif_context->state, RDIF_TASK_CONTEXT_REF_TAG_MASK, dif_task_params->ref_tag_mask); SET_FIELD(rdif_context->state, RDIF_TASK_CONTEXT_IGNORE_APP_TAG, dif_task_params->ignore_app_tag); } if (task_type == ISCSI_TASK_TYPE_TARGET_READ || task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) { tdif_context->app_tag_value = cpu_to_le16(dif_task_params->application_tag); tdif_context->partial_crc_value_b = cpu_to_le16(dif_task_params->crc_seed ? 0xffff : 0x0000); tdif_context->partial_crc_value_a = cpu_to_le16(dif_task_params->crc_seed ? 0xffff : 0x0000); SET_FIELD(tdif_context->flags0, TDIF_TASK_CONTEXT_CRC_SEED, dif_task_params->crc_seed ? 1 : 0); SET_FIELD(tdif_context->flags0, TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP, dif_task_params->tx_dif_conn_err_en ? 1 : 0); SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARD_GUARD, dif_task_params->forward_guard ? 1 : 0); SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARD_APP_TAG, dif_task_params->forward_app_tag ? 1 : 0); SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARD_REF_TAG, dif_task_params->forward_ref_tag ? 1 : 0); SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_INTERVAL_SIZE, dif_task_params->dif_block_size_log - 9); SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_HOST_INTERFACE, dif_task_params->dif_on_host ? 1 : 0); SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_NETWORK_INTERFACE, dif_task_params->dif_on_network ? 1 : 0); val = cpu_to_le32(dif_task_params->initial_ref_tag); tdif_context->initial_ref_tag = val; tdif_context->app_tag_mask = cpu_to_le16(dif_task_params->application_tag_mask); SET_FIELD(tdif_context->flags0, TDIF_TASK_CONTEXT_HOST_GUARD_TYPE, dif_task_params->host_guard_type); SET_FIELD(tdif_context->flags0, TDIF_TASK_CONTEXT_PROTECTION_TYPE, dif_task_params->protection_type); SET_FIELD(tdif_context->flags0, TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID, dif_task_params->initial_ref_tag_is_valid ? 1 : 0); SET_FIELD(tdif_context->flags0, TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST, dif_task_params->keep_ref_tag_const ? 1 : 0); SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_VALIDATE_GUARD, (dif_task_params->validate_guard && dif_task_params->dif_on_host) ? 1 : 0); SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_VALIDATE_APP_TAG, (dif_task_params->validate_app_tag && dif_task_params->dif_on_host) ? 1 : 0); SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_VALIDATE_REF_TAG, (dif_task_params->validate_ref_tag && dif_task_params->dif_on_host) ? 1 : 0); SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK, dif_task_params->forward_app_tag_with_mask ? 1 : 0); SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK, dif_task_params->forward_ref_tag_with_mask ? 1 : 0); SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_REF_TAG_MASK, dif_task_params->ref_tag_mask); SET_FIELD(tdif_context->flags0, TDIF_TASK_CONTEXT_IGNORE_APP_TAG, dif_task_params->ignore_app_tag ? 
1 : 0); } } static void set_local_completion_context(struct iscsi_task_context *context) { SET_FIELD(context->ystorm_st_context.state.flags, YSTORM_ISCSI_TASK_STATE_LOCAL_COMP, 1); SET_FIELD(context->ustorm_st_context.flags, USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 1); } static int init_rw_iscsi_task(struct iscsi_task_params *task_params, enum iscsi_task_type task_type, struct iscsi_conn_params *conn_params, struct iscsi_common_hdr *pdu_header, struct scsi_sgl_task_params *sgl_task_params, struct scsi_initiator_cmd_params *cmd_params, struct scsi_dif_task_params *dif_task_params) { u32 exp_data_transfer_len = conn_params->max_burst_length; struct iscsi_task_context *cxt; bool slow_io = false; u32 task_size, val; u8 num_sges = 0; task_size = calc_rw_task_size(task_params, task_type, sgl_task_params, dif_task_params); init_default_iscsi_task(task_params, (struct data_hdr *)pdu_header, task_type); cxt = task_params->context; if (task_type == ISCSI_TASK_TYPE_TARGET_READ) { set_local_completion_context(cxt); } else if (task_type == ISCSI_TASK_TYPE_TARGET_WRITE) { val = cpu_to_le32(task_size + ((struct iscsi_r2t_hdr *)pdu_header)->buffer_offset); cxt->ystorm_st_context.pdu_hdr.r2t.desired_data_trns_len = val; cxt->mstorm_st_context.expected_itt = cpu_to_le32(pdu_header->itt); } else { val = cpu_to_le32(task_size); cxt->ystorm_st_context.pdu_hdr.cmd.expected_transfer_length = val; init_initiator_rw_cdb_ystorm_context(&cxt->ystorm_st_context, cmd_params); val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.lo); cxt->mstorm_st_context.sense_db.lo = val; val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.hi); cxt->mstorm_st_context.sense_db.hi = val; } if (task_params->tx_io_size) { init_dif_context_flags(&cxt->ystorm_st_context.state.dif_flags, dif_task_params); init_dif_context_flags(&cxt->ustorm_st_context.dif_flags, dif_task_params); init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params, &cxt->ystorm_st_context.state.data_desc, sgl_task_params); slow_io = scsi_is_slow_sgl(sgl_task_params->num_sges, sgl_task_params->small_mid_sge); num_sges = !slow_io ? min_t(u16, sgl_task_params->num_sges, (u16)SCSI_NUM_SGES_SLOW_SGL_THR) : ISCSI_WQE_NUM_SGES_SLOWIO; if (slow_io) { SET_FIELD(cxt->ystorm_st_context.state.flags, YSTORM_ISCSI_TASK_STATE_SLOW_IO, 1); } } else if (task_params->rx_io_size) { init_dif_context_flags(&cxt->mstorm_st_context.dif_flags, dif_task_params); init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params, &cxt->mstorm_st_context.data_desc, sgl_task_params); num_sges = !scsi_is_slow_sgl(sgl_task_params->num_sges, sgl_task_params->small_mid_sge) ? min_t(u16, sgl_task_params->num_sges, (u16)SCSI_NUM_SGES_SLOW_SGL_THR) : ISCSI_WQE_NUM_SGES_SLOWIO; cxt->mstorm_st_context.rem_task_size = cpu_to_le32(task_size); } if (exp_data_transfer_len > task_size || task_type != ISCSI_TASK_TYPE_TARGET_WRITE) exp_data_transfer_len = task_size; init_ustorm_task_contexts(&task_params->context->ustorm_st_context, &task_params->context->ustorm_ag_context, task_size, exp_data_transfer_len, num_sges, dif_task_params ? 
dif_task_params->tx_dif_conn_err_en : false); set_rw_exp_data_acked_and_cont_len(task_params->context, conn_params, task_type, task_size, exp_data_transfer_len, GET_FIELD(pdu_header->hdr_second_dword, ISCSI_CMD_HDR_TOTAL_AHS_LEN)); if (dif_task_params) init_rtdif_task_context(&task_params->context->rdif_context, &task_params->context->tdif_context, dif_task_params, task_type); init_sqe(task_params, sgl_task_params, dif_task_params, pdu_header, cmd_params, task_type, false); return 0; } int init_initiator_rw_iscsi_task(struct iscsi_task_params *task_params, struct iscsi_conn_params *conn_params, struct scsi_initiator_cmd_params *cmd_params, struct iscsi_cmd_hdr *cmd_header, struct scsi_sgl_task_params *tx_sgl_params, struct scsi_sgl_task_params *rx_sgl_params, struct scsi_dif_task_params *dif_task_params) { if (GET_FIELD(cmd_header->flags_attr, ISCSI_CMD_HDR_WRITE)) return init_rw_iscsi_task(task_params, ISCSI_TASK_TYPE_INITIATOR_WRITE, conn_params, (struct iscsi_common_hdr *)cmd_header, tx_sgl_params, cmd_params, dif_task_params); else if (GET_FIELD(cmd_header->flags_attr, ISCSI_CMD_HDR_READ) || (task_params->rx_io_size == 0 && task_params->tx_io_size == 0)) return init_rw_iscsi_task(task_params, ISCSI_TASK_TYPE_INITIATOR_READ, conn_params, (struct iscsi_common_hdr *)cmd_header, rx_sgl_params, cmd_params, dif_task_params); else return -1; } int init_initiator_login_request_task(struct iscsi_task_params *task_params, struct iscsi_login_req_hdr *login_header, struct scsi_sgl_task_params *tx_params, struct scsi_sgl_task_params *rx_params) { struct iscsi_task_context *cxt; cxt = task_params->context; init_default_iscsi_task(task_params, (struct data_hdr *)login_header, ISCSI_TASK_TYPE_MIDPATH); init_ustorm_task_contexts(&cxt->ustorm_st_context, &cxt->ustorm_ag_context, task_params->rx_io_size ? rx_params->total_buffer_size : 0, task_params->tx_io_size ? tx_params->total_buffer_size : 0, 0, 0); if (task_params->tx_io_size) init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params, &cxt->ystorm_st_context.state.data_desc, tx_params); if (task_params->rx_io_size) init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params, &cxt->mstorm_st_context.data_desc, rx_params); cxt->mstorm_st_context.rem_task_size = cpu_to_le32(task_params->rx_io_size ? rx_params->total_buffer_size : 0); init_sqe(task_params, tx_params, NULL, (struct iscsi_common_hdr *)login_header, NULL, ISCSI_TASK_TYPE_MIDPATH, false); return 0; } int init_initiator_nop_out_task(struct iscsi_task_params *task_params, struct iscsi_nop_out_hdr *nop_out_pdu_header, struct scsi_sgl_task_params *tx_sgl_task_params, struct scsi_sgl_task_params *rx_sgl_task_params) { struct iscsi_task_context *cxt; cxt = task_params->context; init_default_iscsi_task(task_params, (struct data_hdr *)nop_out_pdu_header, ISCSI_TASK_TYPE_MIDPATH); if (nop_out_pdu_header->itt == ISCSI_ITT_ALL_ONES) set_local_completion_context(task_params->context); if (task_params->tx_io_size) init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params, &cxt->ystorm_st_context.state.data_desc, tx_sgl_task_params); if (task_params->rx_io_size) init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params, &cxt->mstorm_st_context.data_desc, rx_sgl_task_params); init_ustorm_task_contexts(&cxt->ustorm_st_context, &cxt->ustorm_ag_context, task_params->rx_io_size ? rx_sgl_task_params->total_buffer_size : 0, task_params->tx_io_size ? tx_sgl_task_params->total_buffer_size : 0, 0, 0); cxt->mstorm_st_context.rem_task_size = cpu_to_le32(task_params->rx_io_size ? 
rx_sgl_task_params->total_buffer_size : 0); init_sqe(task_params, tx_sgl_task_params, NULL, (struct iscsi_common_hdr *)nop_out_pdu_header, NULL, ISCSI_TASK_TYPE_MIDPATH, false); return 0; } int init_initiator_logout_request_task(struct iscsi_task_params *task_params, struct iscsi_logout_req_hdr *logout_hdr, struct scsi_sgl_task_params *tx_params, struct scsi_sgl_task_params *rx_params) { struct iscsi_task_context *cxt; cxt = task_params->context; init_default_iscsi_task(task_params, (struct data_hdr *)logout_hdr, ISCSI_TASK_TYPE_MIDPATH); if (task_params->tx_io_size) init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params, &cxt->ystorm_st_context.state.data_desc, tx_params); if (task_params->rx_io_size) init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params, &cxt->mstorm_st_context.data_desc, rx_params); init_ustorm_task_contexts(&cxt->ustorm_st_context, &cxt->ustorm_ag_context, task_params->rx_io_size ? rx_params->total_buffer_size : 0, task_params->tx_io_size ? tx_params->total_buffer_size : 0, 0, 0); cxt->mstorm_st_context.rem_task_size = cpu_to_le32(task_params->rx_io_size ? rx_params->total_buffer_size : 0); init_sqe(task_params, tx_params, NULL, (struct iscsi_common_hdr *)logout_hdr, NULL, ISCSI_TASK_TYPE_MIDPATH, false); return 0; } int init_initiator_tmf_request_task(struct iscsi_task_params *task_params, struct iscsi_tmf_request_hdr *tmf_header) { init_default_iscsi_task(task_params, (struct data_hdr *)tmf_header, ISCSI_TASK_TYPE_MIDPATH); init_sqe(task_params, NULL, NULL, (struct iscsi_common_hdr *)tmf_header, NULL, ISCSI_TASK_TYPE_MIDPATH, false); return 0; } int init_initiator_text_request_task(struct iscsi_task_params *task_params, struct iscsi_text_request_hdr *text_header, struct scsi_sgl_task_params *tx_params, struct scsi_sgl_task_params *rx_params) { struct iscsi_task_context *cxt; cxt = task_params->context; init_default_iscsi_task(task_params, (struct data_hdr *)text_header, ISCSI_TASK_TYPE_MIDPATH); if (task_params->tx_io_size) init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params, &cxt->ystorm_st_context.state.data_desc, tx_params); if (task_params->rx_io_size) init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params, &cxt->mstorm_st_context.data_desc, rx_params); cxt->mstorm_st_context.rem_task_size = cpu_to_le32(task_params->rx_io_size ? rx_params->total_buffer_size : 0); init_ustorm_task_contexts(&cxt->ustorm_st_context, &cxt->ustorm_ag_context, task_params->rx_io_size ? rx_params->total_buffer_size : 0, task_params->tx_io_size ? tx_params->total_buffer_size : 0, 0, 0); init_sqe(task_params, tx_params, NULL, (struct iscsi_common_hdr *)text_header, NULL, ISCSI_TASK_TYPE_MIDPATH, false); return 0; } int init_cleanup_task(struct iscsi_task_params *task_params) { init_sqe(task_params, NULL, NULL, NULL, NULL, ISCSI_TASK_TYPE_MIDPATH, true); return 0; }
linux-master
drivers/scsi/qedi/qedi_fw_api.c
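The qedi_fw_api.c record above programs nearly every task-context field through SET_FIELD()/GET_FIELD() mask-and-shift macros. The snippet below is a minimal, self-contained sketch of that pattern, not the driver's actual definitions (those live in the qed/qedi headers); SKETCH_SET_FIELD, SKETCH_GET_FIELD and the EXAMPLE_FLAG_* constants are invented names used only for illustration.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical field layout: a 3-bit flag starting at bit 4 of a register. */
#define EXAMPLE_FLAG_MASK   0x7
#define EXAMPLE_FLAG_SHIFT  4

/* Mask-and-shift pattern: clear the field, then OR in the new value. */
#define SKETCH_SET_FIELD(value, name, val)                                  \
	((value) = (((value) & ~((name##_MASK) << (name##_SHIFT))) |       \
		    (((val) & (name##_MASK)) << (name##_SHIFT))))

#define SKETCH_GET_FIELD(value, name) \
	(((value) >> (name##_SHIFT)) & (name##_MASK))

int main(void)
{
	uint32_t flags = 0;

	SKETCH_SET_FIELD(flags, EXAMPLE_FLAG, 5);	/* flags becomes 0x50 */
	printf("field=%u raw=0x%x\n",
	       (unsigned)SKETCH_GET_FIELD(flags, EXAMPLE_FLAG),
	       (unsigned)flags);
	return 0;
}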
/*====================================================================== A driver for the Qlogic SCSI card qlogic_cs.c 1.79 2000/06/12 21:27:26 The contents of this file are subject to the Mozilla Public License Version 1.1 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.mozilla.org/MPL/ Software distributed under the License is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for the specific language governing rights and limitations under the License. The initial developer of the original code is David A. Hinds <[email protected]>. Portions created by David A. Hinds are Copyright (C) 1999 David A. Hinds. All Rights Reserved. Alternatively, the contents of this file may be used under the terms of the GNU General Public License version 2 (the "GPL"), in which case the provisions of the GPL are applicable instead of the above. If you wish to allow the use of your version of this file only under the terms of the GPL and not to allow others to use your version of this file under the MPL, indicate your decision by deleting the provisions above and replace them with the notice and other provisions required by the GPL. If you do not delete the provisions above, a recipient may use your version of this file under either the MPL or the GPL. ======================================================================*/ #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/ioport.h> #include <asm/io.h> #include <linux/major.h> #include <linux/blkdev.h> #include <linux/interrupt.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_host.h> #include <scsi/scsi_ioctl.h> #include <scsi/scsi_tcq.h> #include "../qlogicfas408.h" #include <pcmcia/cistpl.h> #include <pcmcia/ds.h> #include <pcmcia/ciscode.h> /* Set the following to 2 to use normal interrupt (active high/totempole- * tristate), otherwise use 0 (REQUIRED FOR PCMCIA) for active low, open * drain */ #define INT_TYPE 0 static char qlogic_name[] = "qlogic_cs"; static struct scsi_host_template qlogicfas_driver_template = { .module = THIS_MODULE, .name = qlogic_name, .proc_name = qlogic_name, .info = qlogicfas408_info, .queuecommand = qlogicfas408_queuecommand, .eh_abort_handler = qlogicfas408_abort, .eh_host_reset_handler = qlogicfas408_host_reset, .bios_param = qlogicfas408_biosparam, .can_queue = 1, .this_id = -1, .sg_tablesize = SG_ALL, .dma_boundary = PAGE_SIZE - 1, }; /*====================================================================*/ typedef struct scsi_info_t { struct pcmcia_device *p_dev; struct Scsi_Host *host; unsigned short manf_id; } scsi_info_t; static void qlogic_release(struct pcmcia_device *link); static void qlogic_detach(struct pcmcia_device *p_dev); static int qlogic_config(struct pcmcia_device * link); static struct Scsi_Host *qlogic_detect(struct scsi_host_template *host, struct pcmcia_device *link, int qbase, int qlirq) { int qltyp; /* type of chip */ int qinitid; struct Scsi_Host *shost; /* registered host structure */ struct qlogicfas408_priv *priv; qltyp = qlogicfas408_get_chip_type(qbase, INT_TYPE); qinitid = host->this_id; if (qinitid < 0) qinitid = 7; /* if no ID, use 7 */ qlogicfas408_setup(qbase, qinitid, INT_TYPE); host->name = qlogic_name; shost = scsi_host_alloc(host, sizeof(struct qlogicfas408_priv)); if (!shost) 
goto err; shost->io_port = qbase; shost->n_io_port = 16; shost->dma_channel = -1; if (qlirq != -1) shost->irq = qlirq; priv = get_priv_by_host(shost); priv->qlirq = qlirq; priv->qbase = qbase; priv->qinitid = qinitid; priv->shost = shost; priv->int_type = INT_TYPE; if (request_irq(qlirq, qlogicfas408_ihandl, 0, qlogic_name, shost)) goto free_scsi_host; sprintf(priv->qinfo, "Qlogicfas Driver version 0.46, chip %02X at %03X, IRQ %d, TPdma:%d", qltyp, qbase, qlirq, QL_TURBO_PDMA); if (scsi_add_host(shost, NULL)) goto free_interrupt; scsi_scan_host(shost); return shost; free_interrupt: free_irq(qlirq, shost); free_scsi_host: scsi_host_put(shost); err: return NULL; } static int qlogic_probe(struct pcmcia_device *link) { scsi_info_t *info; dev_dbg(&link->dev, "qlogic_attach()\n"); /* Create new SCSI device */ info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; info->p_dev = link; link->priv = info; link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO; link->config_regs = PRESENT_OPTION; return qlogic_config(link); } /* qlogic_attach */ /*====================================================================*/ static void qlogic_detach(struct pcmcia_device *link) { dev_dbg(&link->dev, "qlogic_detach\n"); qlogic_release(link); kfree(link->priv); } /* qlogic_detach */ /*====================================================================*/ static int qlogic_config_check(struct pcmcia_device *p_dev, void *priv_data) { p_dev->io_lines = 10; p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; if (p_dev->resource[0]->start == 0) return -ENODEV; return pcmcia_request_io(p_dev); } static int qlogic_config(struct pcmcia_device * link) { scsi_info_t *info = link->priv; int ret; struct Scsi_Host *host; dev_dbg(&link->dev, "qlogic_config\n"); ret = pcmcia_loop_config(link, qlogic_config_check, NULL); if (ret) goto failed; if (!link->irq) goto failed; ret = pcmcia_enable_device(link); if (ret) goto failed; if ((info->manf_id == MANFID_MACNICA) || (info->manf_id == MANFID_PIONEER) || (info->manf_id == 0x0098)) { /* set ATAcmd */ outb(0xb4, link->resource[0]->start + 0xd); outb(0x24, link->resource[0]->start + 0x9); outb(0x04, link->resource[0]->start + 0xd); } /* The KXL-810AN has a bigger IO port window */ if (resource_size(link->resource[0]) == 32) host = qlogic_detect(&qlogicfas_driver_template, link, link->resource[0]->start + 16, link->irq); else host = qlogic_detect(&qlogicfas_driver_template, link, link->resource[0]->start, link->irq); if (!host) { printk(KERN_INFO "%s: no SCSI devices found\n", qlogic_name); goto failed; } info->host = host; return 0; failed: pcmcia_disable_device(link); return -ENODEV; } /* qlogic_config */ /*====================================================================*/ static void qlogic_release(struct pcmcia_device *link) { scsi_info_t *info = link->priv; dev_dbg(&link->dev, "qlogic_release\n"); scsi_remove_host(info->host); free_irq(link->irq, info->host); pcmcia_disable_device(link); scsi_host_put(info->host); } /*====================================================================*/ static int qlogic_resume(struct pcmcia_device *link) { scsi_info_t *info = link->priv; int ret; ret = pcmcia_enable_device(link); if (ret) return ret; if ((info->manf_id == MANFID_MACNICA) || (info->manf_id == MANFID_PIONEER) || (info->manf_id == 0x0098)) { outb(0x80, link->resource[0]->start + 0xd); outb(0x24, link->resource[0]->start + 0x9); outb(0x04, link->resource[0]->start + 0xd); } /* Ugggglllyyyy!!! 
*/ qlogicfas408_host_reset(NULL); return 0; } static const struct pcmcia_device_id qlogic_ids[] = { PCMCIA_DEVICE_PROD_ID12("EIger Labs", "PCMCIA-to-SCSI Adapter", 0x88395fa7, 0x33b7a5e6), PCMCIA_DEVICE_PROD_ID12("EPSON", "SCSI-2 PC Card SC200", 0xd361772f, 0x299d1751), PCMCIA_DEVICE_PROD_ID12("MACNICA", "MIRACLE SCSI-II mPS110", 0x20841b68, 0xab3c3b6d), PCMCIA_DEVICE_PROD_ID12("MIDORI ELECTRONICS ", "CN-SC43", 0x6534382a, 0xd67eee79), PCMCIA_DEVICE_PROD_ID12("NEC", "PC-9801N-J03R", 0x18df0ba0, 0x24662e8a), PCMCIA_DEVICE_PROD_ID12("KME ", "KXLC003", 0x82375a27, 0xf68e5bf7), PCMCIA_DEVICE_PROD_ID12("KME ", "KXLC004", 0x82375a27, 0x68eace54), PCMCIA_DEVICE_PROD_ID12("KME", "KXLC101", 0x3faee676, 0x194250ec), PCMCIA_DEVICE_PROD_ID12("QLOGIC CORPORATION", "pc05", 0xd77b2930, 0xa85b2735), PCMCIA_DEVICE_PROD_ID12("QLOGIC CORPORATION", "pc05 rev 1.10", 0xd77b2930, 0x70f8b5f8), PCMCIA_DEVICE_PROD_ID123("KME", "KXLC002", "00", 0x3faee676, 0x81896b61, 0xf99f065f), PCMCIA_DEVICE_PROD_ID12("RATOC System Inc.", "SCSI2 CARD 37", 0x85c10e17, 0x1a2640c1), PCMCIA_DEVICE_PROD_ID12("TOSHIBA", "SCSC200A PC CARD SCSI", 0xb4585a1a, 0xa6f06ebe), PCMCIA_DEVICE_PROD_ID12("TOSHIBA", "SCSC200B PC CARD SCSI-10", 0xb4585a1a, 0x0a88dea0), /* these conflict with other cards! */ /* PCMCIA_DEVICE_PROD_ID123("MACNICA", "MIRACLE SCSI", "mPS100", 0x20841b68, 0xf8dedaeb, 0x89f7fafb), */ /* PCMCIA_DEVICE_PROD_ID123("MACNICA", "MIRACLE SCSI", "mPS100", 0x20841b68, 0xf8dedaeb, 0x89f7fafb), */ PCMCIA_DEVICE_NULL, }; MODULE_DEVICE_TABLE(pcmcia, qlogic_ids); static struct pcmcia_driver qlogic_cs_driver = { .owner = THIS_MODULE, .name = "qlogic_cs", .probe = qlogic_probe, .remove = qlogic_detach, .id_table = qlogic_ids, .resume = qlogic_resume, }; MODULE_AUTHOR("Tom Zerucha, Michael Griffith"); MODULE_DESCRIPTION("Driver for the PCMCIA Qlogic FAS SCSI controllers"); MODULE_LICENSE("GPL"); module_pcmcia_driver(qlogic_cs_driver);
linux-master
drivers/scsi/pcmcia/qlogic_stub.c
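One behaviour in qlogic_config() above is easy to miss: when the card decodes a 32-byte I/O window (the KXL-810AN case), qlogic_detect() is called with the window start plus 16, otherwise with the window start itself. The standalone sketch below restates just that decision; pick_qlogic_base() is an invented helper name, not part of the driver.

#include <stdio.h>

/* Sketch only: mirrors the resource_size() == 32 check in qlogic_config(). */
static unsigned long pick_qlogic_base(unsigned long start, unsigned long window_len)
{
	/* A 32-byte window puts the FAS408 register block 16 bytes in;
	 * the usual 16-byte window uses the window start directly. */
	return (window_len == 32) ? start + 16 : start;
}

int main(void)
{
	printf("0x%lx\n", pick_qlogic_base(0x300, 32));	/* prints 0x310 */
	printf("0x%lx\n", pick_qlogic_base(0x300, 16));	/* prints 0x300 */
	return 0;
}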
#define AHA152X_PCMCIA 1
#define AHA152X_STAT 1
#include "aha152x.c"
linux-master
drivers/scsi/pcmcia/aha152x_core.c
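The aha152x_core.c record is only two feature macros plus an #include of the shared aha152x core. The sketch below illustrates the general idea it relies on, selecting code paths at compile time with such macros; every name in it (EXAMPLE_CORE_*, example_core_init) is invented, and it deliberately does not reproduce the kernel's include-the-.c-file trick.

#include <stdio.h>

/* Hypothetical stand-in for a shared "core": behaviour is chosen by macros
 * that a thin wrapper defines before compiling the core. */
#define EXAMPLE_CORE_PCMCIA 1
#define EXAMPLE_CORE_STATS  1

static void example_core_init(void)
{
#if defined(EXAMPLE_CORE_PCMCIA)
	puts("core built with PCMCIA support");
#endif
#if defined(EXAMPLE_CORE_STATS)
	puts("core built with statistics support");
#endif
}

int main(void)
{
	example_core_init();
	return 0;
}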
/*====================================================================== NinjaSCSI-3 / NinjaSCSI-32Bi PCMCIA SCSI host adapter card driver By: YOKOTA Hiroshi <[email protected]> Ver.2.8 Support 32bit MMIO mode Support Synchronous Data Transfer Request (SDTR) mode Ver.2.0 Support 32bit PIO mode Ver.1.1.2 Fix for scatter list buffer exceeds Ver.1.1 Support scatter list Ver.0.1 Initial version This software may be used and distributed according to the terms of the GNU General Public License. ======================================================================*/ /*********************************************************************** This driver is for these PCcards. I-O DATA PCSC-F (Workbit NinjaSCSI-3) "WBT", "NinjaSCSI-3", "R1.0" I-O DATA CBSC-II (Workbit NinjaSCSI-32Bi in 16bit mode) "IO DATA", "CBSC16 ", "1" ***********************************************************************/ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/ioport.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/major.h> #include <linux/blkdev.h> #include <linux/stat.h> #include <asm/io.h> #include <asm/irq.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_host.h> #include <scsi/scsi_ioctl.h> #include <pcmcia/cistpl.h> #include <pcmcia/cisreg.h> #include <pcmcia/ds.h> #include "nsp_cs.h" MODULE_AUTHOR("YOKOTA Hiroshi <[email protected]>"); MODULE_DESCRIPTION("WorkBit NinjaSCSI-3 / NinjaSCSI-32Bi(16bit) PCMCIA SCSI host adapter module"); MODULE_LICENSE("GPL"); #include "nsp_io.h" /*====================================================================*/ /* Parameters that can be set with 'insmod' */ static int nsp_burst_mode = BURST_MEM32; module_param(nsp_burst_mode, int, 0); MODULE_PARM_DESC(nsp_burst_mode, "Burst transfer mode (0=io8, 1=io32, 2=mem32(default))"); /* Release IO ports after configuration? */ static bool free_ports = 0; module_param(free_ports, bool, 0); MODULE_PARM_DESC(free_ports, "Release IO ports after configuration? (default: 0 (=no))"); static struct scsi_pointer *nsp_priv(struct scsi_cmnd *cmd) { return scsi_cmd_priv(cmd); } static struct scsi_host_template nsp_driver_template = { .proc_name = "nsp_cs", .show_info = nsp_show_info, .name = "WorkBit NinjaSCSI-3/32Bi(16bit)", .info = nsp_info, .queuecommand = nsp_queuecommand, /* .eh_abort_handler = nsp_eh_abort,*/ .eh_bus_reset_handler = nsp_eh_bus_reset, .eh_host_reset_handler = nsp_eh_host_reset, .can_queue = 1, .this_id = NSP_INITIATOR_ID, .sg_tablesize = SG_ALL, .dma_boundary = PAGE_SIZE - 1, .cmd_size = sizeof(struct scsi_pointer), }; static nsp_hw_data nsp_data_base; /* attach <-> detect glue */ /* * debug, error print */ #ifndef NSP_DEBUG # define NSP_DEBUG_MASK 0x000000 # define nsp_msg(type, args...) nsp_cs_message("", 0, (type), args) # define nsp_dbg(mask, args...) /* */ #else # define NSP_DEBUG_MASK 0xffffff # define nsp_msg(type, args...) \ nsp_cs_message (__func__, __LINE__, (type), args) # define nsp_dbg(mask, args...) 
\ nsp_cs_dmessage(__func__, __LINE__, (mask), args) #endif #define NSP_DEBUG_QUEUECOMMAND BIT(0) #define NSP_DEBUG_REGISTER BIT(1) #define NSP_DEBUG_AUTOSCSI BIT(2) #define NSP_DEBUG_INTR BIT(3) #define NSP_DEBUG_SGLIST BIT(4) #define NSP_DEBUG_BUSFREE BIT(5) #define NSP_DEBUG_CDB_CONTENTS BIT(6) #define NSP_DEBUG_RESELECTION BIT(7) #define NSP_DEBUG_MSGINOCCUR BIT(8) #define NSP_DEBUG_EEPROM BIT(9) #define NSP_DEBUG_MSGOUTOCCUR BIT(10) #define NSP_DEBUG_BUSRESET BIT(11) #define NSP_DEBUG_RESTART BIT(12) #define NSP_DEBUG_SYNC BIT(13) #define NSP_DEBUG_WAIT BIT(14) #define NSP_DEBUG_TARGETFLAG BIT(15) #define NSP_DEBUG_PROC BIT(16) #define NSP_DEBUG_INIT BIT(17) #define NSP_DEBUG_DATA_IO BIT(18) #define NSP_SPECIAL_PRINT_REGISTER BIT(20) #define NSP_DEBUG_BUF_LEN 150 static inline void nsp_inc_resid(struct scsi_cmnd *SCpnt, int residInc) { scsi_set_resid(SCpnt, scsi_get_resid(SCpnt) + residInc); } __printf(4, 5) static void nsp_cs_message(const char *func, int line, char *type, char *fmt, ...) { va_list args; char buf[NSP_DEBUG_BUF_LEN]; va_start(args, fmt); vsnprintf(buf, sizeof(buf), fmt, args); va_end(args); #ifndef NSP_DEBUG printk("%snsp_cs: %s\n", type, buf); #else printk("%snsp_cs: %s (%d): %s\n", type, func, line, buf); #endif } #ifdef NSP_DEBUG static void nsp_cs_dmessage(const char *func, int line, int mask, char *fmt, ...) { va_list args; char buf[NSP_DEBUG_BUF_LEN]; va_start(args, fmt); vsnprintf(buf, sizeof(buf), fmt, args); va_end(args); if (mask & NSP_DEBUG_MASK) { printk("nsp_cs-debug: 0x%x %s (%d): %s\n", mask, func, line, buf); } } #endif /***********************************************************/ /*==================================================== * Clenaup parameters and call done() functions. * You must be set SCpnt->result before call this function. */ static void nsp_scsi_done(struct scsi_cmnd *SCpnt) { nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; data->CurrentSC = NULL; scsi_done(SCpnt); } static int nsp_queuecommand_lck(struct scsi_cmnd *const SCpnt) { struct scsi_pointer *scsi_pointer = nsp_priv(SCpnt); #ifdef NSP_DEBUG /*unsigned int host_id = SCpnt->device->host->this_id;*/ /*unsigned int base = SCpnt->device->host->io_port;*/ unsigned char target = scmd_id(SCpnt); #endif nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; nsp_dbg(NSP_DEBUG_QUEUECOMMAND, "SCpnt=0x%p target=%d lun=%llu sglist=0x%p bufflen=%d sg_count=%d", SCpnt, target, SCpnt->device->lun, scsi_sglist(SCpnt), scsi_bufflen(SCpnt), scsi_sg_count(SCpnt)); //nsp_dbg(NSP_DEBUG_QUEUECOMMAND, "before CurrentSC=0x%p", data->CurrentSC); if (data->CurrentSC != NULL) { nsp_msg(KERN_DEBUG, "CurrentSC!=NULL this can't be happen"); SCpnt->result = DID_BAD_TARGET << 16; nsp_scsi_done(SCpnt); return 0; } #if 0 /* XXX: pcmcia-cs generates SCSI command with "scsi_info" utility. This makes kernel crash when suspending... */ if (data->ScsiInfo->stop != 0) { nsp_msg(KERN_INFO, "suspending device. 
reject command."); SCpnt->result = DID_BAD_TARGET << 16; nsp_scsi_done(SCpnt); return SCSI_MLQUEUE_HOST_BUSY; } #endif show_command(SCpnt); data->CurrentSC = SCpnt; scsi_pointer->Status = SAM_STAT_CHECK_CONDITION; scsi_pointer->Message = 0; scsi_pointer->have_data_in = IO_UNKNOWN; scsi_pointer->sent_command = 0; scsi_pointer->phase = PH_UNDETERMINED; scsi_set_resid(SCpnt, scsi_bufflen(SCpnt)); /* setup scratch area SCp.ptr : buffer pointer SCp.this_residual : buffer length SCp.buffer : next buffer SCp.buffers_residual : left buffers in list SCp.phase : current state of the command */ if (scsi_bufflen(SCpnt)) { scsi_pointer->buffer = scsi_sglist(SCpnt); scsi_pointer->ptr = BUFFER_ADDR(SCpnt); scsi_pointer->this_residual = scsi_pointer->buffer->length; scsi_pointer->buffers_residual = scsi_sg_count(SCpnt) - 1; } else { scsi_pointer->ptr = NULL; scsi_pointer->this_residual = 0; scsi_pointer->buffer = NULL; scsi_pointer->buffers_residual = 0; } if (!nsphw_start_selection(SCpnt)) { nsp_dbg(NSP_DEBUG_QUEUECOMMAND, "selection fail"); SCpnt->result = DID_BUS_BUSY << 16; nsp_scsi_done(SCpnt); return 0; } //nsp_dbg(NSP_DEBUG_QUEUECOMMAND, "out"); #ifdef NSP_DEBUG data->CmdId++; #endif return 0; } static DEF_SCSI_QCMD(nsp_queuecommand) /* * setup PIO FIFO transfer mode and enable/disable to data out */ static void nsp_setup_fifo(nsp_hw_data *data, bool enabled) { unsigned int base = data->BaseAddress; unsigned char transfer_mode_reg; //nsp_dbg(NSP_DEBUG_DATA_IO, "enabled=%d", enabled); if (enabled) { transfer_mode_reg = TRANSFER_GO | BRAIND; } else { transfer_mode_reg = 0; } transfer_mode_reg |= data->TransferMode; nsp_index_write(base, TRANSFERMODE, transfer_mode_reg); } static void nsphw_init_sync(nsp_hw_data *data) { sync_data tmp_sync = { .SyncNegotiation = SYNC_NOT_YET, .SyncPeriod = 0, .SyncOffset = 0 }; int i; /* setup sync data */ for ( i = 0; i < ARRAY_SIZE(data->Sync); i++ ) { data->Sync[i] = tmp_sync; } } /* * Initialize Ninja hardware */ static void nsphw_init(nsp_hw_data *data) { unsigned int base = data->BaseAddress; nsp_dbg(NSP_DEBUG_INIT, "in base=0x%x", base); data->ScsiClockDiv = CLOCK_40M | FAST_20; data->CurrentSC = NULL; data->FifoCount = 0; data->TransferMode = MODE_IO8; nsphw_init_sync(data); /* block all interrupts */ nsp_write(base, IRQCONTROL, IRQCONTROL_ALLMASK); /* setup SCSI interface */ nsp_write(base, IFSELECT, IF_IFSEL); nsp_index_write(base, SCSIIRQMODE, 0); nsp_index_write(base, TRANSFERMODE, MODE_IO8); nsp_index_write(base, CLOCKDIV, data->ScsiClockDiv); nsp_index_write(base, PARITYCTRL, 0); nsp_index_write(base, POINTERCLR, POINTER_CLEAR | ACK_COUNTER_CLEAR | REQ_COUNTER_CLEAR | HOST_COUNTER_CLEAR); /* setup fifo asic */ nsp_write(base, IFSELECT, IF_REGSEL); nsp_index_write(base, TERMPWRCTRL, 0); if ((nsp_index_read(base, OTHERCONTROL) & TPWR_SENSE) == 0) { nsp_msg(KERN_INFO, "terminator power on"); nsp_index_write(base, TERMPWRCTRL, POWER_ON); } nsp_index_write(base, TIMERCOUNT, 0); nsp_index_write(base, TIMERCOUNT, 0); /* requires 2 times!! 
*/ nsp_index_write(base, SYNCREG, 0); nsp_index_write(base, ACKWIDTH, 0); /* enable interrupts and ack them */ nsp_index_write(base, SCSIIRQMODE, SCSI_PHASE_CHANGE_EI | RESELECT_EI | SCSI_RESET_IRQ_EI ); nsp_write(base, IRQCONTROL, IRQCONTROL_ALLCLEAR); nsp_setup_fifo(data, false); } /* * Start selection phase */ static bool nsphw_start_selection(struct scsi_cmnd *const SCpnt) { struct scsi_pointer *scsi_pointer = nsp_priv(SCpnt); unsigned int host_id = SCpnt->device->host->this_id; unsigned int base = SCpnt->device->host->io_port; unsigned char target = scmd_id(SCpnt); nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; int time_out; unsigned char phase, arbit; //nsp_dbg(NSP_DEBUG_RESELECTION, "in"); phase = nsp_index_read(base, SCSIBUSMON); if(phase != BUSMON_BUS_FREE) { //nsp_dbg(NSP_DEBUG_RESELECTION, "bus busy"); return false; } /* start arbitration */ //nsp_dbg(NSP_DEBUG_RESELECTION, "start arbit"); scsi_pointer->phase = PH_ARBSTART; nsp_index_write(base, SETARBIT, ARBIT_GO); time_out = 1000; do { /* XXX: what a stupid chip! */ arbit = nsp_index_read(base, ARBITSTATUS); //nsp_dbg(NSP_DEBUG_RESELECTION, "arbit=%d, wait_count=%d", arbit, wait_count); udelay(1); /* hold 1.2us */ } while((arbit & (ARBIT_WIN | ARBIT_FAIL)) == 0 && (time_out-- != 0)); if (!(arbit & ARBIT_WIN)) { //nsp_dbg(NSP_DEBUG_RESELECTION, "arbit fail"); nsp_index_write(base, SETARBIT, ARBIT_FLAG_CLEAR); return false; } /* assert select line */ //nsp_dbg(NSP_DEBUG_RESELECTION, "assert SEL line"); scsi_pointer->phase = PH_SELSTART; udelay(3); /* wait 2.4us */ nsp_index_write(base, SCSIDATALATCH, BIT(host_id) | BIT(target)); nsp_index_write(base, SCSIBUSCTRL, SCSI_SEL | SCSI_BSY | SCSI_ATN); udelay(2); /* wait >1.2us */ nsp_index_write(base, SCSIBUSCTRL, SCSI_SEL | SCSI_BSY | SCSI_DATAOUT_ENB | SCSI_ATN); nsp_index_write(base, SETARBIT, ARBIT_FLAG_CLEAR); /*udelay(1);*/ /* wait >90ns */ nsp_index_write(base, SCSIBUSCTRL, SCSI_SEL | SCSI_DATAOUT_ENB | SCSI_ATN); /* check selection timeout */ nsp_start_timer(SCpnt, 1000/51); data->SelectionTimeOut = 1; return true; } struct nsp_sync_table { unsigned int min_period; unsigned int max_period; unsigned int chip_period; unsigned int ack_width; }; static struct nsp_sync_table nsp_sync_table_40M[] = { {0x0c, 0x0c, 0x1, 0}, /* 20MB 50ns*/ {0x19, 0x19, 0x3, 1}, /* 10MB 100ns*/ {0x1a, 0x25, 0x5, 2}, /* 7.5MB 150ns*/ {0x26, 0x32, 0x7, 3}, /* 5MB 200ns*/ { 0, 0, 0, 0}, }; static struct nsp_sync_table nsp_sync_table_20M[] = { {0x19, 0x19, 0x1, 0}, /* 10MB 100ns*/ {0x1a, 0x25, 0x2, 0}, /* 7.5MB 150ns*/ {0x26, 0x32, 0x3, 1}, /* 5MB 200ns*/ { 0, 0, 0, 0}, }; /* * setup synchronous data transfer mode */ static int nsp_analyze_sdtr(struct scsi_cmnd *SCpnt) { unsigned char target = scmd_id(SCpnt); // unsigned char lun = SCpnt->device->lun; nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; sync_data *sync = &(data->Sync[target]); struct nsp_sync_table *sync_table; unsigned int period, offset; nsp_dbg(NSP_DEBUG_SYNC, "in"); period = sync->SyncPeriod; offset = sync->SyncOffset; nsp_dbg(NSP_DEBUG_SYNC, "period=0x%x, offset=0x%x", period, offset); if ((data->ScsiClockDiv & (BIT(0)|BIT(1))) == CLOCK_20M) { sync_table = nsp_sync_table_20M; } else { sync_table = nsp_sync_table_40M; } for (; sync_table->max_period != 0; sync_table++) { if ( period >= sync_table->min_period && period <= sync_table->max_period ) { break; } } if (period != 0 && sync_table->max_period == 0) { /* * No proper period/offset found */ nsp_dbg(NSP_DEBUG_SYNC, "no proper period/offset"); 
sync->SyncPeriod = 0; sync->SyncOffset = 0; sync->SyncRegister = 0; sync->AckWidth = 0; return false; } sync->SyncRegister = (sync_table->chip_period << SYNCREG_PERIOD_SHIFT) | (offset & SYNCREG_OFFSET_MASK); sync->AckWidth = sync_table->ack_width; nsp_dbg(NSP_DEBUG_SYNC, "sync_reg=0x%x, ack_width=0x%x", sync->SyncRegister, sync->AckWidth); return true; } /* * start ninja hardware timer */ static void nsp_start_timer(struct scsi_cmnd *SCpnt, int time) { unsigned int base = SCpnt->device->host->io_port; nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; //nsp_dbg(NSP_DEBUG_INTR, "in SCpnt=0x%p, time=%d", SCpnt, time); data->TimerCount = time; nsp_index_write(base, TIMERCOUNT, time); } /* * wait for bus phase change */ static int nsp_negate_signal(struct scsi_cmnd *SCpnt, unsigned char mask, char *str) { unsigned int base = SCpnt->device->host->io_port; unsigned char reg; int time_out; //nsp_dbg(NSP_DEBUG_INTR, "in"); time_out = 100; do { reg = nsp_index_read(base, SCSIBUSMON); if (reg == 0xff) { break; } } while ((--time_out != 0) && (reg & mask) != 0); if (time_out == 0) { nsp_msg(KERN_DEBUG, " %s signal off timeout", str); } return 0; } /* * expect Ninja Irq */ static int nsp_expect_signal(struct scsi_cmnd *SCpnt, unsigned char current_phase, unsigned char mask) { unsigned int base = SCpnt->device->host->io_port; int time_out; unsigned char phase, i_src; //nsp_dbg(NSP_DEBUG_INTR, "current_phase=0x%x, mask=0x%x", current_phase, mask); time_out = 100; do { phase = nsp_index_read(base, SCSIBUSMON); if (phase == 0xff) { //nsp_dbg(NSP_DEBUG_INTR, "ret -1"); return -1; } i_src = nsp_read(base, IRQSTATUS); if (i_src & IRQSTATUS_SCSI) { //nsp_dbg(NSP_DEBUG_INTR, "ret 0 found scsi signal"); return 0; } if ((phase & mask) != 0 && (phase & BUSMON_PHASE_MASK) == current_phase) { //nsp_dbg(NSP_DEBUG_INTR, "ret 1 phase=0x%x", phase); return 1; } } while(time_out-- != 0); //nsp_dbg(NSP_DEBUG_INTR, "timeout"); return -1; } /* * transfer SCSI message */ static int nsp_xfer(struct scsi_cmnd *const SCpnt, int phase) { struct scsi_pointer *scsi_pointer = nsp_priv(SCpnt); unsigned int base = SCpnt->device->host->io_port; nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; char *buf = data->MsgBuffer; int len = min(MSGBUF_SIZE, data->MsgLen); int ptr; int ret; //nsp_dbg(NSP_DEBUG_DATA_IO, "in"); for (ptr = 0; len > 0; len--, ptr++) { ret = nsp_expect_signal(SCpnt, phase, BUSMON_REQ); if (ret <= 0) { nsp_dbg(NSP_DEBUG_DATA_IO, "xfer quit"); return 0; } /* if last byte, negate ATN */ if (len == 1 && scsi_pointer->phase == PH_MSG_OUT) { nsp_index_write(base, SCSIBUSCTRL, AUTODIRECTION | ACKENB); } /* read & write message */ if (phase & BUSMON_IO) { nsp_dbg(NSP_DEBUG_DATA_IO, "read msg"); buf[ptr] = nsp_index_read(base, SCSIDATAWITHACK); } else { nsp_dbg(NSP_DEBUG_DATA_IO, "write msg"); nsp_index_write(base, SCSIDATAWITHACK, buf[ptr]); } nsp_negate_signal(SCpnt, BUSMON_ACK, "xfer<ack>"); } return len; } /* * get extra SCSI data from fifo */ static int nsp_dataphase_bypass(struct scsi_cmnd *const SCpnt) { struct scsi_pointer *scsi_pointer = nsp_priv(SCpnt); nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; unsigned int count; //nsp_dbg(NSP_DEBUG_DATA_IO, "in"); if (scsi_pointer->have_data_in != IO_IN) { return 0; } count = nsp_fifo_count(SCpnt); if (data->FifoCount == count) { //nsp_dbg(NSP_DEBUG_DATA_IO, "not use bypass quirk"); return 0; } /* * XXX: NSP_QUIRK * data phase skip only occures in case of SCSI_LOW_READ */ nsp_dbg(NSP_DEBUG_DATA_IO, "use bypass quirk"); 
scsi_pointer->phase = PH_DATA; nsp_pio_read(SCpnt); nsp_setup_fifo(data, false); return 0; } /* * accept reselection */ static void nsp_reselected(struct scsi_cmnd *SCpnt) { unsigned int base = SCpnt->device->host->io_port; unsigned int host_id = SCpnt->device->host->this_id; //nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; unsigned char bus_reg; unsigned char id_reg, tmp; int target; nsp_dbg(NSP_DEBUG_RESELECTION, "in"); id_reg = nsp_index_read(base, RESELECTID); tmp = id_reg & (~BIT(host_id)); target = 0; while(tmp != 0) { if (tmp & BIT(0)) { break; } tmp >>= 1; target++; } if (scmd_id(SCpnt) != target) { nsp_msg(KERN_ERR, "XXX: reselect ID must be %d in this implementation.", target); } nsp_negate_signal(SCpnt, BUSMON_SEL, "reselect<SEL>"); nsp_nexus(SCpnt); bus_reg = nsp_index_read(base, SCSIBUSCTRL) & ~(SCSI_BSY | SCSI_ATN); nsp_index_write(base, SCSIBUSCTRL, bus_reg); nsp_index_write(base, SCSIBUSCTRL, bus_reg | AUTODIRECTION | ACKENB); } /* * count how many data transferd */ static int nsp_fifo_count(struct scsi_cmnd *SCpnt) { unsigned int base = SCpnt->device->host->io_port; unsigned int count; unsigned int l, m, h; nsp_index_write(base, POINTERCLR, POINTER_CLEAR | ACK_COUNTER); l = nsp_index_read(base, TRANSFERCOUNT); m = nsp_index_read(base, TRANSFERCOUNT); h = nsp_index_read(base, TRANSFERCOUNT); nsp_index_read(base, TRANSFERCOUNT); /* required this! */ count = (h << 16) | (m << 8) | (l << 0); //nsp_dbg(NSP_DEBUG_DATA_IO, "count=0x%x", count); return count; } /* fifo size */ #define RFIFO_CRIT 64 #define WFIFO_CRIT 64 /* * read data in DATA IN phase */ static void nsp_pio_read(struct scsi_cmnd *const SCpnt) { struct scsi_pointer *scsi_pointer = nsp_priv(SCpnt); unsigned int base = SCpnt->device->host->io_port; unsigned long mmio_base = SCpnt->device->host->base; nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; long time_out; int ocount, res; unsigned char stat, fifo_stat; ocount = data->FifoCount; nsp_dbg(NSP_DEBUG_DATA_IO, "in SCpnt=0x%p resid=%d ocount=%d ptr=0x%p this_residual=%d buffers=0x%p nbuf=%d", SCpnt, scsi_get_resid(SCpnt), ocount, scsi_pointer->ptr, scsi_pointer->this_residual, scsi_pointer->buffer, scsi_pointer->buffers_residual); time_out = 1000; while ((time_out-- != 0) && (scsi_pointer->this_residual > 0 || scsi_pointer->buffers_residual > 0)) { stat = nsp_index_read(base, SCSIBUSMON); stat &= BUSMON_PHASE_MASK; res = nsp_fifo_count(SCpnt) - ocount; //nsp_dbg(NSP_DEBUG_DATA_IO, "ptr=0x%p this=0x%x ocount=0x%x res=0x%x", scsi_pointer->ptr, scsi_pointer->this_residual, ocount, res); if (res == 0) { /* if some data available ? */ if (stat == BUSPHASE_DATA_IN) { /* phase changed? 
*/ //nsp_dbg(NSP_DEBUG_DATA_IO, " wait for data this=%d", scsi_pointer->this_residual); continue; } else { nsp_dbg(NSP_DEBUG_DATA_IO, "phase changed stat=0x%x", stat); break; } } fifo_stat = nsp_read(base, FIFOSTATUS); if ((fifo_stat & FIFOSTATUS_FULL_EMPTY) == 0 && stat == BUSPHASE_DATA_IN) { continue; } res = min(res, scsi_pointer->this_residual); switch (data->TransferMode) { case MODE_IO32: res &= ~(BIT(1)|BIT(0)); /* align 4 */ nsp_fifo32_read(base, scsi_pointer->ptr, res >> 2); break; case MODE_IO8: nsp_fifo8_read(base, scsi_pointer->ptr, res); break; case MODE_MEM32: res &= ~(BIT(1)|BIT(0)); /* align 4 */ nsp_mmio_fifo32_read(mmio_base, scsi_pointer->ptr, res >> 2); break; default: nsp_dbg(NSP_DEBUG_DATA_IO, "unknown read mode"); return; } nsp_inc_resid(SCpnt, -res); scsi_pointer->ptr += res; scsi_pointer->this_residual -= res; ocount += res; //nsp_dbg(NSP_DEBUG_DATA_IO, "ptr=0x%p this_residual=0x%x ocount=0x%x", scsi_pointer->ptr, scsi_pointer->this_residual, ocount); /* go to next scatter list if available */ if (scsi_pointer->this_residual == 0 && scsi_pointer->buffers_residual != 0 ) { //nsp_dbg(NSP_DEBUG_DATA_IO, "scatterlist next timeout=%d", time_out); scsi_pointer->buffers_residual--; scsi_pointer->buffer = sg_next(scsi_pointer->buffer); scsi_pointer->ptr = BUFFER_ADDR(SCpnt); scsi_pointer->this_residual = scsi_pointer->buffer->length; time_out = 1000; //nsp_dbg(NSP_DEBUG_DATA_IO, "page: 0x%p, off: 0x%x", scsi_pointer->buffer->page, scsi_pointer->buffer->offset); } } data->FifoCount = ocount; if (time_out < 0) { nsp_msg(KERN_DEBUG, "pio read timeout resid=%d this_residual=%d buffers_residual=%d", scsi_get_resid(SCpnt), scsi_pointer->this_residual, scsi_pointer->buffers_residual); } nsp_dbg(NSP_DEBUG_DATA_IO, "read ocount=0x%x", ocount); nsp_dbg(NSP_DEBUG_DATA_IO, "r cmd=%d resid=0x%x\n", data->CmdId, scsi_get_resid(SCpnt)); } /* * write data in DATA OUT phase */ static void nsp_pio_write(struct scsi_cmnd *SCpnt) { struct scsi_pointer *scsi_pointer = nsp_priv(SCpnt); unsigned int base = SCpnt->device->host->io_port; unsigned long mmio_base = SCpnt->device->host->base; nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; int time_out; int ocount, res; unsigned char stat; ocount = data->FifoCount; nsp_dbg(NSP_DEBUG_DATA_IO, "in fifocount=%d ptr=0x%p this_residual=%d buffers=0x%p nbuf=%d resid=0x%x", data->FifoCount, scsi_pointer->ptr, scsi_pointer->this_residual, scsi_pointer->buffer, scsi_pointer->buffers_residual, scsi_get_resid(SCpnt)); time_out = 1000; while ((time_out-- != 0) && (scsi_pointer->this_residual > 0 || scsi_pointer->buffers_residual > 0)) { stat = nsp_index_read(base, SCSIBUSMON); stat &= BUSMON_PHASE_MASK; if (stat != BUSPHASE_DATA_OUT) { res = ocount - nsp_fifo_count(SCpnt); nsp_dbg(NSP_DEBUG_DATA_IO, "phase changed stat=0x%x, res=%d\n", stat, res); /* Put back pointer */ nsp_inc_resid(SCpnt, res); scsi_pointer->ptr -= res; scsi_pointer->this_residual += res; ocount -= res; break; } res = ocount - nsp_fifo_count(SCpnt); if (res > 0) { /* write all data? */ nsp_dbg(NSP_DEBUG_DATA_IO, "wait for all data out. 
ocount=0x%x res=%d", ocount, res); continue; } res = min(scsi_pointer->this_residual, WFIFO_CRIT); //nsp_dbg(NSP_DEBUG_DATA_IO, "ptr=0x%p this=0x%x res=0x%x", scsi_pointer->ptr, scsi_pointer->this_residual, res); switch (data->TransferMode) { case MODE_IO32: res &= ~(BIT(1)|BIT(0)); /* align 4 */ nsp_fifo32_write(base, scsi_pointer->ptr, res >> 2); break; case MODE_IO8: nsp_fifo8_write(base, scsi_pointer->ptr, res); break; case MODE_MEM32: res &= ~(BIT(1)|BIT(0)); /* align 4 */ nsp_mmio_fifo32_write(mmio_base, scsi_pointer->ptr, res >> 2); break; default: nsp_dbg(NSP_DEBUG_DATA_IO, "unknown write mode"); break; } nsp_inc_resid(SCpnt, -res); scsi_pointer->ptr += res; scsi_pointer->this_residual -= res; ocount += res; /* go to next scatter list if available */ if (scsi_pointer->this_residual == 0 && scsi_pointer->buffers_residual != 0 ) { //nsp_dbg(NSP_DEBUG_DATA_IO, "scatterlist next"); scsi_pointer->buffers_residual--; scsi_pointer->buffer = sg_next(scsi_pointer->buffer); scsi_pointer->ptr = BUFFER_ADDR(SCpnt); scsi_pointer->this_residual = scsi_pointer->buffer->length; time_out = 1000; } } data->FifoCount = ocount; if (time_out < 0) { nsp_msg(KERN_DEBUG, "pio write timeout resid=0x%x", scsi_get_resid(SCpnt)); } nsp_dbg(NSP_DEBUG_DATA_IO, "write ocount=0x%x", ocount); nsp_dbg(NSP_DEBUG_DATA_IO, "w cmd=%d resid=0x%x\n", data->CmdId, scsi_get_resid(SCpnt)); } #undef RFIFO_CRIT #undef WFIFO_CRIT /* * setup synchronous/asynchronous data transfer mode */ static int nsp_nexus(struct scsi_cmnd *SCpnt) { unsigned int base = SCpnt->device->host->io_port; unsigned char target = scmd_id(SCpnt); // unsigned char lun = SCpnt->device->lun; nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; sync_data *sync = &(data->Sync[target]); //nsp_dbg(NSP_DEBUG_DATA_IO, "in SCpnt=0x%p", SCpnt); /* setup synch transfer registers */ nsp_index_write(base, SYNCREG, sync->SyncRegister); nsp_index_write(base, ACKWIDTH, sync->AckWidth); if (scsi_get_resid(SCpnt) % 4 != 0 || scsi_get_resid(SCpnt) <= PAGE_SIZE ) { data->TransferMode = MODE_IO8; } else if (nsp_burst_mode == BURST_MEM32) { data->TransferMode = MODE_MEM32; } else if (nsp_burst_mode == BURST_IO32) { data->TransferMode = MODE_IO32; } else { data->TransferMode = MODE_IO8; } /* setup pdma fifo */ nsp_setup_fifo(data, true); /* clear ack counter */ data->FifoCount = 0; nsp_index_write(base, POINTERCLR, POINTER_CLEAR | ACK_COUNTER_CLEAR | REQ_COUNTER_CLEAR | HOST_COUNTER_CLEAR); return 0; } #include "nsp_message.c" /* * interrupt handler */ static irqreturn_t nspintr(int irq, void *dev_id) { unsigned int base; unsigned char irq_status, irq_phase, phase; struct scsi_cmnd *tmpSC; struct scsi_pointer *scsi_pointer; unsigned char target, lun; unsigned int *sync_neg; int i, tmp; nsp_hw_data *data; //nsp_dbg(NSP_DEBUG_INTR, "dev_id=0x%p", dev_id); //nsp_dbg(NSP_DEBUG_INTR, "host=0x%p", ((scsi_info_t *)dev_id)->host); if ( dev_id != NULL && ((scsi_info_t *)dev_id)->host != NULL ) { scsi_info_t *info = (scsi_info_t *)dev_id; data = (nsp_hw_data *)info->host->hostdata; } else { nsp_dbg(NSP_DEBUG_INTR, "host data wrong"); return IRQ_NONE; } //nsp_dbg(NSP_DEBUG_INTR, "&nsp_data_base=0x%p, dev_id=0x%p", &nsp_data_base, dev_id); base = data->BaseAddress; //nsp_dbg(NSP_DEBUG_INTR, "base=0x%x", base); /* * interrupt check */ nsp_write(base, IRQCONTROL, IRQCONTROL_IRQDISABLE); irq_status = nsp_read(base, IRQSTATUS); //nsp_dbg(NSP_DEBUG_INTR, "irq_status=0x%x", irq_status); if ((irq_status == 0xff) || ((irq_status & IRQSTATUS_MASK) == 0)) { nsp_write(base, IRQCONTROL, 
0); //nsp_dbg(NSP_DEBUG_INTR, "no irq/shared irq"); return IRQ_NONE; } /* XXX: IMPORTANT * Do not read an irq_phase register if no scsi phase interrupt. * Unless, you should lose a scsi phase interrupt. */ phase = nsp_index_read(base, SCSIBUSMON); if((irq_status & IRQSTATUS_SCSI) != 0) { irq_phase = nsp_index_read(base, IRQPHASESENCE); } else { irq_phase = 0; } //nsp_dbg(NSP_DEBUG_INTR, "irq_phase=0x%x", irq_phase); /* * timer interrupt handler (scsi vs timer interrupts) */ //nsp_dbg(NSP_DEBUG_INTR, "timercount=%d", data->TimerCount); if (data->TimerCount != 0) { //nsp_dbg(NSP_DEBUG_INTR, "stop timer"); nsp_index_write(base, TIMERCOUNT, 0); nsp_index_write(base, TIMERCOUNT, 0); data->TimerCount = 0; } if ((irq_status & IRQSTATUS_MASK) == IRQSTATUS_TIMER && data->SelectionTimeOut == 0) { //nsp_dbg(NSP_DEBUG_INTR, "timer start"); nsp_write(base, IRQCONTROL, IRQCONTROL_TIMER_CLEAR); return IRQ_HANDLED; } nsp_write(base, IRQCONTROL, IRQCONTROL_TIMER_CLEAR | IRQCONTROL_FIFO_CLEAR); if ((irq_status & IRQSTATUS_SCSI) && (irq_phase & SCSI_RESET_IRQ)) { nsp_msg(KERN_ERR, "bus reset (power off?)"); nsphw_init(data); nsp_bus_reset(data); if(data->CurrentSC != NULL) { tmpSC = data->CurrentSC; scsi_pointer = nsp_priv(tmpSC); tmpSC->result = (DID_RESET << 16) | ((scsi_pointer->Message & 0xff) << 8) | ((scsi_pointer->Status & 0xff) << 0); nsp_scsi_done(tmpSC); } return IRQ_HANDLED; } if (data->CurrentSC == NULL) { nsp_msg(KERN_ERR, "CurrentSC==NULL irq_status=0x%x phase=0x%x irq_phase=0x%x this can't be happen. reset everything", irq_status, phase, irq_phase); nsphw_init(data); nsp_bus_reset(data); return IRQ_HANDLED; } tmpSC = data->CurrentSC; scsi_pointer = nsp_priv(tmpSC); target = tmpSC->device->id; lun = tmpSC->device->lun; sync_neg = &(data->Sync[target].SyncNegotiation); /* * parse hardware SCSI irq reasons register */ if (irq_status & IRQSTATUS_SCSI) { if (irq_phase & RESELECT_IRQ) { nsp_dbg(NSP_DEBUG_INTR, "reselect"); nsp_write(base, IRQCONTROL, IRQCONTROL_RESELECT_CLEAR); nsp_reselected(tmpSC); return IRQ_HANDLED; } if ((irq_phase & (PHASE_CHANGE_IRQ | LATCHED_BUS_FREE)) == 0) { return IRQ_HANDLED; } } //show_phase(tmpSC); switch (scsi_pointer->phase) { case PH_SELSTART: // *sync_neg = SYNC_NOT_YET; if ((phase & BUSMON_BSY) == 0) { //nsp_dbg(NSP_DEBUG_INTR, "selection count=%d", data->SelectionTimeOut); if (data->SelectionTimeOut >= NSP_SELTIMEOUT) { nsp_dbg(NSP_DEBUG_INTR, "selection time out"); data->SelectionTimeOut = 0; nsp_index_write(base, SCSIBUSCTRL, 0); tmpSC->result = DID_TIME_OUT << 16; nsp_scsi_done(tmpSC); return IRQ_HANDLED; } data->SelectionTimeOut += 1; nsp_start_timer(tmpSC, 1000/51); return IRQ_HANDLED; } /* attention assert */ //nsp_dbg(NSP_DEBUG_INTR, "attention assert"); data->SelectionTimeOut = 0; scsi_pointer->phase = PH_SELECTED; nsp_index_write(base, SCSIBUSCTRL, SCSI_ATN); udelay(1); nsp_index_write(base, SCSIBUSCTRL, SCSI_ATN | AUTODIRECTION | ACKENB); return IRQ_HANDLED; case PH_RESELECT: //nsp_dbg(NSP_DEBUG_INTR, "phase reselect"); // *sync_neg = SYNC_NOT_YET; if ((phase & BUSMON_PHASE_MASK) != BUSPHASE_MESSAGE_IN) { tmpSC->result = DID_ABORT << 16; nsp_scsi_done(tmpSC); return IRQ_HANDLED; } fallthrough; default: if ((irq_status & (IRQSTATUS_SCSI | IRQSTATUS_FIFO)) == 0) { return IRQ_HANDLED; } break; } /* * SCSI sequencer */ //nsp_dbg(NSP_DEBUG_INTR, "start scsi seq"); /* normal disconnect */ if ((scsi_pointer->phase == PH_MSG_IN || scsi_pointer->phase == PH_MSG_OUT) && (irq_phase & LATCHED_BUS_FREE) != 0) { nsp_dbg(NSP_DEBUG_INTR, "normal disconnect 
irq_status=0x%x, phase=0x%x, irq_phase=0x%x", irq_status, phase, irq_phase); //*sync_neg = SYNC_NOT_YET; /* all command complete and return status */ if (scsi_pointer->Message == COMMAND_COMPLETE) { tmpSC->result = (DID_OK << 16) | ((scsi_pointer->Message & 0xff) << 8) | ((scsi_pointer->Status & 0xff) << 0); nsp_dbg(NSP_DEBUG_INTR, "command complete result=0x%x", tmpSC->result); nsp_scsi_done(tmpSC); return IRQ_HANDLED; } return IRQ_HANDLED; } /* check unexpected bus free state */ if (phase == 0) { nsp_msg(KERN_DEBUG, "unexpected bus free. irq_status=0x%x, phase=0x%x, irq_phase=0x%x", irq_status, phase, irq_phase); *sync_neg = SYNC_NG; tmpSC->result = DID_ERROR << 16; nsp_scsi_done(tmpSC); return IRQ_HANDLED; } switch (phase & BUSMON_PHASE_MASK) { case BUSPHASE_COMMAND: nsp_dbg(NSP_DEBUG_INTR, "BUSPHASE_COMMAND"); if ((phase & BUSMON_REQ) == 0) { nsp_dbg(NSP_DEBUG_INTR, "REQ == 0"); return IRQ_HANDLED; } scsi_pointer->phase = PH_COMMAND; nsp_nexus(tmpSC); /* write scsi command */ nsp_dbg(NSP_DEBUG_INTR, "cmd_len=%d", tmpSC->cmd_len); nsp_index_write(base, COMMANDCTRL, CLEAR_COMMAND_POINTER); for (i = 0; i < tmpSC->cmd_len; i++) { nsp_index_write(base, COMMANDDATA, tmpSC->cmnd[i]); } nsp_index_write(base, COMMANDCTRL, CLEAR_COMMAND_POINTER | AUTO_COMMAND_GO); break; case BUSPHASE_DATA_OUT: nsp_dbg(NSP_DEBUG_INTR, "BUSPHASE_DATA_OUT"); scsi_pointer->phase = PH_DATA; scsi_pointer->have_data_in = IO_OUT; nsp_pio_write(tmpSC); break; case BUSPHASE_DATA_IN: nsp_dbg(NSP_DEBUG_INTR, "BUSPHASE_DATA_IN"); scsi_pointer->phase = PH_DATA; scsi_pointer->have_data_in = IO_IN; nsp_pio_read(tmpSC); break; case BUSPHASE_STATUS: nsp_dataphase_bypass(tmpSC); nsp_dbg(NSP_DEBUG_INTR, "BUSPHASE_STATUS"); scsi_pointer->phase = PH_STATUS; scsi_pointer->Status = nsp_index_read(base, SCSIDATAWITHACK); nsp_dbg(NSP_DEBUG_INTR, "message=0x%x status=0x%x", scsi_pointer->Message, scsi_pointer->Status); break; case BUSPHASE_MESSAGE_OUT: nsp_dbg(NSP_DEBUG_INTR, "BUSPHASE_MESSAGE_OUT"); if ((phase & BUSMON_REQ) == 0) { goto timer_out; } scsi_pointer->phase = PH_MSG_OUT; //*sync_neg = SYNC_NOT_YET; data->MsgLen = i = 0; data->MsgBuffer[i] = IDENTIFY(true, lun); i++; if (*sync_neg == SYNC_NOT_YET) { data->Sync[target].SyncPeriod = 0; data->Sync[target].SyncOffset = 0; /**/ data->MsgBuffer[i] = EXTENDED_MESSAGE; i++; data->MsgBuffer[i] = 3; i++; data->MsgBuffer[i] = EXTENDED_SDTR; i++; data->MsgBuffer[i] = 0x0c; i++; data->MsgBuffer[i] = 15; i++; /**/ } data->MsgLen = i; nsp_analyze_sdtr(tmpSC); show_message(data); nsp_message_out(tmpSC); break; case BUSPHASE_MESSAGE_IN: nsp_dataphase_bypass(tmpSC); nsp_dbg(NSP_DEBUG_INTR, "BUSPHASE_MESSAGE_IN"); if ((phase & BUSMON_REQ) == 0) { goto timer_out; } scsi_pointer->phase = PH_MSG_IN; nsp_message_in(tmpSC); /**/ if (*sync_neg == SYNC_NOT_YET) { //nsp_dbg(NSP_DEBUG_INTR, "sync target=%d,lun=%d",target,lun); if (data->MsgLen >= 5 && data->MsgBuffer[0] == EXTENDED_MESSAGE && data->MsgBuffer[1] == 3 && data->MsgBuffer[2] == EXTENDED_SDTR ) { data->Sync[target].SyncPeriod = data->MsgBuffer[3]; data->Sync[target].SyncOffset = data->MsgBuffer[4]; //nsp_dbg(NSP_DEBUG_INTR, "sync ok, %d %d", data->MsgBuffer[3], data->MsgBuffer[4]); *sync_neg = SYNC_OK; } else { data->Sync[target].SyncPeriod = 0; data->Sync[target].SyncOffset = 0; *sync_neg = SYNC_NG; } nsp_analyze_sdtr(tmpSC); } /**/ /* search last messeage byte */ tmp = -1; for (i = 0; i < data->MsgLen; i++) { tmp = data->MsgBuffer[i]; if (data->MsgBuffer[i] == EXTENDED_MESSAGE) { i += (1 + data->MsgBuffer[i+1]); } } scsi_pointer->Message = 
tmp; nsp_dbg(NSP_DEBUG_INTR, "message=0x%x len=%d", scsi_pointer->Message, data->MsgLen); show_message(data); break; case BUSPHASE_SELECT: default: nsp_dbg(NSP_DEBUG_INTR, "BUSPHASE other"); break; } //nsp_dbg(NSP_DEBUG_INTR, "out"); return IRQ_HANDLED; timer_out: nsp_start_timer(tmpSC, 1000/102); return IRQ_HANDLED; } #ifdef NSP_DEBUG #include "nsp_debug.c" #endif /* NSP_DEBUG */ /*----------------------------------------------------------------*/ /* look for ninja3 card and init if found */ /*----------------------------------------------------------------*/ static struct Scsi_Host *nsp_detect(struct scsi_host_template *sht) { struct Scsi_Host *host; /* registered host structure */ nsp_hw_data *data_b = &nsp_data_base, *data; nsp_dbg(NSP_DEBUG_INIT, "this_id=%d", sht->this_id); host = scsi_host_alloc(&nsp_driver_template, sizeof(nsp_hw_data)); if (host == NULL) { nsp_dbg(NSP_DEBUG_INIT, "host failed"); return NULL; } memcpy(host->hostdata, data_b, sizeof(nsp_hw_data)); data = (nsp_hw_data *)host->hostdata; data->ScsiInfo->host = host; #ifdef NSP_DEBUG data->CmdId = 0; #endif nsp_dbg(NSP_DEBUG_INIT, "irq=%d,%d", data_b->IrqNumber, ((nsp_hw_data *)host->hostdata)->IrqNumber); host->unique_id = data->BaseAddress; host->io_port = data->BaseAddress; host->n_io_port = data->NumAddress; host->irq = data->IrqNumber; host->base = data->MmioAddress; spin_lock_init(&(data->Lock)); snprintf(data->nspinfo, sizeof(data->nspinfo), "NinjaSCSI-3/32Bi Driver $Revision: 1.23 $ IO:0x%04lx-0x%04lx MMIO(virt addr):0x%04lx IRQ:%02d", host->io_port, host->io_port + host->n_io_port - 1, host->base, host->irq); sht->name = data->nspinfo; nsp_dbg(NSP_DEBUG_INIT, "end"); return host; /* detect done. */ } /*----------------------------------------------------------------*/ /* return info string */ /*----------------------------------------------------------------*/ static const char *nsp_info(struct Scsi_Host *shpnt) { nsp_hw_data *data = (nsp_hw_data *)shpnt->hostdata; return data->nspinfo; } static int nsp_show_info(struct seq_file *m, struct Scsi_Host *host) { int id; int speed; unsigned long flags; nsp_hw_data *data; int hostno; hostno = host->host_no; data = (nsp_hw_data *)host->hostdata; seq_puts(m, "NinjaSCSI status\n\n" "Driver version: $Revision: 1.23 $\n"); seq_printf(m, "SCSI host No.: %d\n", hostno); seq_printf(m, "IRQ: %d\n", host->irq); seq_printf(m, "IO: 0x%lx-0x%lx\n", host->io_port, host->io_port + host->n_io_port - 1); seq_printf(m, "MMIO(virtual address): 0x%lx-0x%lx\n", host->base, host->base + data->MmioLength - 1); seq_printf(m, "sg_tablesize: %d\n", host->sg_tablesize); seq_puts(m, "burst transfer mode: "); switch (nsp_burst_mode) { case BURST_IO8: seq_puts(m, "io8"); break; case BURST_IO32: seq_puts(m, "io32"); break; case BURST_MEM32: seq_puts(m, "mem32"); break; default: seq_puts(m, "???"); break; } seq_putc(m, '\n'); spin_lock_irqsave(&(data->Lock), flags); seq_printf(m, "CurrentSC: 0x%p\n\n", data->CurrentSC); spin_unlock_irqrestore(&(data->Lock), flags); seq_puts(m, "SDTR status\n"); for(id = 0; id < ARRAY_SIZE(data->Sync); id++) { seq_printf(m, "id %d: ", id); if (id == host->this_id) { seq_puts(m, "----- NinjaSCSI-3 host adapter\n"); continue; } switch(data->Sync[id].SyncNegotiation) { case SYNC_OK: seq_puts(m, " sync"); break; case SYNC_NG: seq_puts(m, "async"); break; case SYNC_NOT_YET: seq_puts(m, " none"); break; default: seq_puts(m, "?????"); break; } if (data->Sync[id].SyncPeriod != 0) { speed = 1000000 / (data->Sync[id].SyncPeriod * 4); seq_printf(m, " transfer %d.%dMB/s, offset 
%d", speed / 1000, speed % 1000, data->Sync[id].SyncOffset ); } seq_putc(m, '\n'); } return 0; } /*---------------------------------------------------------------*/ /* error handler */ /*---------------------------------------------------------------*/ /* static int nsp_eh_abort(struct scsi_cmnd *SCpnt) { nsp_dbg(NSP_DEBUG_BUSRESET, "SCpnt=0x%p", SCpnt); return nsp_eh_bus_reset(SCpnt); }*/ static int nsp_bus_reset(nsp_hw_data *data) { unsigned int base = data->BaseAddress; int i; nsp_write(base, IRQCONTROL, IRQCONTROL_ALLMASK); nsp_index_write(base, SCSIBUSCTRL, SCSI_RST); mdelay(100); /* 100ms */ nsp_index_write(base, SCSIBUSCTRL, 0); for(i = 0; i < 5; i++) { nsp_index_read(base, IRQPHASESENCE); /* dummy read */ } nsphw_init_sync(data); nsp_write(base, IRQCONTROL, IRQCONTROL_ALLCLEAR); return SUCCESS; } static int nsp_eh_bus_reset(struct scsi_cmnd *SCpnt) { nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; nsp_dbg(NSP_DEBUG_BUSRESET, "SCpnt=0x%p", SCpnt); return nsp_bus_reset(data); } static int nsp_eh_host_reset(struct scsi_cmnd *SCpnt) { nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; nsp_dbg(NSP_DEBUG_BUSRESET, "in"); nsphw_init(data); return SUCCESS; } /********************************************************************** PCMCIA functions **********************************************************************/ static int nsp_cs_probe(struct pcmcia_device *link) { scsi_info_t *info; nsp_hw_data *data = &nsp_data_base; int ret; nsp_dbg(NSP_DEBUG_INIT, "in"); /* Create new SCSI device */ info = kzalloc(sizeof(*info), GFP_KERNEL); if (info == NULL) { return -ENOMEM; } info->p_dev = link; link->priv = info; data->ScsiInfo = info; nsp_dbg(NSP_DEBUG_INIT, "info=0x%p", info); ret = nsp_cs_config(link); nsp_dbg(NSP_DEBUG_INIT, "link=0x%p", link); return ret; } /* nsp_cs_attach */ static void nsp_cs_detach(struct pcmcia_device *link) { nsp_dbg(NSP_DEBUG_INIT, "in, link=0x%p", link); ((scsi_info_t *)link->priv)->stop = 1; nsp_cs_release(link); kfree(link->priv); link->priv = NULL; } /* nsp_cs_detach */ static int nsp_cs_config_check(struct pcmcia_device *p_dev, void *priv_data) { nsp_hw_data *data = priv_data; if (p_dev->config_index == 0) return -ENODEV; /* This reserves IO space but doesn't actually enable it */ if (pcmcia_request_io(p_dev) != 0) goto next_entry; if (resource_size(p_dev->resource[2])) { p_dev->resource[2]->flags |= (WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_CM | WIN_ENABLE); if (p_dev->resource[2]->end < 0x1000) p_dev->resource[2]->end = 0x1000; if (pcmcia_request_window(p_dev, p_dev->resource[2], 0) != 0) goto next_entry; if (pcmcia_map_mem_page(p_dev, p_dev->resource[2], p_dev->card_addr) != 0) goto next_entry; data->MmioAddress = (unsigned long) ioremap(p_dev->resource[2]->start, resource_size(p_dev->resource[2])); if (!data->MmioAddress) goto next_entry; data->MmioLength = resource_size(p_dev->resource[2]); } /* If we got this far, we're cool! 
*/ return 0; next_entry: nsp_dbg(NSP_DEBUG_INIT, "next"); pcmcia_disable_device(p_dev); return -ENODEV; } static int nsp_cs_config(struct pcmcia_device *link) { int ret; scsi_info_t *info = link->priv; struct Scsi_Host *host; nsp_hw_data *data = &nsp_data_base; nsp_dbg(NSP_DEBUG_INIT, "in"); link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_CHECK_VCC | CONF_AUTO_SET_VPP | CONF_AUTO_AUDIO | CONF_AUTO_SET_IOMEM | CONF_AUTO_SET_IO; ret = pcmcia_loop_config(link, nsp_cs_config_check, data); if (ret) goto cs_failed; if (pcmcia_request_irq(link, nspintr)) goto cs_failed; ret = pcmcia_enable_device(link); if (ret) goto cs_failed; if (free_ports) { if (link->resource[0]) { release_region(link->resource[0]->start, resource_size(link->resource[0])); } if (link->resource[1]) { release_region(link->resource[1]->start, resource_size(link->resource[1])); } } /* Set port and IRQ */ data->BaseAddress = link->resource[0]->start; data->NumAddress = resource_size(link->resource[0]); data->IrqNumber = link->irq; nsp_dbg(NSP_DEBUG_INIT, "I/O[0x%x+0x%x] IRQ %d", data->BaseAddress, data->NumAddress, data->IrqNumber); nsphw_init(data); host = nsp_detect(&nsp_driver_template); if (host == NULL) { nsp_dbg(NSP_DEBUG_INIT, "detect failed"); goto cs_failed; } ret = scsi_add_host (host, NULL); if (ret) goto cs_failed; scsi_scan_host(host); info->host = host; return 0; cs_failed: nsp_dbg(NSP_DEBUG_INIT, "config fail"); nsp_cs_release(link); return -ENODEV; } /* nsp_cs_config */ static void nsp_cs_release(struct pcmcia_device *link) { scsi_info_t *info = link->priv; nsp_hw_data *data = NULL; if (info->host == NULL) { nsp_msg(KERN_DEBUG, "unexpected card release call."); } else { data = (nsp_hw_data *)info->host->hostdata; } nsp_dbg(NSP_DEBUG_INIT, "link=0x%p", link); /* Unlink the device chain */ if (info->host != NULL) { scsi_remove_host(info->host); } if (resource_size(link->resource[2])) { if (data != NULL) { iounmap((void *)(data->MmioAddress)); } } pcmcia_disable_device(link); if (info->host != NULL) { scsi_host_put(info->host); } } /* nsp_cs_release */ static int nsp_cs_suspend(struct pcmcia_device *link) { scsi_info_t *info = link->priv; nsp_hw_data *data; nsp_dbg(NSP_DEBUG_INIT, "event: suspend"); if (info->host != NULL) { nsp_msg(KERN_INFO, "clear SDTR status"); data = (nsp_hw_data *)info->host->hostdata; nsphw_init_sync(data); } info->stop = 1; return 0; } static int nsp_cs_resume(struct pcmcia_device *link) { scsi_info_t *info = link->priv; nsp_hw_data *data; nsp_dbg(NSP_DEBUG_INIT, "event: resume"); info->stop = 0; if (info->host != NULL) { nsp_msg(KERN_INFO, "reset host and bus"); data = (nsp_hw_data *)info->host->hostdata; nsphw_init (data); nsp_bus_reset(data); } return 0; } /*======================================================================* * module entry point *====================================================================*/ static const struct pcmcia_device_id nsp_cs_ids[] = { PCMCIA_DEVICE_PROD_ID123("IO DATA", "CBSC16 ", "1", 0x547e66dc, 0x0d63a3fd, 0x51de003a), PCMCIA_DEVICE_PROD_ID123("KME ", "SCSI-CARD-001", "1", 0x534c02bc, 0x52008408, 0x51de003a), PCMCIA_DEVICE_PROD_ID123("KME ", "SCSI-CARD-002", "1", 0x534c02bc, 0xcb09d5b2, 0x51de003a), PCMCIA_DEVICE_PROD_ID123("KME ", "SCSI-CARD-003", "1", 0x534c02bc, 0xbc0ee524, 0x51de003a), PCMCIA_DEVICE_PROD_ID123("KME ", "SCSI-CARD-004", "1", 0x534c02bc, 0x226a7087, 0x51de003a), PCMCIA_DEVICE_PROD_ID123("WBT", "NinjaSCSI-3", "R1.0", 0xc7ba805f, 0xfdc7c97d, 0x6973710e), PCMCIA_DEVICE_PROD_ID123("WORKBIT", "UltraNinja-16", "1", 0x28191418, 
0xb70f4b09, 0x51de003a), PCMCIA_DEVICE_NULL }; MODULE_DEVICE_TABLE(pcmcia, nsp_cs_ids); static struct pcmcia_driver nsp_driver = { .owner = THIS_MODULE, .name = "nsp_cs", .probe = nsp_cs_probe, .remove = nsp_cs_detach, .id_table = nsp_cs_ids, .suspend = nsp_cs_suspend, .resume = nsp_cs_resume, }; module_pcmcia_driver(nsp_driver); /* end */
linux-master
drivers/scsi/pcmcia/nsp_cs.c
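Aside (editor's illustration, not part of the kernel file): the SDTR table printed by nsp_show_info() stores each target's negotiated period in 4 ns units, and the "%d.%dMB/s" figure comes straight out of the arithmetic shown below. This is a minimal user-space rerun of that calculation; the function name and sample values are made up.

#include <stdio.h>

/* SyncPeriod is kept in 4 ns units, exactly as nsp_show_info() expects. */
static void show_sync_rate(int sync_period, int sync_offset)
{
	int speed;

	if (sync_period == 0) {
		printf("async\n");
		return;
	}
	/* 1000000 / (period in ns) == transfer rate in kB/s on an 8-bit bus */
	speed = 1000000 / (sync_period * 4);

	printf("transfer %d.%dMB/s, offset %d\n",
	       speed / 1000, speed % 1000, sync_offset);
}

int main(void)
{
	show_sync_rate(25, 15);	/* 100 ns period -> "10.0MB/s" */
	show_sync_rate(50, 15);	/* 200 ns period -> "5.0MB/s"  */
	return 0;
}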
/*========================================================================== NinjaSCSI-3 message handler By: YOKOTA Hiroshi <[email protected]> This software may be used and distributed according to the terms of the GNU General Public License. */ /* $Id: nsp_message.c,v 1.6 2003/07/26 14:21:09 elca Exp $ */ static void nsp_message_in(struct scsi_cmnd *SCpnt) { unsigned int base = SCpnt->device->host->io_port; nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; unsigned char data_reg, control_reg; int ret, len; /* * XXX: NSP QUIRK * NSP invoke interrupts only in the case of scsi phase changes, * therefore we should poll the scsi phase here to catch * the next "msg in" if exists (no scsi phase changes). */ ret = 16; len = 0; nsp_dbg(NSP_DEBUG_MSGINOCCUR, "msgin loop"); do { /* read data */ data_reg = nsp_index_read(base, SCSIDATAIN); /* assert ACK */ control_reg = nsp_index_read(base, SCSIBUSCTRL); control_reg |= SCSI_ACK; nsp_index_write(base, SCSIBUSCTRL, control_reg); nsp_negate_signal(SCpnt, BUSMON_REQ, "msgin<REQ>"); data->MsgBuffer[len] = data_reg; len++; /* deassert ACK */ control_reg = nsp_index_read(base, SCSIBUSCTRL); control_reg &= ~SCSI_ACK; nsp_index_write(base, SCSIBUSCTRL, control_reg); /* catch a next signal */ ret = nsp_expect_signal(SCpnt, BUSPHASE_MESSAGE_IN, BUSMON_REQ); } while (ret > 0 && MSGBUF_SIZE > len); data->MsgLen = len; } static void nsp_message_out(struct scsi_cmnd *SCpnt) { nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; int ret = 1; int len = data->MsgLen; /* * XXX: NSP QUIRK * NSP invoke interrupts only in the case of scsi phase changes, * therefore we should poll the scsi phase here to catch * the next "msg out" if exists (no scsi phase changes). */ nsp_dbg(NSP_DEBUG_MSGOUTOCCUR, "msgout loop"); do { if (nsp_xfer(SCpnt, BUSPHASE_MESSAGE_OUT)) { nsp_msg(KERN_DEBUG, "msgout: xfer short"); } /* catch a next signal */ ret = nsp_expect_signal(SCpnt, BUSPHASE_MESSAGE_OUT, BUSMON_REQ); } while (ret > 0 && len-- > 0); } /* end */
linux-master
drivers/scsi/pcmcia/nsp_message.c
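Aside (illustrative only): nsp_message_in() keeps polling REQ because, as its quirk comment notes, the chip only interrupts on a bus-phase change. The sketch below simulates that accumulate-until-REQ-drops loop entirely in user space; the bus accessors, buffer size, and the two queued message bytes are invented for the example and only mirror the control flow, not the register-level handshake.

#include <stdio.h>

#define MSGBUF_SIZE 20			/* local stand-in for the driver's buffer */

/* Simulated target with two message bytes queued. */
static const unsigned char target_msg[] = { 0x01, 0x03 };
static unsigned int target_pos;

static int bus_req_asserted(void)	/* REQ stays up while bytes remain      */
{
	return target_pos < sizeof(target_msg);
}

static unsigned char bus_read_byte(void)	/* latch the data lines         */
{
	return target_msg[target_pos];
}

static void bus_ack_pulse(void)		/* assert ACK, wait REQ low, drop ACK   */
{
	target_pos++;
}

int main(void)
{
	unsigned char msg_buf[MSGBUF_SIZE];
	int len = 0;
	int i;

	/*
	 * Same idea as the driver's do/while: after every byte, poll for
	 * another REQ in MESSAGE IN phase instead of waiting for an
	 * interrupt that will never come.
	 */
	while (bus_req_asserted() && len < MSGBUF_SIZE) {
		msg_buf[len] = bus_read_byte();
		len++;
		bus_ack_pulse();
	}

	printf("MsgLen=%d, bytes:", len);
	for (i = 0; i < len; i++)
		printf(" %02x", msg_buf[i]);
	printf("\n");
	return 0;
}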
// SPDX-License-Identifier: GPL-2.0-or-later /* * sym53c500_cs.c Bob Tracy ([email protected]) * * A rewrite of the pcmcia-cs add-on driver for newer (circa 1997) * New Media Bus Toaster PCMCIA SCSI cards using the Symbios Logic * 53c500 controller: intended for use with 2.6 and later kernels. * The pcmcia-cs add-on version of this driver is not supported * beyond 2.4. It consisted of three files with history/copyright * information as follows: * * SYM53C500.h * Bob Tracy ([email protected]) * Original by Tom Corner ([email protected]). * Adapted from NCR53c406a.h which is Copyrighted (C) 1994 * Normunds Saumanis ([email protected]) * * SYM53C500.c * Bob Tracy ([email protected]) * Original driver by Tom Corner ([email protected]) was adapted * from NCR53c406a.c which is Copyrighted (C) 1994, 1995, 1996 * Normunds Saumanis ([email protected]) * * sym53c500.c * Bob Tracy ([email protected]) * Original by Tom Corner ([email protected]) was adapted from a * driver for the Qlogic SCSI card written by * David Hinds ([email protected]). */ #define SYM53C500_DEBUG 0 #define VERBOSE_SYM53C500_DEBUG 0 /* * Set this to 0 if you encounter kernel lockups while transferring * data in PIO mode. Note this can be changed via "sysfs". */ #define USE_FAST_PIO 1 /* =============== End of user configurable parameters ============== */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/ioport.h> #include <linux/blkdev.h> #include <linux/spinlock.h> #include <linux/bitops.h> #include <asm/io.h> #include <asm/dma.h> #include <asm/irq.h> #include <scsi/scsi_ioctl.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi.h> #include <scsi/scsi_host.h> #include <pcmcia/cistpl.h> #include <pcmcia/ds.h> #include <pcmcia/ciscode.h> /* ================================================================== */ #define SYNC_MODE 0 /* Synchronous transfer mode */ /* Default configuration */ #define C1_IMG 0x07 /* ID=7 */ #define C2_IMG 0x48 /* FE SCSI2 */ #define C3_IMG 0x20 /* CDB */ #define C4_IMG 0x04 /* ANE */ #define C5_IMG 0xa4 /* ? changed from b6= AA PI SIE POL */ #define C7_IMG 0x80 /* added for SYM53C500 t. 
corner */ /* Hardware Registers: offsets from io_port (base) */ /* Control Register Set 0 */ #define TC_LSB 0x00 /* transfer counter lsb */ #define TC_MSB 0x01 /* transfer counter msb */ #define SCSI_FIFO 0x02 /* scsi fifo register */ #define CMD_REG 0x03 /* command register */ #define STAT_REG 0x04 /* status register */ #define DEST_ID 0x04 /* selection/reselection bus id */ #define INT_REG 0x05 /* interrupt status register */ #define SRTIMOUT 0x05 /* select/reselect timeout reg */ #define SEQ_REG 0x06 /* sequence step register */ #define SYNCPRD 0x06 /* synchronous transfer period */ #define FIFO_FLAGS 0x07 /* indicates # of bytes in fifo */ #define SYNCOFF 0x07 /* synchronous offset register */ #define CONFIG1 0x08 /* configuration register */ #define CLKCONV 0x09 /* clock conversion register */ /* #define TESTREG 0x0A */ /* test mode register */ #define CONFIG2 0x0B /* configuration 2 register */ #define CONFIG3 0x0C /* configuration 3 register */ #define CONFIG4 0x0D /* configuration 4 register */ #define TC_HIGH 0x0E /* transfer counter high */ /* #define FIFO_BOTTOM 0x0F */ /* reserve FIFO byte register */ /* Control Register Set 1 */ /* #define JUMPER_SENSE 0x00 */ /* jumper sense port reg (r/w) */ /* #define SRAM_PTR 0x01 */ /* SRAM address pointer reg (r/w) */ /* #define SRAM_DATA 0x02 */ /* SRAM data register (r/w) */ #define PIO_FIFO 0x04 /* PIO FIFO registers (r/w) */ /* #define PIO_FIFO1 0x05 */ /* */ /* #define PIO_FIFO2 0x06 */ /* */ /* #define PIO_FIFO3 0x07 */ /* */ #define PIO_STATUS 0x08 /* PIO status (r/w) */ /* #define ATA_CMD 0x09 */ /* ATA command/status reg (r/w) */ /* #define ATA_ERR 0x0A */ /* ATA features/error reg (r/w) */ #define PIO_FLAG 0x0B /* PIO flag interrupt enable (r/w) */ #define CONFIG5 0x09 /* configuration 5 register */ /* #define SIGNATURE 0x0E */ /* signature register (r) */ /* #define CONFIG6 0x0F */ /* configuration 6 register (r) */ #define CONFIG7 0x0d /* select register set 0 */ #define REG0(x) (outb(C4_IMG, (x) + CONFIG4)) /* select register set 1 */ #define REG1(x) outb(C7_IMG, (x) + CONFIG7); outb(C5_IMG, (x) + CONFIG5) #if SYM53C500_DEBUG #define DEB(x) x #else #define DEB(x) #endif #if VERBOSE_SYM53C500_DEBUG #define VDEB(x) x #else #define VDEB(x) #endif #define LOAD_DMA_COUNT(x, count) \ outb(count & 0xff, (x) + TC_LSB); \ outb((count >> 8) & 0xff, (x) + TC_MSB); \ outb((count >> 16) & 0xff, (x) + TC_HIGH); /* Chip commands */ #define DMA_OP 0x80 #define SCSI_NOP 0x00 #define FLUSH_FIFO 0x01 #define CHIP_RESET 0x02 #define SCSI_RESET 0x03 #define RESELECT 0x40 #define SELECT_NO_ATN 0x41 #define SELECT_ATN 0x42 #define SELECT_ATN_STOP 0x43 #define ENABLE_SEL 0x44 #define DISABLE_SEL 0x45 #define SELECT_ATN3 0x46 #define RESELECT3 0x47 #define TRANSFER_INFO 0x10 #define INIT_CMD_COMPLETE 0x11 #define MSG_ACCEPT 0x12 #define TRANSFER_PAD 0x18 #define SET_ATN 0x1a #define RESET_ATN 0x1b #define SEND_MSG 0x20 #define SEND_STATUS 0x21 #define SEND_DATA 0x22 #define DISCONN_SEQ 0x23 #define TERMINATE_SEQ 0x24 #define TARG_CMD_COMPLETE 0x25 #define DISCONN 0x27 #define RECV_MSG 0x28 #define RECV_CMD 0x29 #define RECV_DATA 0x2a #define RECV_CMD_SEQ 0x2b #define TARGET_ABORT_DMA 0x04 /* ================================================================== */ struct scsi_info_t { struct pcmcia_device *p_dev; struct Scsi_Host *host; unsigned short manf_id; }; /* * Repository for per-instance host data. 
*/ struct sym53c500_data { struct scsi_cmnd *current_SC; int fast_pio; }; struct sym53c500_cmd_priv { int status; int message; int phase; }; enum Phase { idle, data_out, data_in, command_ph, status_ph, message_out, message_in }; /* ================================================================== */ static void chip_init(int io_port) { REG1(io_port); outb(0x01, io_port + PIO_STATUS); outb(0x00, io_port + PIO_FLAG); outb(C4_IMG, io_port + CONFIG4); /* REG0(io_port); */ outb(C3_IMG, io_port + CONFIG3); outb(C2_IMG, io_port + CONFIG2); outb(C1_IMG, io_port + CONFIG1); outb(0x05, io_port + CLKCONV); /* clock conversion factor */ outb(0x9C, io_port + SRTIMOUT); /* Selection timeout */ outb(0x05, io_port + SYNCPRD); /* Synchronous transfer period */ outb(SYNC_MODE, io_port + SYNCOFF); /* synchronous mode */ } static void SYM53C500_int_host_reset(int io_port) { outb(C4_IMG, io_port + CONFIG4); /* REG0(io_port); */ outb(CHIP_RESET, io_port + CMD_REG); outb(SCSI_NOP, io_port + CMD_REG); /* required after reset */ outb(SCSI_RESET, io_port + CMD_REG); chip_init(io_port); } static __inline__ int SYM53C500_pio_read(int fast_pio, int base, unsigned char *request, unsigned int reqlen) { int i; int len; /* current scsi fifo size */ REG1(base); while (reqlen) { i = inb(base + PIO_STATUS); /* VDEB(printk("pio_status=%x\n", i)); */ if (i & 0x80) return 0; switch (i & 0x1e) { default: case 0x10: /* fifo empty */ len = 0; break; case 0x0: len = 1; break; case 0x8: /* fifo 1/3 full */ len = 42; break; case 0xc: /* fifo 2/3 full */ len = 84; break; case 0xe: /* fifo full */ len = 128; break; } if ((i & 0x40) && len == 0) { /* fifo empty and interrupt occurred */ return 0; } if (len) { if (len > reqlen) len = reqlen; if (fast_pio && len > 3) { insl(base + PIO_FIFO, request, len >> 2); request += len & 0xfc; reqlen -= len & 0xfc; } else { while (len--) { *request++ = inb(base + PIO_FIFO); reqlen--; } } } } return 0; } static __inline__ int SYM53C500_pio_write(int fast_pio, int base, unsigned char *request, unsigned int reqlen) { int i = 0; int len; /* current scsi fifo size */ REG1(base); while (reqlen && !(i & 0x40)) { i = inb(base + PIO_STATUS); /* VDEB(printk("pio_status=%x\n", i)); */ if (i & 0x80) /* error */ return 0; switch (i & 0x1e) { case 0x10: len = 128; break; case 0x0: len = 84; break; case 0x8: len = 42; break; case 0xc: len = 1; break; default: case 0xe: len = 0; break; } if (len) { if (len > reqlen) len = reqlen; if (fast_pio && len > 3) { outsl(base + PIO_FIFO, request, len >> 2); request += len & 0xfc; reqlen -= len & 0xfc; } else { while (len--) { outb(*request++, base + PIO_FIFO); reqlen--; } } } } return 0; } static irqreturn_t SYM53C500_intr(int irq, void *dev_id) { unsigned long flags; struct Scsi_Host *dev = dev_id; DEB(unsigned char fifo_size;) DEB(unsigned char seq_reg;) unsigned char status, int_reg; unsigned char pio_status; int port_base = dev->io_port; struct sym53c500_data *data = (struct sym53c500_data *)dev->hostdata; struct scsi_cmnd *curSC = data->current_SC; struct sym53c500_cmd_priv *scp = scsi_cmd_priv(curSC); int fast_pio = data->fast_pio; spin_lock_irqsave(dev->host_lock, flags); VDEB(printk("SYM53C500_intr called\n")); REG1(port_base); pio_status = inb(port_base + PIO_STATUS); REG0(port_base); status = inb(port_base + STAT_REG); DEB(seq_reg = inb(port_base + SEQ_REG)); int_reg = inb(port_base + INT_REG); DEB(fifo_size = inb(port_base + FIFO_FLAGS) & 0x1f); #if SYM53C500_DEBUG printk("status=%02x, seq_reg=%02x, int_reg=%02x, fifo_size=%02x", status, seq_reg, int_reg, 
fifo_size); printk(", pio=%02x\n", pio_status); #endif /* SYM53C500_DEBUG */ if (int_reg & 0x80) { /* SCSI reset intr */ DEB(printk("SYM53C500: reset intr received\n")); curSC->result = DID_RESET << 16; goto idle_out; } if (pio_status & 0x80) { printk("SYM53C500: Warning: PIO error!\n"); curSC->result = DID_ERROR << 16; goto idle_out; } if (status & 0x20) { /* Parity error */ printk("SYM53C500: Warning: parity error!\n"); curSC->result = DID_PARITY << 16; goto idle_out; } if (status & 0x40) { /* Gross error */ printk("SYM53C500: Warning: gross error!\n"); curSC->result = DID_ERROR << 16; goto idle_out; } if (int_reg & 0x20) { /* Disconnect */ DEB(printk("SYM53C500: disconnect intr received\n")); if (scp->phase != message_in) { /* Unexpected disconnect */ curSC->result = DID_NO_CONNECT << 16; } else { /* Command complete, return status and message */ curSC->result = (scp->status & 0xff) | ((scp->message & 0xff) << 8) | (DID_OK << 16); } goto idle_out; } switch (status & 0x07) { /* scsi phase */ case 0x00: /* DATA-OUT */ if (int_reg & 0x10) { /* Target requesting info transfer */ struct scatterlist *sg; int i; scp->phase = data_out; VDEB(printk("SYM53C500: Data-Out phase\n")); outb(FLUSH_FIFO, port_base + CMD_REG); LOAD_DMA_COUNT(port_base, scsi_bufflen(curSC)); /* Max transfer size */ outb(TRANSFER_INFO | DMA_OP, port_base + CMD_REG); scsi_for_each_sg(curSC, sg, scsi_sg_count(curSC), i) { SYM53C500_pio_write(fast_pio, port_base, sg_virt(sg), sg->length); } REG0(port_base); } break; case 0x01: /* DATA-IN */ if (int_reg & 0x10) { /* Target requesting info transfer */ struct scatterlist *sg; int i; scp->phase = data_in; VDEB(printk("SYM53C500: Data-In phase\n")); outb(FLUSH_FIFO, port_base + CMD_REG); LOAD_DMA_COUNT(port_base, scsi_bufflen(curSC)); /* Max transfer size */ outb(TRANSFER_INFO | DMA_OP, port_base + CMD_REG); scsi_for_each_sg(curSC, sg, scsi_sg_count(curSC), i) { SYM53C500_pio_read(fast_pio, port_base, sg_virt(sg), sg->length); } REG0(port_base); } break; case 0x02: /* COMMAND */ scp->phase = command_ph; printk("SYM53C500: Warning: Unknown interrupt occurred in command phase!\n"); break; case 0x03: /* STATUS */ scp->phase = status_ph; VDEB(printk("SYM53C500: Status phase\n")); outb(FLUSH_FIFO, port_base + CMD_REG); outb(INIT_CMD_COMPLETE, port_base + CMD_REG); break; case 0x04: /* Reserved */ case 0x05: /* Reserved */ printk("SYM53C500: WARNING: Reserved phase!!!\n"); break; case 0x06: /* MESSAGE-OUT */ DEB(printk("SYM53C500: Message-Out phase\n")); scp->phase = message_out; outb(SET_ATN, port_base + CMD_REG); /* Reject the message */ outb(MSG_ACCEPT, port_base + CMD_REG); break; case 0x07: /* MESSAGE-IN */ VDEB(printk("SYM53C500: Message-In phase\n")); scp->phase = message_in; scp->status = inb(port_base + SCSI_FIFO); scp->message = inb(port_base + SCSI_FIFO); VDEB(printk("SCSI FIFO size=%d\n", inb(port_base + FIFO_FLAGS) & 0x1f)); DEB(printk("Status = %02x Message = %02x\n", scp->status, scp->message)); if (scp->message == SAVE_POINTERS || scp->message == DISCONNECT) { outb(SET_ATN, port_base + CMD_REG); /* Reject message */ DEB(printk("Discarding SAVE_POINTERS message\n")); } outb(MSG_ACCEPT, port_base + CMD_REG); break; } out: spin_unlock_irqrestore(dev->host_lock, flags); return IRQ_HANDLED; idle_out: scp->phase = idle; scsi_done(curSC); goto out; } static void SYM53C500_release(struct pcmcia_device *link) { struct scsi_info_t *info = link->priv; struct Scsi_Host *shost = info->host; dev_dbg(&link->dev, "SYM53C500_release\n"); /* * Do this before releasing/freeing resources. 
*/ scsi_remove_host(shost); /* * Interrupts getting hosed on card removal. Try * the following code, mostly from qlogicfas.c. */ if (shost->irq) free_irq(shost->irq, shost); if (shost->io_port && shost->n_io_port) release_region(shost->io_port, shost->n_io_port); pcmcia_disable_device(link); scsi_host_put(shost); } /* SYM53C500_release */ static const char* SYM53C500_info(struct Scsi_Host *SChost) { static char info_msg[256]; struct sym53c500_data *data = (struct sym53c500_data *)SChost->hostdata; DEB(printk("SYM53C500_info called\n")); (void)snprintf(info_msg, sizeof(info_msg), "SYM53C500 at 0x%lx, IRQ %d, %s PIO mode.", SChost->io_port, SChost->irq, data->fast_pio ? "fast" : "slow"); return (info_msg); } static int SYM53C500_queue_lck(struct scsi_cmnd *SCpnt) { struct sym53c500_cmd_priv *scp = scsi_cmd_priv(SCpnt); int i; int port_base = SCpnt->device->host->io_port; struct sym53c500_data *data = (struct sym53c500_data *)SCpnt->device->host->hostdata; VDEB(printk("SYM53C500_queue called\n")); DEB(printk("cmd=%02x, cmd_len=%02x, target=%02x, lun=%02x, bufflen=%d\n", SCpnt->cmnd[0], SCpnt->cmd_len, SCpnt->device->id, (u8)SCpnt->device->lun, scsi_bufflen(SCpnt))); VDEB(for (i = 0; i < SCpnt->cmd_len; i++) printk("cmd[%d]=%02x ", i, SCpnt->cmnd[i])); VDEB(printk("\n")); data->current_SC = SCpnt; scp->phase = command_ph; scp->status = 0; scp->message = 0; /* We are locked here already by the mid layer */ REG0(port_base); outb(scmd_id(SCpnt), port_base + DEST_ID); /* set destination */ outb(FLUSH_FIFO, port_base + CMD_REG); /* reset the fifos */ for (i = 0; i < SCpnt->cmd_len; i++) { outb(SCpnt->cmnd[i], port_base + SCSI_FIFO); } outb(SELECT_NO_ATN, port_base + CMD_REG); return 0; } static DEF_SCSI_QCMD(SYM53C500_queue) static int SYM53C500_host_reset(struct scsi_cmnd *SCpnt) { int port_base = SCpnt->device->host->io_port; DEB(printk("SYM53C500_host_reset called\n")); spin_lock_irq(SCpnt->device->host->host_lock); SYM53C500_int_host_reset(port_base); spin_unlock_irq(SCpnt->device->host->host_lock); return SUCCESS; } static int SYM53C500_biosparm(struct scsi_device *disk, struct block_device *dev, sector_t capacity, int *info_array) { int size; DEB(printk("SYM53C500_biosparm called\n")); size = capacity; info_array[0] = 64; /* heads */ info_array[1] = 32; /* sectors */ info_array[2] = size >> 11; /* cylinders */ if (info_array[2] > 1024) { /* big disk */ info_array[0] = 255; info_array[1] = 63; info_array[2] = size / (255 * 63); } return 0; } static ssize_t SYM53C500_show_pio(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *SHp = class_to_shost(dev); struct sym53c500_data *data = (struct sym53c500_data *)SHp->hostdata; return snprintf(buf, 4, "%d\n", data->fast_pio); } static ssize_t SYM53C500_store_pio(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int pio; struct Scsi_Host *SHp = class_to_shost(dev); struct sym53c500_data *data = (struct sym53c500_data *)SHp->hostdata; pio = simple_strtoul(buf, NULL, 0); if (pio == 0 || pio == 1) { data->fast_pio = pio; return count; } else return -EINVAL; } /* * SCSI HBA device attributes we want to * make available via sysfs. 
*/ static struct device_attribute SYM53C500_pio_attr = { .attr = { .name = "fast_pio", .mode = (S_IRUGO | S_IWUSR), }, .show = SYM53C500_show_pio, .store = SYM53C500_store_pio, }; static struct attribute *SYM53C500_shost_attrs[] = { &SYM53C500_pio_attr.attr, NULL, }; ATTRIBUTE_GROUPS(SYM53C500_shost); /* * scsi_host_template initializer */ static const struct scsi_host_template sym53c500_driver_template = { .module = THIS_MODULE, .name = "SYM53C500", .info = SYM53C500_info, .queuecommand = SYM53C500_queue, .eh_host_reset_handler = SYM53C500_host_reset, .bios_param = SYM53C500_biosparm, .proc_name = "SYM53C500", .can_queue = 1, .this_id = 7, .sg_tablesize = 32, .shost_groups = SYM53C500_shost_groups, .cmd_size = sizeof(struct sym53c500_cmd_priv), }; static int SYM53C500_config_check(struct pcmcia_device *p_dev, void *priv_data) { p_dev->io_lines = 10; p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; if (p_dev->resource[0]->start == 0) return -ENODEV; return pcmcia_request_io(p_dev); } static int SYM53C500_config(struct pcmcia_device *link) { struct scsi_info_t *info = link->priv; int ret; int irq_level, port_base; struct Scsi_Host *host; const struct scsi_host_template *tpnt = &sym53c500_driver_template; struct sym53c500_data *data; dev_dbg(&link->dev, "SYM53C500_config\n"); info->manf_id = link->manf_id; ret = pcmcia_loop_config(link, SYM53C500_config_check, NULL); if (ret) goto failed; if (!link->irq) goto failed; ret = pcmcia_enable_device(link); if (ret) goto failed; /* * That's the trouble with copying liberally from another driver. * Some things probably aren't relevant, and I suspect this entire * section dealing with manufacturer IDs can be scrapped. --rct */ if ((info->manf_id == MANFID_MACNICA) || (info->manf_id == MANFID_PIONEER) || (info->manf_id == 0x0098)) { /* set ATAcmd */ outb(0xb4, link->resource[0]->start + 0xd); outb(0x24, link->resource[0]->start + 0x9); outb(0x04, link->resource[0]->start + 0xd); } /* * irq_level == 0 implies tpnt->can_queue == 0, which * is not supported in 2.6. Thus, only irq_level > 0 * will be allowed. * * Possible port_base values are as follows: * * 0x130, 0x230, 0x280, 0x290, * 0x320, 0x330, 0x340, 0x350 */ port_base = link->resource[0]->start; irq_level = link->irq; DEB(printk("SYM53C500: port_base=0x%x, irq=%d, fast_pio=%d\n", port_base, irq_level, USE_FAST_PIO);) chip_init(port_base); host = scsi_host_alloc(tpnt, sizeof(struct sym53c500_data)); if (!host) { printk("SYM53C500: Unable to register host, giving up.\n"); goto err_release; } data = (struct sym53c500_data *)host->hostdata; if (irq_level > 0) { if (request_irq(irq_level, SYM53C500_intr, IRQF_SHARED, "SYM53C500", host)) { printk("SYM53C500: unable to allocate IRQ %d\n", irq_level); goto err_free_scsi; } DEB(printk("SYM53C500: allocated IRQ %d\n", irq_level)); } else if (irq_level == 0) { DEB(printk("SYM53C500: No interrupts detected\n")); goto err_free_scsi; } else { DEB(printk("SYM53C500: Shouldn't get here!\n")); goto err_free_scsi; } host->unique_id = port_base; host->irq = irq_level; host->io_port = port_base; host->n_io_port = 0x10; host->dma_channel = -1; /* * Note fast_pio is set to USE_FAST_PIO by * default, but can be changed via "sysfs". 
*/ data->fast_pio = USE_FAST_PIO; info->host = host; if (scsi_add_host(host, NULL)) goto err_free_irq; scsi_scan_host(host); return 0; err_free_irq: free_irq(irq_level, host); err_free_scsi: scsi_host_put(host); err_release: release_region(port_base, 0x10); printk(KERN_INFO "sym53c500_cs: no SCSI devices found\n"); return -ENODEV; failed: SYM53C500_release(link); return -ENODEV; } /* SYM53C500_config */ static int sym53c500_resume(struct pcmcia_device *link) { struct scsi_info_t *info = link->priv; /* See earlier comment about manufacturer IDs. */ if ((info->manf_id == MANFID_MACNICA) || (info->manf_id == MANFID_PIONEER) || (info->manf_id == 0x0098)) { outb(0x80, link->resource[0]->start + 0xd); outb(0x24, link->resource[0]->start + 0x9); outb(0x04, link->resource[0]->start + 0xd); } /* * If things don't work after a "resume", * this is a good place to start looking. */ SYM53C500_int_host_reset(link->resource[0]->start); return 0; } static void SYM53C500_detach(struct pcmcia_device *link) { dev_dbg(&link->dev, "SYM53C500_detach\n"); SYM53C500_release(link); kfree(link->priv); link->priv = NULL; } /* SYM53C500_detach */ static int SYM53C500_probe(struct pcmcia_device *link) { struct scsi_info_t *info; dev_dbg(&link->dev, "SYM53C500_attach()\n"); /* Create new SCSI device */ info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; info->p_dev = link; link->priv = info; link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO; return SYM53C500_config(link); } /* SYM53C500_attach */ MODULE_AUTHOR("Bob Tracy <[email protected]>"); MODULE_DESCRIPTION("SYM53C500 PCMCIA SCSI driver"); MODULE_LICENSE("GPL"); static const struct pcmcia_device_id sym53c500_ids[] = { PCMCIA_DEVICE_PROD_ID12("BASICS by New Media Corporation", "SCSI Sym53C500", 0x23c78a9d, 0x0099e7f7), PCMCIA_DEVICE_PROD_ID12("New Media Corporation", "SCSI Bus Toaster Sym53C500", 0x085a850b, 0x45432eb8), PCMCIA_DEVICE_PROD_ID2("SCSI9000", 0x21648f44), PCMCIA_DEVICE_NULL, }; MODULE_DEVICE_TABLE(pcmcia, sym53c500_ids); static struct pcmcia_driver sym53c500_cs_driver = { .owner = THIS_MODULE, .name = "sym53c500_cs", .probe = SYM53C500_probe, .remove = SYM53C500_detach, .id_table = sym53c500_ids, .resume = sym53c500_resume, }; module_pcmcia_driver(sym53c500_cs_driver);
linux-master
drivers/scsi/pcmcia/sym53c500_cs.c
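Aside (illustrative only): SYM53C500_pio_read() sizes each burst from the fill-level bits of PIO_STATUS and, in fast-PIO mode, moves whole 32-bit words with insl(). The decode is lifted below into a plain function that compiles anywhere; the function name and the printf stand in for the real port I/O.

#include <stdio.h>

/* Bytes available in the 128-byte PIO FIFO for a read, per PIO_STATUS. */
static int pio_fifo_read_level(unsigned char pio_status)
{
	switch (pio_status & 0x1e) {
	case 0x10: return 0;	/* fifo empty    */
	case 0x00: return 1;
	case 0x08: return 42;	/* fifo 1/3 full */
	case 0x0c: return 84;	/* fifo 2/3 full */
	case 0x0e: return 128;	/* fifo full     */
	default:   return 0;
	}
}

int main(void)
{
	const unsigned char samples[] = { 0x10, 0x00, 0x08, 0x0c, 0x0e };
	unsigned int i;

	for (i = 0; i < sizeof(samples); i++) {
		int len = pio_fifo_read_level(samples[i]);

		/*
		 * Fast PIO in the driver transfers len & ~3 of these bytes
		 * with a single insl() and leaves any remainder for the
		 * next pass through its while loop.
		 */
		printf("status=0x%02x -> %3d bytes (%d dwords via insl, %d left over)\n",
		       samples[i], len, len >> 2, len & 3);
	}
	return 0;
}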
/*====================================================================== A driver for Adaptec AHA152X-compatible PCMCIA SCSI cards. This driver supports the Adaptec AHA-1460, the New Media Bus Toaster, and the New Media Toast & Jam. aha152x_cs.c 1.54 2000/06/12 21:27:25 The contents of this file are subject to the Mozilla Public License Version 1.1 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.mozilla.org/MPL/ Software distributed under the License is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for the specific language governing rights and limitations under the License. The initial developer of the original code is David A. Hinds <[email protected]>. Portions created by David A. Hinds are Copyright (C) 1999 David A. Hinds. All Rights Reserved. Alternatively, the contents of this file may be used under the terms of the GNU General Public License version 2 (the "GPL"), in which case the provisions of the GPL are applicable instead of the above. If you wish to allow the use of your version of this file only under the terms of the GPL and not to allow others to use your version of this file under the MPL, indicate your decision by deleting the provisions above and replace them with the notice and other provisions required by the GPL. If you do not delete the provisions above, a recipient may use your version of this file under either the MPL or the GPL. ======================================================================*/ #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/ioport.h> #include <linux/major.h> #include <linux/blkdev.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_host.h> #include <scsi/scsi_ioctl.h> #include <scsi/scsi_tcq.h> #include "aha152x.h" #include <pcmcia/cistpl.h> #include <pcmcia/ds.h> /*====================================================================*/ /* Parameters that can be set with 'insmod' */ /* SCSI bus setup options */ static int host_id = 7; static int reconnect = 1; static int parity = 1; static int synchronous = 1; static int reset_delay = 100; static int ext_trans = 0; module_param(host_id, int, 0); module_param(reconnect, int, 0); module_param(parity, int, 0); module_param(synchronous, int, 0); module_param(reset_delay, int, 0); module_param(ext_trans, int, 0); MODULE_LICENSE("Dual MPL/GPL"); /*====================================================================*/ typedef struct scsi_info_t { struct pcmcia_device *p_dev; struct Scsi_Host *host; } scsi_info_t; static void aha152x_release_cs(struct pcmcia_device *link); static void aha152x_detach(struct pcmcia_device *p_dev); static int aha152x_config_cs(struct pcmcia_device *link); static int aha152x_probe(struct pcmcia_device *link) { scsi_info_t *info; dev_dbg(&link->dev, "aha152x_attach()\n"); /* Create new SCSI device */ info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; info->p_dev = link; link->priv = info; link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO; link->config_regs = PRESENT_OPTION; return aha152x_config_cs(link); } /* aha152x_attach */ /*====================================================================*/ static void aha152x_detach(struct pcmcia_device *link) { dev_dbg(&link->dev, "aha152x_detach\n"); aha152x_release_cs(link); /* Unlink 
device structure, free bits */ kfree(link->priv); } /* aha152x_detach */ /*====================================================================*/ static int aha152x_config_check(struct pcmcia_device *p_dev, void *priv_data) { p_dev->io_lines = 10; /* For New Media T&J, look for a SCSI window */ if ((p_dev->resource[0]->end < 0x20) && (p_dev->resource[1]->end >= 0x20)) p_dev->resource[0]->start = p_dev->resource[1]->start; if (p_dev->resource[0]->start >= 0xffff) return -EINVAL; p_dev->resource[1]->start = p_dev->resource[1]->end = 0; p_dev->resource[0]->end = 0x20; p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; return pcmcia_request_io(p_dev); } static int aha152x_config_cs(struct pcmcia_device *link) { scsi_info_t *info = link->priv; struct aha152x_setup s; int ret; struct Scsi_Host *host; dev_dbg(&link->dev, "aha152x_config\n"); ret = pcmcia_loop_config(link, aha152x_config_check, NULL); if (ret) goto failed; if (!link->irq) goto failed; ret = pcmcia_enable_device(link); if (ret) goto failed; /* Set configuration options for the aha152x driver */ memset(&s, 0, sizeof(s)); s.conf = "PCMCIA setup"; s.io_port = link->resource[0]->start; s.irq = link->irq; s.scsiid = host_id; s.reconnect = reconnect; s.parity = parity; s.synchronous = synchronous; s.delay = reset_delay; if (ext_trans) s.ext_trans = ext_trans; host = aha152x_probe_one(&s); if (host == NULL) { printk(KERN_INFO "aha152x_cs: no SCSI devices found\n"); goto failed; } info->host = host; return 0; failed: aha152x_release_cs(link); return -ENODEV; } static void aha152x_release_cs(struct pcmcia_device *link) { scsi_info_t *info = link->priv; aha152x_release(info->host); pcmcia_disable_device(link); } static int aha152x_resume(struct pcmcia_device *link) { scsi_info_t *info = link->priv; aha152x_host_reset_host(info->host); return 0; } static const struct pcmcia_device_id aha152x_ids[] = { PCMCIA_DEVICE_PROD_ID123("New Media", "SCSI", "Bus Toaster", 0xcdf7e4cc, 0x35f26476, 0xa8851d6e), PCMCIA_DEVICE_PROD_ID123("NOTEWORTHY", "SCSI", "Bus Toaster", 0xad89c6e8, 0x35f26476, 0xa8851d6e), PCMCIA_DEVICE_PROD_ID12("Adaptec, Inc.", "APA-1460 SCSI Host Adapter", 0x24ba9738, 0x3a3c3d20), PCMCIA_DEVICE_PROD_ID12("New Media Corporation", "Multimedia Sound/SCSI", 0x085a850b, 0x80a6535c), PCMCIA_DEVICE_PROD_ID12("NOTEWORTHY", "NWCOMB02 SCSI/AUDIO COMBO CARD", 0xad89c6e8, 0x5f9a615b), PCMCIA_DEVICE_NULL, }; MODULE_DEVICE_TABLE(pcmcia, aha152x_ids); static struct pcmcia_driver aha152x_cs_driver = { .owner = THIS_MODULE, .name = "aha152x_cs", .probe = aha152x_probe, .remove = aha152x_detach, .id_table = aha152x_ids, .resume = aha152x_resume, }; module_pcmcia_driver(aha152x_cs_driver);
linux-master
drivers/scsi/pcmcia/aha152x_stub.c
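Aside (illustrative only): the one quirk in aha152x_config_check() is the New Media Toast & Jam fallback, where the SCSI registers live in the second I/O window, so the probe steals that window's base when the first one is under 0x20 ports. The compile-anywhere rendition below keeps only that decision; the io_window struct and function name are hypothetical replacements for the pcmcia resources.

#include <stdio.h>

struct io_window {
	unsigned long start;	/* base port                          */
	unsigned long end;	/* window size, as the driver uses it */
};

/* Returns the base port to request, or 0 if no usable window exists. */
static unsigned long pick_scsi_window(struct io_window *win0,
				      const struct io_window *win1)
{
	/* For New Media T&J, look for a SCSI window */
	if (win0->end < 0x20 && win1->end >= 0x20)
		win0->start = win1->start;

	if (win0->start >= 0xffff)
		return 0;

	win0->end = 0x20;	/* the driver always claims a 0x20-port block */
	return win0->start;
}

int main(void)
{
	struct io_window audio = { .start = 0x140, .end = 0x10 };
	struct io_window scsi  = { .start = 0x340, .end = 0x20 };

	printf("base=0x%lx\n", pick_scsi_window(&audio, &scsi));	/* 0x340 */
	return 0;
}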
// SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1) /* * Driver for Future Domain-compatible PCMCIA SCSI cards * Copyright 2019 Ondrej Zary * * The initial developer of the original code is David A. Hinds * <[email protected]>. Portions created by David A. Hinds * are Copyright (C) 1999 David A. Hinds. All Rights Reserved. */ #include <linux/module.h> #include <linux/init.h> #include <scsi/scsi_host.h> #include <pcmcia/cistpl.h> #include <pcmcia/ds.h> #include "fdomain.h" MODULE_AUTHOR("Ondrej Zary, David Hinds"); MODULE_DESCRIPTION("Future Domain PCMCIA SCSI driver"); MODULE_LICENSE("Dual MPL/GPL"); static int fdomain_config_check(struct pcmcia_device *p_dev, void *priv_data) { p_dev->io_lines = 10; p_dev->resource[0]->end = FDOMAIN_REGION_SIZE; p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; return pcmcia_request_io(p_dev); } static int fdomain_probe(struct pcmcia_device *link) { int ret; struct Scsi_Host *sh; link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO; link->config_regs = PRESENT_OPTION; ret = pcmcia_loop_config(link, fdomain_config_check, NULL); if (ret) return ret; ret = pcmcia_enable_device(link); if (ret) goto fail_disable; if (!request_region(link->resource[0]->start, FDOMAIN_REGION_SIZE, "fdomain_cs")) { ret = -EBUSY; goto fail_disable; } sh = fdomain_create(link->resource[0]->start, link->irq, 7, &link->dev); if (!sh) { dev_err(&link->dev, "Controller initialization failed"); ret = -ENODEV; goto fail_release; } link->priv = sh; return 0; fail_release: release_region(link->resource[0]->start, FDOMAIN_REGION_SIZE); fail_disable: pcmcia_disable_device(link); return ret; } static void fdomain_remove(struct pcmcia_device *link) { fdomain_destroy(link->priv); release_region(link->resource[0]->start, FDOMAIN_REGION_SIZE); pcmcia_disable_device(link); } static const struct pcmcia_device_id fdomain_ids[] = { PCMCIA_DEVICE_PROD_ID12("IBM Corp.", "SCSI PCMCIA Card", 0xe3736c88, 0x859cad20), PCMCIA_DEVICE_PROD_ID1("SCSI PCMCIA Adapter Card", 0x8dacb57e), PCMCIA_DEVICE_PROD_ID12(" SIMPLE TECHNOLOGY Corporation", "SCSI PCMCIA Credit Card Controller", 0x182bdafe, 0xc80d106f), PCMCIA_DEVICE_NULL, }; MODULE_DEVICE_TABLE(pcmcia, fdomain_ids); static struct pcmcia_driver fdomain_cs_driver = { .owner = THIS_MODULE, .name = "fdomain_cs", .probe = fdomain_probe, .remove = fdomain_remove, .id_table = fdomain_ids, }; module_pcmcia_driver(fdomain_cs_driver);
linux-master
drivers/scsi/pcmcia/fdomain_cs.c
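Aside (illustrative only): fdomain_probe() is a compact example of the goto-unwind idiom, where each failure label releases exactly the resources acquired before it, in reverse order. The sketch below reproduces only the shape of that flow; the flag and malloc are invented stand-ins, not the real pcmcia or fdomain calls.

#include <stdio.h>
#include <stdlib.h>

static int device_enabled;

static int enable_device(void)   { device_enabled = 1; return 0; }
static void disable_device(void) { device_enabled = 0; }

static int probe(int fail_create)
{
	char *region;
	int ret;

	ret = enable_device();			/* step 1: enable the device   */
	if (ret)
		return ret;

	region = malloc(64);			/* step 2: grab the I/O region */
	if (!region) {
		ret = -1;
		goto fail_disable;
	}

	if (fail_create) {			/* step 3: create the SCSI host */
		ret = -1;
		goto fail_release;
	}

	printf("probe: ok\n");
	free(region);		/* a real probe keeps this until remove() */
	disable_device();
	return 0;

fail_release:					/* undo step 2 */
	free(region);
fail_disable:					/* undo step 1 */
	disable_device();
	printf("probe: failed, unwound cleanly\n");
	return ret;
}

int main(void)
{
	probe(0);	/* success path                */
	probe(1);	/* host creation fails, unwind */
	return 0;
}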
/*======================================================================== Debug routines for nsp_cs By: YOKOTA Hiroshi <[email protected]> This software may be used and distributed according to the terms of the GNU General Public License. =========================================================================*/ /* $Id: nsp_debug.c,v 1.3 2003/07/26 14:21:09 elca Exp $ */ /* * Show the command data of a command */ static const char unknown[] = "UNKNOWN"; static const char * group_0_commands[] = { /* 00-03 */ "Test Unit Ready", "Rezero Unit", unknown, "Request Sense", /* 04-07 */ "Format Unit", "Read Block Limits", unknown, "Reassign Blocks", /* 08-0d */ "Read (6)", unknown, "Write (6)", "Seek (6)", unknown, unknown, /* 0e-12 */ unknown, "Read Reverse", "Write Filemarks", "Space", "Inquiry", /* 13-16 */ unknown, "Recover Buffered Data", "Mode Select", "Reserve", /* 17-1b */ "Release", "Copy", "Erase", "Mode Sense", "Start/Stop Unit", /* 1c-1d */ "Receive Diagnostic", "Send Diagnostic", /* 1e-1f */ "Prevent/Allow Medium Removal", unknown, }; static const char *group_1_commands[] = { /* 20-22 */ unknown, unknown, unknown, /* 23-28 */ unknown, unknown, "Read Capacity", unknown, unknown, "Read (10)", /* 29-2d */ unknown, "Write (10)", "Seek (10)", unknown, unknown, /* 2e-31 */ "Write Verify","Verify", "Search High", "Search Equal", /* 32-34 */ "Search Low", "Set Limits", "Prefetch or Read Position", /* 35-37 */ "Synchronize Cache","Lock/Unlock Cache", "Read Defect Data", /* 38-3c */ "Medium Scan", "Compare","Copy Verify", "Write Buffer", "Read Buffer", /* 3d-3f */ "Update Block", "Read Long", "Write Long", }; static const char *group_2_commands[] = { /* 40-41 */ "Change Definition", "Write Same", /* 42-48 */ "Read Sub-Ch(cd)", "Read TOC", "Read Header(cd)", "Play Audio(cd)", unknown, "Play Audio MSF(cd)", "Play Audio Track/Index(cd)", /* 49-4f */ "Play Track Relative(10)(cd)", unknown, "Pause/Resume(cd)", "Log Select", "Log Sense", unknown, unknown, /* 50-55 */ unknown, unknown, unknown, unknown, unknown, "Mode Select (10)", /* 56-5b */ unknown, unknown, unknown, unknown, "Mode Sense (10)", unknown, /* 5c-5f */ unknown, unknown, unknown, }; #define group(opcode) (((opcode) >> 5) & 7) #define RESERVED_GROUP 0 #define VENDOR_GROUP 1 #define NOTEXT_GROUP 2 static const char **commands[] = { group_0_commands, group_1_commands, group_2_commands, (const char **) RESERVED_GROUP, (const char **) RESERVED_GROUP, (const char **) NOTEXT_GROUP, (const char **) VENDOR_GROUP, (const char **) VENDOR_GROUP }; static const char reserved[] = "RESERVED"; static const char vendor[] = "VENDOR SPECIFIC"; static void print_opcodek(unsigned char opcode) { const char **table = commands[ group(opcode) ]; switch ((unsigned long) table) { case RESERVED_GROUP: printk("%s[%02x] ", reserved, opcode); break; case NOTEXT_GROUP: printk("%s(notext)[%02x] ", unknown, opcode); break; case VENDOR_GROUP: printk("%s[%02x] ", vendor, opcode); break; default: if (table[opcode & 0x1f] != unknown) printk("%s[%02x] ", table[opcode & 0x1f], opcode); else printk("%s[%02x] ", unknown, opcode); break; } } static void print_commandk (unsigned char *command) { int i, s; printk(KERN_DEBUG); print_opcodek(command[0]); /*printk(KERN_DEBUG "%s ", __func__);*/ if ((command[0] >> 5) == 6 || (command[0] >> 5) == 7 ) { s = 12; /* vender specific */ } else { s = COMMAND_SIZE(command[0]); } for ( i = 1; i < s; ++i) { printk("%02x ", command[i]); } switch (s) { case 6: printk("LBA=%d len=%d", (((unsigned int)command[1] & 0x0f) << 16) | ( (unsigned 
int)command[2] << 8) | ( (unsigned int)command[3] ), (unsigned int)command[4] ); break; case 10: printk("LBA=%d len=%d", ((unsigned int)command[2] << 24) | ((unsigned int)command[3] << 16) | ((unsigned int)command[4] << 8) | ((unsigned int)command[5] ), ((unsigned int)command[7] << 8) | ((unsigned int)command[8] ) ); break; case 12: printk("LBA=%d len=%d", ((unsigned int)command[2] << 24) | ((unsigned int)command[3] << 16) | ((unsigned int)command[4] << 8) | ((unsigned int)command[5] ), ((unsigned int)command[6] << 24) | ((unsigned int)command[7] << 16) | ((unsigned int)command[8] << 8) | ((unsigned int)command[9] ) ); break; default: break; } printk("\n"); } static void show_command(struct scsi_cmnd *SCpnt) { print_commandk(SCpnt->cmnd); } static void show_phase(struct scsi_cmnd *SCpnt) { int i = nsp_scsi_pointer(SCpnt)->phase; char *ph[] = { "PH_UNDETERMINED", "PH_ARBSTART", "PH_SELSTART", "PH_SELECTED", "PH_COMMAND", "PH_DATA", "PH_STATUS", "PH_MSG_IN", "PH_MSG_OUT", "PH_DISCONNECT", "PH_RESELECT" }; if ( i < PH_UNDETERMINED || i > PH_RESELECT ) { printk(KERN_DEBUG "scsi phase: unknown(%d)\n", i); return; } printk(KERN_DEBUG "scsi phase: %s\n", ph[i]); return; } static void show_busphase(unsigned char stat) { switch(stat) { case BUSPHASE_COMMAND: printk(KERN_DEBUG "BUSPHASE_COMMAND\n"); break; case BUSPHASE_MESSAGE_IN: printk(KERN_DEBUG "BUSPHASE_MESSAGE_IN\n"); break; case BUSPHASE_MESSAGE_OUT: printk(KERN_DEBUG "BUSPHASE_MESSAGE_OUT\n"); break; case BUSPHASE_DATA_IN: printk(KERN_DEBUG "BUSPHASE_DATA_IN\n"); break; case BUSPHASE_DATA_OUT: printk(KERN_DEBUG "BUSPHASE_DATA_OUT\n"); break; case BUSPHASE_STATUS: printk(KERN_DEBUG "BUSPHASE_STATUS\n"); break; case BUSPHASE_SELECT: printk(KERN_DEBUG "BUSPHASE_SELECT\n"); break; default: printk(KERN_DEBUG "BUSPHASE_other\n"); break; } } static void show_message(nsp_hw_data *data) { int i; printk(KERN_DEBUG "msg:"); for(i=0; i < data->MsgLen; i++) { printk(" %02x", data->MsgBuffer[i]); } printk("\n"); } /* end */
linux-master
drivers/scsi/pcmcia/nsp_debug.c
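Aside (illustrative only): print_commandk() splits the opcode into a three-bit group and then pulls the LBA and transfer length out of the CDB at positions that depend on its size. Below is the same decoding as a self-contained program; the READ(10) sample bytes are made up for the demonstration.

#include <stdio.h>

#define CDB_GROUP(opcode)	(((opcode) >> 5) & 7)

static void decode_rw_cdb(const unsigned char *cdb, int cdb_len)
{
	unsigned int lba = 0, len = 0;

	switch (cdb_len) {
	case 6:		/* e.g. READ(6)/WRITE(6)   */
		lba = ((cdb[1] & 0x0f) << 16) | (cdb[2] << 8) | cdb[3];
		len = cdb[4];
		break;
	case 10:	/* e.g. READ(10)/WRITE(10) */
		lba = ((unsigned int)cdb[2] << 24) | (cdb[3] << 16) |
		      (cdb[4] << 8) | cdb[5];
		len = (cdb[7] << 8) | cdb[8];
		break;
	case 12:	/* e.g. READ(12)/WRITE(12) */
		lba = ((unsigned int)cdb[2] << 24) | (cdb[3] << 16) |
		      (cdb[4] << 8) | cdb[5];
		len = ((unsigned int)cdb[6] << 24) | (cdb[7] << 16) |
		      (cdb[8] << 8) | cdb[9];
		break;
	}
	printf("opcode=%02x group=%d LBA=%u len=%u\n",
	       cdb[0], CDB_GROUP(cdb[0]), lba, len);
}

int main(void)
{
	/* Sample READ(10): 8 blocks starting at LBA 0x12345. */
	const unsigned char cdb[10] = {
		0x28, 0x00, 0x00, 0x01, 0x23, 0x45, 0x00, 0x00, 0x08, 0x00
	};

	decode_rw_cdb(cdb, sizeof(cdb));
	return 0;
}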
// SPDX-License-Identifier: GPL-2.0-or-later /* * Linux MegaRAID driver for SAS based RAID controllers * * Copyright (c) 2003-2013 LSI Corporation * Copyright (c) 2013-2016 Avago Technologies * Copyright (c) 2016-2018 Broadcom Inc. * * Authors: Broadcom Inc. * Sreenivas Bagalkote * Sumant Patro * Bo Yang * Adam Radford * Kashyap Desai <[email protected]> * Sumit Saxena <[email protected]> * * Send feedback to: [email protected] */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/list.h> #include <linux/moduleparam.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/uio.h> #include <linux/slab.h> #include <linux/uaccess.h> #include <asm/unaligned.h> #include <linux/fs.h> #include <linux/compat.h> #include <linux/blkdev.h> #include <linux/mutex.h> #include <linux/poll.h> #include <linux/vmalloc.h> #include <linux/irq_poll.h> #include <linux/blk-mq-pci.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_tcq.h> #include <scsi/scsi_dbg.h> #include "megaraid_sas_fusion.h" #include "megaraid_sas.h" /* * Number of sectors per IO command * Will be set in megasas_init_mfi if user does not provide */ static unsigned int max_sectors; module_param_named(max_sectors, max_sectors, int, 0444); MODULE_PARM_DESC(max_sectors, "Maximum number of sectors per IO command"); static int msix_disable; module_param(msix_disable, int, 0444); MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0"); static unsigned int msix_vectors; module_param(msix_vectors, int, 0444); MODULE_PARM_DESC(msix_vectors, "MSI-X max vector count. Default: Set by FW"); static int allow_vf_ioctls; module_param(allow_vf_ioctls, int, 0444); MODULE_PARM_DESC(allow_vf_ioctls, "Allow ioctls in SR-IOV VF mode. Default: 0"); static unsigned int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH; module_param(throttlequeuedepth, int, 0444); MODULE_PARM_DESC(throttlequeuedepth, "Adapter queue depth when throttled due to I/O timeout. Default: 16"); unsigned int resetwaittime = MEGASAS_RESET_WAIT_TIME; module_param(resetwaittime, int, 0444); MODULE_PARM_DESC(resetwaittime, "Wait time in (1-180s) after I/O timeout before resetting adapter. Default: 180s"); static int smp_affinity_enable = 1; module_param(smp_affinity_enable, int, 0444); MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)"); static int rdpq_enable = 1; module_param(rdpq_enable, int, 0444); MODULE_PARM_DESC(rdpq_enable, "Allocate reply queue in chunks for large queue depth enable/disable Default: enable(1)"); unsigned int dual_qdepth_disable; module_param(dual_qdepth_disable, int, 0444); MODULE_PARM_DESC(dual_qdepth_disable, "Disable dual queue depth feature. Default: 0"); static unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT; module_param(scmd_timeout, int, 0444); MODULE_PARM_DESC(scmd_timeout, "scsi command timeout (10-90s), default 90s. 
See megasas_reset_timer."); int perf_mode = -1; module_param(perf_mode, int, 0444); MODULE_PARM_DESC(perf_mode, "Performance mode (only for Aero adapters), options:\n\t\t" "0 - balanced: High iops and low latency queues are allocated &\n\t\t" "interrupt coalescing is enabled only on high iops queues\n\t\t" "1 - iops: High iops queues are not allocated &\n\t\t" "interrupt coalescing is enabled on all queues\n\t\t" "2 - latency: High iops queues are not allocated &\n\t\t" "interrupt coalescing is disabled on all queues\n\t\t" "default mode is 'balanced'" ); int event_log_level = MFI_EVT_CLASS_CRITICAL; module_param(event_log_level, int, 0644); MODULE_PARM_DESC(event_log_level, "Asynchronous event logging level- range is: -2(CLASS_DEBUG) to 4(CLASS_DEAD), Default: 2(CLASS_CRITICAL)"); unsigned int enable_sdev_max_qd; module_param(enable_sdev_max_qd, int, 0444); MODULE_PARM_DESC(enable_sdev_max_qd, "Enable sdev max qd as can_queue. Default: 0"); int poll_queues; module_param(poll_queues, int, 0444); MODULE_PARM_DESC(poll_queues, "Number of queues to be use for io_uring poll mode.\n\t\t" "This parameter is effective only if host_tagset_enable=1 &\n\t\t" "It is not applicable for MFI_SERIES. &\n\t\t" "Driver will work in latency mode. &\n\t\t" "High iops queues are not allocated &\n\t\t" ); int host_tagset_enable = 1; module_param(host_tagset_enable, int, 0444); MODULE_PARM_DESC(host_tagset_enable, "Shared host tagset enable/disable Default: enable(1)"); MODULE_LICENSE("GPL"); MODULE_VERSION(MEGASAS_VERSION); MODULE_AUTHOR("[email protected]"); MODULE_DESCRIPTION("Broadcom MegaRAID SAS Driver"); int megasas_transition_to_ready(struct megasas_instance *instance, int ocr); static int megasas_get_pd_list(struct megasas_instance *instance); static int megasas_ld_list_query(struct megasas_instance *instance, u8 query_type); static int megasas_issue_init_mfi(struct megasas_instance *instance); static int megasas_register_aen(struct megasas_instance *instance, u32 seq_num, u32 class_locale_word); static void megasas_get_pd_info(struct megasas_instance *instance, struct scsi_device *sdev); static void megasas_set_ld_removed_by_fw(struct megasas_instance *instance); /* * PCI ID table for all supported controllers */ static struct pci_device_id megasas_pci_table[] = { {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064R)}, /* xscale IOP */ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078R)}, /* ppc IOP */ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078DE)}, /* ppc IOP */ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078GEN2)}, /* gen2*/ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0079GEN2)}, /* gen2*/ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0073SKINNY)}, /* skinny*/ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0071SKINNY)}, /* skinny*/ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)}, /* xscale IOP, vega */ {PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)}, /* xscale IOP */ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FUSION)}, /* Fusion */ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_PLASMA)}, /* Plasma */ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INVADER)}, /* Invader */ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FURY)}, /* Fury */ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER)}, /* Intruder */ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER_24)}, /* Intruder 24 port*/ 
{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_52)}, {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_53)}, /* VENTURA */ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA)}, {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER)}, {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_HARPOON)}, {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_TOMCAT)}, {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA_4PORT)}, {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER_4PORT)}, {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E1)}, {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E2)}, {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E5)}, {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E6)}, {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E0)}, {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E3)}, {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E4)}, {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E7)}, {} }; MODULE_DEVICE_TABLE(pci, megasas_pci_table); static int megasas_mgmt_majorno; struct megasas_mgmt_info megasas_mgmt_info; static struct fasync_struct *megasas_async_queue; static DEFINE_MUTEX(megasas_async_queue_mutex); static int megasas_poll_wait_aen; static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait); static u32 support_poll_for_event; u32 megasas_dbg_lvl; static u32 support_device_change; static bool support_nvme_encapsulation; static bool support_pci_lane_margining; /* define lock for aen poll */ static DEFINE_SPINLOCK(poll_aen_lock); extern struct dentry *megasas_debugfs_root; extern int megasas_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num); void megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, u8 alt_status); static u32 megasas_read_fw_status_reg_gen2(struct megasas_instance *instance); static int megasas_adp_reset_gen2(struct megasas_instance *instance, struct megasas_register_set __iomem *reg_set); static irqreturn_t megasas_isr(int irq, void *devp); static u32 megasas_init_adapter_mfi(struct megasas_instance *instance); u32 megasas_build_and_issue_cmd(struct megasas_instance *instance, struct scsi_cmnd *scmd); static void megasas_complete_cmd_dpc(unsigned long instance_addr); int wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd, int seconds); void megasas_fusion_ocr_wq(struct work_struct *work); static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance, int initial); static int megasas_set_dma_mask(struct megasas_instance *instance); static int megasas_alloc_ctrl_mem(struct megasas_instance *instance); static inline void megasas_free_ctrl_mem(struct megasas_instance *instance); static inline int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance); static inline void megasas_free_ctrl_dma_buffers(struct megasas_instance *instance); static inline void megasas_init_ctrl_params(struct megasas_instance *instance); u32 megasas_readl(struct megasas_instance *instance, const volatile void __iomem *addr) { u32 i = 0, ret_val; /* * Due to a HW errata in Aero controllers, reads to certain * Fusion registers could intermittently return all zeroes. * This behavior is transient in nature and subsequent reads will * return valid value. As a workaround in driver, retry readl for * upto three times until a non-zero value is read. 
*/ if (instance->adapter_type == AERO_SERIES) { do { ret_val = readl(addr); i++; } while (ret_val == 0 && i < 3); return ret_val; } else { return readl(addr); } } /** * megasas_set_dma_settings - Populate DMA address, length and flags for DCMDs * @instance: Adapter soft state * @dcmd: DCMD frame inside MFI command * @dma_addr: DMA address of buffer to be passed to FW * @dma_len: Length of DMA buffer to be passed to FW * @return: void */ void megasas_set_dma_settings(struct megasas_instance *instance, struct megasas_dcmd_frame *dcmd, dma_addr_t dma_addr, u32 dma_len) { if (instance->consistent_mask_64bit) { dcmd->sgl.sge64[0].phys_addr = cpu_to_le64(dma_addr); dcmd->sgl.sge64[0].length = cpu_to_le32(dma_len); dcmd->flags = cpu_to_le16(dcmd->flags | MFI_FRAME_SGL64); } else { dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(lower_32_bits(dma_addr)); dcmd->sgl.sge32[0].length = cpu_to_le32(dma_len); dcmd->flags = cpu_to_le16(dcmd->flags); } } static void megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd) { instance->instancet->fire_cmd(instance, cmd->frame_phys_addr, 0, instance->reg_set); return; } /** * megasas_get_cmd - Get a command from the free pool * @instance: Adapter soft state * * Returns a free command from the pool */ struct megasas_cmd *megasas_get_cmd(struct megasas_instance *instance) { unsigned long flags; struct megasas_cmd *cmd = NULL; spin_lock_irqsave(&instance->mfi_pool_lock, flags); if (!list_empty(&instance->cmd_pool)) { cmd = list_entry((&instance->cmd_pool)->next, struct megasas_cmd, list); list_del_init(&cmd->list); } else { dev_err(&instance->pdev->dev, "Command pool empty!\n"); } spin_unlock_irqrestore(&instance->mfi_pool_lock, flags); return cmd; } /** * megasas_return_cmd - Return a cmd to free command pool * @instance: Adapter soft state * @cmd: Command packet to be returned to free command pool */ void megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd) { unsigned long flags; u32 blk_tags; struct megasas_cmd_fusion *cmd_fusion; struct fusion_context *fusion = instance->ctrl_context; /* This flag is used only for fusion adapter. 
* Wait for Interrupt for Polled mode DCMD */ if (cmd->flags & DRV_DCMD_POLLED_MODE) return; spin_lock_irqsave(&instance->mfi_pool_lock, flags); if (fusion) { blk_tags = instance->max_scsi_cmds + cmd->index; cmd_fusion = fusion->cmd_list[blk_tags]; megasas_return_cmd_fusion(instance, cmd_fusion); } cmd->scmd = NULL; cmd->frame_count = 0; cmd->flags = 0; memset(cmd->frame, 0, instance->mfi_frame_size); cmd->frame->io.context = cpu_to_le32(cmd->index); if (!fusion && reset_devices) cmd->frame->hdr.cmd = MFI_CMD_INVALID; list_add(&cmd->list, (&instance->cmd_pool)->next); spin_unlock_irqrestore(&instance->mfi_pool_lock, flags); } static const char * format_timestamp(uint32_t timestamp) { static char buffer[32]; if ((timestamp & 0xff000000) == 0xff000000) snprintf(buffer, sizeof(buffer), "boot + %us", timestamp & 0x00ffffff); else snprintf(buffer, sizeof(buffer), "%us", timestamp); return buffer; } static const char * format_class(int8_t class) { static char buffer[6]; switch (class) { case MFI_EVT_CLASS_DEBUG: return "debug"; case MFI_EVT_CLASS_PROGRESS: return "progress"; case MFI_EVT_CLASS_INFO: return "info"; case MFI_EVT_CLASS_WARNING: return "WARN"; case MFI_EVT_CLASS_CRITICAL: return "CRIT"; case MFI_EVT_CLASS_FATAL: return "FATAL"; case MFI_EVT_CLASS_DEAD: return "DEAD"; default: snprintf(buffer, sizeof(buffer), "%d", class); return buffer; } } /** * megasas_decode_evt: Decode FW AEN event and print critical event * for information. * @instance: Adapter soft state */ static void megasas_decode_evt(struct megasas_instance *instance) { struct megasas_evt_detail *evt_detail = instance->evt_detail; union megasas_evt_class_locale class_locale; class_locale.word = le32_to_cpu(evt_detail->cl.word); if ((event_log_level < MFI_EVT_CLASS_DEBUG) || (event_log_level > MFI_EVT_CLASS_DEAD)) { printk(KERN_WARNING "megaraid_sas: provided event log level is out of range, setting it to default 2(CLASS_CRITICAL), permissible range is: -2 to 4\n"); event_log_level = MFI_EVT_CLASS_CRITICAL; } if (class_locale.members.class >= event_log_level) dev_info(&instance->pdev->dev, "%d (%s/0x%04x/%s) - %s\n", le32_to_cpu(evt_detail->seq_num), format_timestamp(le32_to_cpu(evt_detail->time_stamp)), (class_locale.members.locale), format_class(class_locale.members.class), evt_detail->description); if (megasas_dbg_lvl & LD_PD_DEBUG) dev_info(&instance->pdev->dev, "evt_detail.args.ld.target_id/index %d/%d\n", evt_detail->args.ld.target_id, evt_detail->args.ld.ld_index); } /* * The following functions are defined for xscale * (deviceid : 1064R, PERC5) controllers */ /** * megasas_enable_intr_xscale - Enables interrupts * @instance: Adapter soft state */ static inline void megasas_enable_intr_xscale(struct megasas_instance *instance) { struct megasas_register_set __iomem *regs; regs = instance->reg_set; writel(0, &(regs)->outbound_intr_mask); /* Dummy readl to force pci flush */ readl(&regs->outbound_intr_mask); } /** * megasas_disable_intr_xscale -Disables interrupt * @instance: Adapter soft state */ static inline void megasas_disable_intr_xscale(struct megasas_instance *instance) { struct megasas_register_set __iomem *regs; u32 mask = 0x1f; regs = instance->reg_set; writel(mask, &regs->outbound_intr_mask); /* Dummy readl to force pci flush */ readl(&regs->outbound_intr_mask); } /** * megasas_read_fw_status_reg_xscale - returns the current FW status value * @instance: Adapter soft state */ static u32 megasas_read_fw_status_reg_xscale(struct megasas_instance *instance) { return readl(&instance->reg_set->outbound_msg_0); } 
/** * megasas_clear_intr_xscale - Check & clear interrupt * @instance: Adapter soft state */ static int megasas_clear_intr_xscale(struct megasas_instance *instance) { u32 status; u32 mfiStatus = 0; struct megasas_register_set __iomem *regs; regs = instance->reg_set; /* * Check if it is our interrupt */ status = readl(&regs->outbound_intr_status); if (status & MFI_OB_INTR_STATUS_MASK) mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE; if (status & MFI_XSCALE_OMR0_CHANGE_INTERRUPT) mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE; /* * Clear the interrupt by writing back the same value */ if (mfiStatus) writel(status, &regs->outbound_intr_status); /* Dummy readl to force pci flush */ readl(&regs->outbound_intr_status); return mfiStatus; } /** * megasas_fire_cmd_xscale - Sends command to the FW * @instance: Adapter soft state * @frame_phys_addr : Physical address of cmd * @frame_count : Number of frames for the command * @regs : MFI register set */ static inline void megasas_fire_cmd_xscale(struct megasas_instance *instance, dma_addr_t frame_phys_addr, u32 frame_count, struct megasas_register_set __iomem *regs) { unsigned long flags; spin_lock_irqsave(&instance->hba_lock, flags); writel((frame_phys_addr >> 3)|(frame_count), &(regs)->inbound_queue_port); spin_unlock_irqrestore(&instance->hba_lock, flags); } /** * megasas_adp_reset_xscale - For controller reset * @instance: Adapter soft state * @regs: MFI register set */ static int megasas_adp_reset_xscale(struct megasas_instance *instance, struct megasas_register_set __iomem *regs) { u32 i; u32 pcidata; writel(MFI_ADP_RESET, &regs->inbound_doorbell); for (i = 0; i < 3; i++) msleep(1000); /* sleep for 3 secs */ pcidata = 0; pci_read_config_dword(instance->pdev, MFI_1068_PCSR_OFFSET, &pcidata); dev_notice(&instance->pdev->dev, "pcidata = %x\n", pcidata); if (pcidata & 0x2) { dev_notice(&instance->pdev->dev, "mfi 1068 offset read=%x\n", pcidata); pcidata &= ~0x2; pci_write_config_dword(instance->pdev, MFI_1068_PCSR_OFFSET, pcidata); for (i = 0; i < 2; i++) msleep(1000); /* need to wait 2 secs again */ pcidata = 0; pci_read_config_dword(instance->pdev, MFI_1068_FW_HANDSHAKE_OFFSET, &pcidata); dev_notice(&instance->pdev->dev, "1068 offset handshake read=%x\n", pcidata); if ((pcidata & 0xffff0000) == MFI_1068_FW_READY) { dev_notice(&instance->pdev->dev, "1068 offset pcidt=%x\n", pcidata); pcidata = 0; pci_write_config_dword(instance->pdev, MFI_1068_FW_HANDSHAKE_OFFSET, pcidata); } } return 0; } /** * megasas_check_reset_xscale - For controller reset check * @instance: Adapter soft state * @regs: MFI register set */ static int megasas_check_reset_xscale(struct megasas_instance *instance, struct megasas_register_set __iomem *regs) { if ((atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) && (le32_to_cpu(*instance->consumer) == MEGASAS_ADPRESET_INPROG_SIGN)) return 1; return 0; } static struct megasas_instance_template megasas_instance_template_xscale = { .fire_cmd = megasas_fire_cmd_xscale, .enable_intr = megasas_enable_intr_xscale, .disable_intr = megasas_disable_intr_xscale, .clear_intr = megasas_clear_intr_xscale, .read_fw_status_reg = megasas_read_fw_status_reg_xscale, .adp_reset = megasas_adp_reset_xscale, .check_reset = megasas_check_reset_xscale, .service_isr = megasas_isr, .tasklet = megasas_complete_cmd_dpc, .init_adapter = megasas_init_adapter_mfi, .build_and_issue_cmd = megasas_build_and_issue_cmd, .issue_dcmd = megasas_issue_dcmd, }; /* * This is the end of set of functions & definitions specific * to xscale (deviceid : 1064R, PERC5) 
controllers */ /* * The following functions are defined for ppc (deviceid : 0x60) * controllers */ /** * megasas_enable_intr_ppc - Enables interrupts * @instance: Adapter soft state */ static inline void megasas_enable_intr_ppc(struct megasas_instance *instance) { struct megasas_register_set __iomem *regs; regs = instance->reg_set; writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear); writel(~0x80000000, &(regs)->outbound_intr_mask); /* Dummy readl to force pci flush */ readl(&regs->outbound_intr_mask); } /** * megasas_disable_intr_ppc - Disable interrupt * @instance: Adapter soft state */ static inline void megasas_disable_intr_ppc(struct megasas_instance *instance) { struct megasas_register_set __iomem *regs; u32 mask = 0xFFFFFFFF; regs = instance->reg_set; writel(mask, &regs->outbound_intr_mask); /* Dummy readl to force pci flush */ readl(&regs->outbound_intr_mask); } /** * megasas_read_fw_status_reg_ppc - returns the current FW status value * @instance: Adapter soft state */ static u32 megasas_read_fw_status_reg_ppc(struct megasas_instance *instance) { return readl(&instance->reg_set->outbound_scratch_pad_0); } /** * megasas_clear_intr_ppc - Check & clear interrupt * @instance: Adapter soft state */ static int megasas_clear_intr_ppc(struct megasas_instance *instance) { u32 status, mfiStatus = 0; struct megasas_register_set __iomem *regs; regs = instance->reg_set; /* * Check if it is our interrupt */ status = readl(&regs->outbound_intr_status); if (status & MFI_REPLY_1078_MESSAGE_INTERRUPT) mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE; if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT) mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE; /* * Clear the interrupt by writing back the same value */ writel(status, &regs->outbound_doorbell_clear); /* Dummy readl to force pci flush */ readl(&regs->outbound_doorbell_clear); return mfiStatus; } /** * megasas_fire_cmd_ppc - Sends command to the FW * @instance: Adapter soft state * @frame_phys_addr: Physical address of cmd * @frame_count: Number of frames for the command * @regs: MFI register set */ static inline void megasas_fire_cmd_ppc(struct megasas_instance *instance, dma_addr_t frame_phys_addr, u32 frame_count, struct megasas_register_set __iomem *regs) { unsigned long flags; spin_lock_irqsave(&instance->hba_lock, flags); writel((frame_phys_addr | (frame_count<<1))|1, &(regs)->inbound_queue_port); spin_unlock_irqrestore(&instance->hba_lock, flags); } /** * megasas_check_reset_ppc - For controller reset check * @instance: Adapter soft state * @regs: MFI register set */ static int megasas_check_reset_ppc(struct megasas_instance *instance, struct megasas_register_set __iomem *regs) { if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) return 1; return 0; } static struct megasas_instance_template megasas_instance_template_ppc = { .fire_cmd = megasas_fire_cmd_ppc, .enable_intr = megasas_enable_intr_ppc, .disable_intr = megasas_disable_intr_ppc, .clear_intr = megasas_clear_intr_ppc, .read_fw_status_reg = megasas_read_fw_status_reg_ppc, .adp_reset = megasas_adp_reset_xscale, .check_reset = megasas_check_reset_ppc, .service_isr = megasas_isr, .tasklet = megasas_complete_cmd_dpc, .init_adapter = megasas_init_adapter_mfi, .build_and_issue_cmd = megasas_build_and_issue_cmd, .issue_dcmd = megasas_issue_dcmd, }; /** * megasas_enable_intr_skinny - Enables interrupts * @instance: Adapter soft state */ static inline void megasas_enable_intr_skinny(struct megasas_instance *instance) { struct megasas_register_set __iomem *regs; regs = 
instance->reg_set; writel(0xFFFFFFFF, &(regs)->outbound_intr_mask); writel(~MFI_SKINNY_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask); /* Dummy readl to force pci flush */ readl(&regs->outbound_intr_mask); } /** * megasas_disable_intr_skinny - Disables interrupt * @instance: Adapter soft state */ static inline void megasas_disable_intr_skinny(struct megasas_instance *instance) { struct megasas_register_set __iomem *regs; u32 mask = 0xFFFFFFFF; regs = instance->reg_set; writel(mask, &regs->outbound_intr_mask); /* Dummy readl to force pci flush */ readl(&regs->outbound_intr_mask); } /** * megasas_read_fw_status_reg_skinny - returns the current FW status value * @instance: Adapter soft state */ static u32 megasas_read_fw_status_reg_skinny(struct megasas_instance *instance) { return readl(&instance->reg_set->outbound_scratch_pad_0); } /** * megasas_clear_intr_skinny - Check & clear interrupt * @instance: Adapter soft state */ static int megasas_clear_intr_skinny(struct megasas_instance *instance) { u32 status; u32 mfiStatus = 0; struct megasas_register_set __iomem *regs; regs = instance->reg_set; /* * Check if it is our interrupt */ status = readl(&regs->outbound_intr_status); if (!(status & MFI_SKINNY_ENABLE_INTERRUPT_MASK)) { return 0; } /* * Check if it is our interrupt */ if ((megasas_read_fw_status_reg_skinny(instance) & MFI_STATE_MASK) == MFI_STATE_FAULT) { mfiStatus = MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE; } else mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE; /* * Clear the interrupt by writing back the same value */ writel(status, &regs->outbound_intr_status); /* * dummy read to flush PCI */ readl(&regs->outbound_intr_status); return mfiStatus; } /** * megasas_fire_cmd_skinny - Sends command to the FW * @instance: Adapter soft state * @frame_phys_addr: Physical address of cmd * @frame_count: Number of frames for the command * @regs: MFI register set */ static inline void megasas_fire_cmd_skinny(struct megasas_instance *instance, dma_addr_t frame_phys_addr, u32 frame_count, struct megasas_register_set __iomem *regs) { unsigned long flags; spin_lock_irqsave(&instance->hba_lock, flags); writel(upper_32_bits(frame_phys_addr), &(regs)->inbound_high_queue_port); writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1, &(regs)->inbound_low_queue_port); spin_unlock_irqrestore(&instance->hba_lock, flags); } /** * megasas_check_reset_skinny - For controller reset check * @instance: Adapter soft state * @regs: MFI register set */ static int megasas_check_reset_skinny(struct megasas_instance *instance, struct megasas_register_set __iomem *regs) { if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) return 1; return 0; } static struct megasas_instance_template megasas_instance_template_skinny = { .fire_cmd = megasas_fire_cmd_skinny, .enable_intr = megasas_enable_intr_skinny, .disable_intr = megasas_disable_intr_skinny, .clear_intr = megasas_clear_intr_skinny, .read_fw_status_reg = megasas_read_fw_status_reg_skinny, .adp_reset = megasas_adp_reset_gen2, .check_reset = megasas_check_reset_skinny, .service_isr = megasas_isr, .tasklet = megasas_complete_cmd_dpc, .init_adapter = megasas_init_adapter_mfi, .build_and_issue_cmd = megasas_build_and_issue_cmd, .issue_dcmd = megasas_issue_dcmd, }; /* * The following functions are defined for gen2 (deviceid : 0x78 0x79) * controllers */ /** * megasas_enable_intr_gen2 - Enables interrupts * @instance: Adapter soft state */ static inline void megasas_enable_intr_gen2(struct megasas_instance *instance) { struct megasas_register_set __iomem *regs; 
regs = instance->reg_set; writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear); /* write ~0x00000005 (4 & 1) to the intr mask*/ writel(~MFI_GEN2_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask); /* Dummy readl to force pci flush */ readl(&regs->outbound_intr_mask); } /** * megasas_disable_intr_gen2 - Disables interrupt * @instance: Adapter soft state */ static inline void megasas_disable_intr_gen2(struct megasas_instance *instance) { struct megasas_register_set __iomem *regs; u32 mask = 0xFFFFFFFF; regs = instance->reg_set; writel(mask, &regs->outbound_intr_mask); /* Dummy readl to force pci flush */ readl(&regs->outbound_intr_mask); } /** * megasas_read_fw_status_reg_gen2 - returns the current FW status value * @instance: Adapter soft state */ static u32 megasas_read_fw_status_reg_gen2(struct megasas_instance *instance) { return readl(&instance->reg_set->outbound_scratch_pad_0); } /** * megasas_clear_intr_gen2 - Check & clear interrupt * @instance: Adapter soft state */ static int megasas_clear_intr_gen2(struct megasas_instance *instance) { u32 status; u32 mfiStatus = 0; struct megasas_register_set __iomem *regs; regs = instance->reg_set; /* * Check if it is our interrupt */ status = readl(&regs->outbound_intr_status); if (status & MFI_INTR_FLAG_REPLY_MESSAGE) { mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE; } if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT) { mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE; } /* * Clear the interrupt by writing back the same value */ if (mfiStatus) writel(status, &regs->outbound_doorbell_clear); /* Dummy readl to force pci flush */ readl(&regs->outbound_intr_status); return mfiStatus; } /** * megasas_fire_cmd_gen2 - Sends command to the FW * @instance: Adapter soft state * @frame_phys_addr: Physical address of cmd * @frame_count: Number of frames for the command * @regs: MFI register set */ static inline void megasas_fire_cmd_gen2(struct megasas_instance *instance, dma_addr_t frame_phys_addr, u32 frame_count, struct megasas_register_set __iomem *regs) { unsigned long flags; spin_lock_irqsave(&instance->hba_lock, flags); writel((frame_phys_addr | (frame_count<<1))|1, &(regs)->inbound_queue_port); spin_unlock_irqrestore(&instance->hba_lock, flags); } /** * megasas_adp_reset_gen2 - For controller reset * @instance: Adapter soft state * @reg_set: MFI register set */ static int megasas_adp_reset_gen2(struct megasas_instance *instance, struct megasas_register_set __iomem *reg_set) { u32 retry = 0 ; u32 HostDiag; u32 __iomem *seq_offset = &reg_set->seq_offset; u32 __iomem *hostdiag_offset = &reg_set->host_diag; if (instance->instancet == &megasas_instance_template_skinny) { seq_offset = &reg_set->fusion_seq_offset; hostdiag_offset = &reg_set->fusion_host_diag; } writel(0, seq_offset); writel(4, seq_offset); writel(0xb, seq_offset); writel(2, seq_offset); writel(7, seq_offset); writel(0xd, seq_offset); msleep(1000); HostDiag = (u32)readl(hostdiag_offset); while (!(HostDiag & DIAG_WRITE_ENABLE)) { msleep(100); HostDiag = (u32)readl(hostdiag_offset); dev_notice(&instance->pdev->dev, "RESETGEN2: retry=%x, hostdiag=%x\n", retry, HostDiag); if (retry++ >= 100) return 1; } dev_notice(&instance->pdev->dev, "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag); writel((HostDiag | DIAG_RESET_ADAPTER), hostdiag_offset); ssleep(10); HostDiag = (u32)readl(hostdiag_offset); while (HostDiag & DIAG_RESET_ADAPTER) { msleep(100); HostDiag = (u32)readl(hostdiag_offset); dev_notice(&instance->pdev->dev, "RESET_GEN2: retry=%x, hostdiag=%x\n", retry, HostDiag); if (retry++ >= 1000) return 
1; } return 0; } /** * megasas_check_reset_gen2 - For controller reset check * @instance: Adapter soft state * @regs: MFI register set */ static int megasas_check_reset_gen2(struct megasas_instance *instance, struct megasas_register_set __iomem *regs) { if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) return 1; return 0; } static struct megasas_instance_template megasas_instance_template_gen2 = { .fire_cmd = megasas_fire_cmd_gen2, .enable_intr = megasas_enable_intr_gen2, .disable_intr = megasas_disable_intr_gen2, .clear_intr = megasas_clear_intr_gen2, .read_fw_status_reg = megasas_read_fw_status_reg_gen2, .adp_reset = megasas_adp_reset_gen2, .check_reset = megasas_check_reset_gen2, .service_isr = megasas_isr, .tasklet = megasas_complete_cmd_dpc, .init_adapter = megasas_init_adapter_mfi, .build_and_issue_cmd = megasas_build_and_issue_cmd, .issue_dcmd = megasas_issue_dcmd, }; /* * This is the end of set of functions & definitions * specific to gen2 (deviceid : 0x78, 0x79) controllers */ /* * Template added for TB (Fusion) */ extern struct megasas_instance_template megasas_instance_template_fusion; /** * megasas_issue_polled - Issues a polling command * @instance: Adapter soft state * @cmd: Command packet to be issued * * For polling, MFI requires the cmd_status to be set to MFI_STAT_INVALID_STATUS before posting. */ int megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd) { struct megasas_header *frame_hdr = &cmd->frame->hdr; frame_hdr->cmd_status = MFI_STAT_INVALID_STATUS; frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE); if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__); return DCMD_INIT; } instance->instancet->issue_dcmd(instance, cmd); return wait_and_poll(instance, cmd, instance->requestorId ? MEGASAS_ROUTINE_WAIT_TIME_VF : MFI_IO_TIMEOUT_SECS); } /** * megasas_issue_blocked_cmd - Synchronous wrapper around regular FW cmds * @instance: Adapter soft state * @cmd: Command to be issued * @timeout: Timeout in seconds * * This function waits on an event for the command to be returned from ISR. * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs * Used to issue ioctl commands. */ int megasas_issue_blocked_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, int timeout) { int ret = 0; cmd->cmd_status_drv = DCMD_INIT; if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__); return DCMD_INIT; } instance->instancet->issue_dcmd(instance, cmd); if (timeout) { ret = wait_event_timeout(instance->int_cmd_wait_q, cmd->cmd_status_drv != DCMD_INIT, timeout * HZ); if (!ret) { dev_err(&instance->pdev->dev, "DCMD(opcode: 0x%x) is timed out, func:%s\n", cmd->frame->dcmd.opcode, __func__); return DCMD_TIMEOUT; } } else wait_event(instance->int_cmd_wait_q, cmd->cmd_status_drv != DCMD_INIT); return cmd->cmd_status_drv; } /** * megasas_issue_blocked_abort_cmd - Aborts previously issued cmd * @instance: Adapter soft state * @cmd_to_abort: Previously issued cmd to be aborted * @timeout: Timeout in seconds * * MFI firmware can abort previously issued AEN command (automatic event * notification). The megasas_issue_blocked_abort_cmd() issues such abort * cmd and waits for return status.
* Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs */ static int megasas_issue_blocked_abort_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd_to_abort, int timeout) { struct megasas_cmd *cmd; struct megasas_abort_frame *abort_fr; int ret = 0; u32 opcode; cmd = megasas_get_cmd(instance); if (!cmd) return -1; abort_fr = &cmd->frame->abort; /* * Prepare and issue the abort frame */ abort_fr->cmd = MFI_CMD_ABORT; abort_fr->cmd_status = MFI_STAT_INVALID_STATUS; abort_fr->flags = cpu_to_le16(0); abort_fr->abort_context = cpu_to_le32(cmd_to_abort->index); abort_fr->abort_mfi_phys_addr_lo = cpu_to_le32(lower_32_bits(cmd_to_abort->frame_phys_addr)); abort_fr->abort_mfi_phys_addr_hi = cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr)); cmd->sync_cmd = 1; cmd->cmd_status_drv = DCMD_INIT; if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__); return DCMD_INIT; } instance->instancet->issue_dcmd(instance, cmd); if (timeout) { ret = wait_event_timeout(instance->abort_cmd_wait_q, cmd->cmd_status_drv != DCMD_INIT, timeout * HZ); if (!ret) { opcode = cmd_to_abort->frame->dcmd.opcode; dev_err(&instance->pdev->dev, "Abort(to be aborted DCMD opcode: 0x%x) is timed out func:%s\n", opcode, __func__); return DCMD_TIMEOUT; } } else wait_event(instance->abort_cmd_wait_q, cmd->cmd_status_drv != DCMD_INIT); cmd->sync_cmd = 0; megasas_return_cmd(instance, cmd); return cmd->cmd_status_drv; } /** * megasas_make_sgl32 - Prepares 32-bit SGL * @instance: Adapter soft state * @scp: SCSI command from the mid-layer * @mfi_sgl: SGL to be filled in * * If successful, this function returns the number of SG elements. Otherwise, * it returns -1. */ static int megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp, union megasas_sgl *mfi_sgl) { int i; int sge_count; struct scatterlist *os_sgl; sge_count = scsi_dma_map(scp); BUG_ON(sge_count < 0); if (sge_count) { scsi_for_each_sg(scp, os_sgl, sge_count, i) { mfi_sgl->sge32[i].length = cpu_to_le32(sg_dma_len(os_sgl)); mfi_sgl->sge32[i].phys_addr = cpu_to_le32(sg_dma_address(os_sgl)); } } return sge_count; } /** * megasas_make_sgl64 - Prepares 64-bit SGL * @instance: Adapter soft state * @scp: SCSI command from the mid-layer * @mfi_sgl: SGL to be filled in * * If successful, this function returns the number of SG elements. Otherwise, * it returns -1. */ static int megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp, union megasas_sgl *mfi_sgl) { int i; int sge_count; struct scatterlist *os_sgl; sge_count = scsi_dma_map(scp); BUG_ON(sge_count < 0); if (sge_count) { scsi_for_each_sg(scp, os_sgl, sge_count, i) { mfi_sgl->sge64[i].length = cpu_to_le32(sg_dma_len(os_sgl)); mfi_sgl->sge64[i].phys_addr = cpu_to_le64(sg_dma_address(os_sgl)); } } return sge_count; } /** * megasas_make_sgl_skinny - Prepares IEEE SGL * @instance: Adapter soft state * @scp: SCSI command from the mid-layer * @mfi_sgl: SGL to be filled in * * If successful, this function returns the number of SG elements. Otherwise, * it returns -1.
*/ static int megasas_make_sgl_skinny(struct megasas_instance *instance, struct scsi_cmnd *scp, union megasas_sgl *mfi_sgl) { int i; int sge_count; struct scatterlist *os_sgl; sge_count = scsi_dma_map(scp); if (sge_count) { scsi_for_each_sg(scp, os_sgl, sge_count, i) { mfi_sgl->sge_skinny[i].length = cpu_to_le32(sg_dma_len(os_sgl)); mfi_sgl->sge_skinny[i].phys_addr = cpu_to_le64(sg_dma_address(os_sgl)); mfi_sgl->sge_skinny[i].flag = cpu_to_le32(0); } } return sge_count; } /** * megasas_get_frame_count - Computes the number of frames * @frame_type : type of frame - io or pthru frame * @sge_count : number of sg elements * * Returns the number of frames required for the number of sge's (sge_count) */ static u32 megasas_get_frame_count(struct megasas_instance *instance, u8 sge_count, u8 frame_type) { int num_cnt; int sge_bytes; u32 sge_sz; u32 frame_count = 0; sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) : sizeof(struct megasas_sge32); if (instance->flag_ieee) { sge_sz = sizeof(struct megasas_sge_skinny); } /* * Main frame can contain 2 SGEs for 64-bit SGLs and * 3 SGEs for 32-bit SGLs for ldio & * 1 SGE for 64-bit SGLs and * 2 SGEs for 32-bit SGLs for pthru frame */ if (unlikely(frame_type == PTHRU_FRAME)) { if (instance->flag_ieee == 1) { num_cnt = sge_count - 1; } else if (IS_DMA64) num_cnt = sge_count - 1; else num_cnt = sge_count - 2; } else { if (instance->flag_ieee == 1) { num_cnt = sge_count - 1; } else if (IS_DMA64) num_cnt = sge_count - 2; else num_cnt = sge_count - 3; } if (num_cnt > 0) { sge_bytes = sge_sz * num_cnt; frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) + ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) ; } /* Main frame */ frame_count += 1; if (frame_count > 7) frame_count = 8; return frame_count; } /** * megasas_build_dcdb - Prepares a direct cdb (DCDB) command * @instance: Adapter soft state * @scp: SCSI command * @cmd: Command to be prepared in * * This function prepares CDB commands. These are typically pass-through * commands to the devices. */ static int megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp, struct megasas_cmd *cmd) { u32 is_logical; u32 device_id; u16 flags = 0; struct megasas_pthru_frame *pthru; is_logical = MEGASAS_IS_LOGICAL(scp->device); device_id = MEGASAS_DEV_INDEX(scp); pthru = (struct megasas_pthru_frame *)cmd->frame; if (scp->sc_data_direction == DMA_TO_DEVICE) flags = MFI_FRAME_DIR_WRITE; else if (scp->sc_data_direction == DMA_FROM_DEVICE) flags = MFI_FRAME_DIR_READ; else if (scp->sc_data_direction == DMA_NONE) flags = MFI_FRAME_DIR_NONE; if (instance->flag_ieee == 1) { flags |= MFI_FRAME_IEEE; } /* * Prepare the DCDB frame */ pthru->cmd = (is_logical) ? MFI_CMD_LD_SCSI_IO : MFI_CMD_PD_SCSI_IO; pthru->cmd_status = 0x0; pthru->scsi_status = 0x0; pthru->target_id = device_id; pthru->lun = scp->device->lun; pthru->cdb_len = scp->cmd_len; pthru->timeout = 0; pthru->pad_0 = 0; pthru->flags = cpu_to_le16(flags); pthru->data_xfer_len = cpu_to_le32(scsi_bufflen(scp)); memcpy(pthru->cdb, scp->cmnd, scp->cmd_len); /* * If the command is for the tape device, set the * pthru timeout to the os layer timeout value.
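 * (Editor's example: a 90 second block-layer timeout gives
 * scsi_cmd_to_rq(scp)->timeout / HZ == 90, which fits in the 16-bit
 * pthru->timeout field; values above 0xFFFF seconds are clamped to
 * 0xFFFF below.)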
*/ if (scp->device->type == TYPE_TAPE) { if (scsi_cmd_to_rq(scp)->timeout / HZ > 0xFFFF) pthru->timeout = cpu_to_le16(0xFFFF); else pthru->timeout = cpu_to_le16(scsi_cmd_to_rq(scp)->timeout / HZ); } /* * Construct SGL */ if (instance->flag_ieee == 1) { pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64); pthru->sge_count = megasas_make_sgl_skinny(instance, scp, &pthru->sgl); } else if (IS_DMA64) { pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64); pthru->sge_count = megasas_make_sgl64(instance, scp, &pthru->sgl); } else pthru->sge_count = megasas_make_sgl32(instance, scp, &pthru->sgl); if (pthru->sge_count > instance->max_num_sge) { dev_err(&instance->pdev->dev, "DCDB too many SGE NUM=%x\n", pthru->sge_count); return 0; } /* * Sense info specific */ pthru->sense_len = SCSI_SENSE_BUFFERSIZE; pthru->sense_buf_phys_addr_hi = cpu_to_le32(upper_32_bits(cmd->sense_phys_addr)); pthru->sense_buf_phys_addr_lo = cpu_to_le32(lower_32_bits(cmd->sense_phys_addr)); /* * Compute the total number of frames this command consumes. FW uses * this number to pull sufficient number of frames from host memory. */ cmd->frame_count = megasas_get_frame_count(instance, pthru->sge_count, PTHRU_FRAME); return cmd->frame_count; } /** * megasas_build_ldio - Prepares IOs to logical devices * @instance: Adapter soft state * @scp: SCSI command * @cmd: Command to be prepared * * Frames (and accompanying SGLs) for regular SCSI IOs use this function. */ static int megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp, struct megasas_cmd *cmd) { u32 device_id; u8 sc = scp->cmnd[0]; u16 flags = 0; struct megasas_io_frame *ldio; device_id = MEGASAS_DEV_INDEX(scp); ldio = (struct megasas_io_frame *)cmd->frame; if (scp->sc_data_direction == DMA_TO_DEVICE) flags = MFI_FRAME_DIR_WRITE; else if (scp->sc_data_direction == DMA_FROM_DEVICE) flags = MFI_FRAME_DIR_READ; if (instance->flag_ieee == 1) { flags |= MFI_FRAME_IEEE; } /* * Prepare the Logical IO frame: 2nd bit is zero for all read cmds */ ldio->cmd = (sc & 0x02) ? MFI_CMD_LD_WRITE : MFI_CMD_LD_READ; ldio->cmd_status = 0x0; ldio->scsi_status = 0x0; ldio->target_id = device_id; ldio->timeout = 0; ldio->reserved_0 = 0; ldio->pad_0 = 0; ldio->flags = cpu_to_le16(flags); ldio->start_lba_hi = 0; ldio->access_byte = (scp->cmd_len != 6) ? 
scp->cmnd[1] : 0; /* * 6-byte READ(0x08) or WRITE(0x0A) cdb */ if (scp->cmd_len == 6) { ldio->lba_count = cpu_to_le32((u32) scp->cmnd[4]); ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[1] << 16) | ((u32) scp->cmnd[2] << 8) | (u32) scp->cmnd[3]); ldio->start_lba_lo &= cpu_to_le32(0x1FFFFF); } /* * 10-byte READ(0x28) or WRITE(0x2A) cdb */ else if (scp->cmd_len == 10) { ldio->lba_count = cpu_to_le32((u32) scp->cmnd[8] | ((u32) scp->cmnd[7] << 8)); ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) | ((u32) scp->cmnd[3] << 16) | ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5]); } /* * 12-byte READ(0xA8) or WRITE(0xAA) cdb */ else if (scp->cmd_len == 12) { ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[6] << 24) | ((u32) scp->cmnd[7] << 16) | ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9]); ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) | ((u32) scp->cmnd[3] << 16) | ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5]); } /* * 16-byte READ(0x88) or WRITE(0x8A) cdb */ else if (scp->cmd_len == 16) { ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[10] << 24) | ((u32) scp->cmnd[11] << 16) | ((u32) scp->cmnd[12] << 8) | (u32) scp->cmnd[13]); ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[6] << 24) | ((u32) scp->cmnd[7] << 16) | ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9]); ldio->start_lba_hi = cpu_to_le32(((u32) scp->cmnd[2] << 24) | ((u32) scp->cmnd[3] << 16) | ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5]); } /* * Construct SGL */ if (instance->flag_ieee) { ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64); ldio->sge_count = megasas_make_sgl_skinny(instance, scp, &ldio->sgl); } else if (IS_DMA64) { ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64); ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl); } else ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl); if (ldio->sge_count > instance->max_num_sge) { dev_err(&instance->pdev->dev, "build_ld_io: sge_count = %x\n", ldio->sge_count); return 0; } /* * Sense info specific */ ldio->sense_len = SCSI_SENSE_BUFFERSIZE; ldio->sense_buf_phys_addr_hi = 0; ldio->sense_buf_phys_addr_lo = cpu_to_le32(cmd->sense_phys_addr); /* * Compute the total number of frames this command consumes. FW uses * this number to pull sufficient number of frames from host memory. */ cmd->frame_count = megasas_get_frame_count(instance, ldio->sge_count, IO_FRAME); return cmd->frame_count; } /** * megasas_cmd_type - Checks if the cmd is for logical drive/sysPD * and whether it's RW or non RW * @cmd: SCSI command * */ inline int megasas_cmd_type(struct scsi_cmnd *cmd) { int ret; switch (cmd->cmnd[0]) { case READ_10: case WRITE_10: case READ_12: case WRITE_12: case READ_6: case WRITE_6: case READ_16: case WRITE_16: ret = (MEGASAS_IS_LOGICAL(cmd->device)) ? READ_WRITE_LDIO : READ_WRITE_SYSPDIO; break; default: ret = (MEGASAS_IS_LOGICAL(cmd->device)) ? 
NON_READ_WRITE_LDIO : NON_READ_WRITE_SYSPDIO; } return ret; } /** * megasas_dump_pending_frames - Dumps the frame address of all pending cmds * in FW * @instance: Adapter soft state */ static inline void megasas_dump_pending_frames(struct megasas_instance *instance) { struct megasas_cmd *cmd; int i,n; union megasas_sgl *mfi_sgl; struct megasas_io_frame *ldio; struct megasas_pthru_frame *pthru; u32 sgcount; u16 max_cmd = instance->max_fw_cmds; dev_err(&instance->pdev->dev, "[%d]: Dumping Frame Phys Address of all pending cmds in FW\n",instance->host->host_no); dev_err(&instance->pdev->dev, "[%d]: Total OS Pending cmds : %d\n",instance->host->host_no,atomic_read(&instance->fw_outstanding)); if (IS_DMA64) dev_err(&instance->pdev->dev, "[%d]: 64 bit SGLs were sent to FW\n",instance->host->host_no); else dev_err(&instance->pdev->dev, "[%d]: 32 bit SGLs were sent to FW\n",instance->host->host_no); dev_err(&instance->pdev->dev, "[%d]: Pending OS cmds in FW : \n",instance->host->host_no); for (i = 0; i < max_cmd; i++) { cmd = instance->cmd_list[i]; if (!cmd->scmd) continue; dev_err(&instance->pdev->dev, "[%d]: Frame addr :0x%08lx : ",instance->host->host_no,(unsigned long)cmd->frame_phys_addr); if (megasas_cmd_type(cmd->scmd) == READ_WRITE_LDIO) { ldio = (struct megasas_io_frame *)cmd->frame; mfi_sgl = &ldio->sgl; sgcount = ldio->sge_count; dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x," " lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n", instance->host->host_no, cmd->frame_count, ldio->cmd, ldio->target_id, le32_to_cpu(ldio->start_lba_lo), le32_to_cpu(ldio->start_lba_hi), le32_to_cpu(ldio->sense_buf_phys_addr_lo), sgcount); } else { pthru = (struct megasas_pthru_frame *) cmd->frame; mfi_sgl = &pthru->sgl; sgcount = pthru->sge_count; dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, " "lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n", instance->host->host_no, cmd->frame_count, pthru->cmd, pthru->target_id, pthru->lun, pthru->cdb_len, le32_to_cpu(pthru->data_xfer_len), le32_to_cpu(pthru->sense_buf_phys_addr_lo), sgcount); } if (megasas_dbg_lvl & MEGASAS_DBG_LVL) { for (n = 0; n < sgcount; n++) { if (IS_DMA64) dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%llx\n", le32_to_cpu(mfi_sgl->sge64[n].length), le64_to_cpu(mfi_sgl->sge64[n].phys_addr)); else dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%x\n", le32_to_cpu(mfi_sgl->sge32[n].length), le32_to_cpu(mfi_sgl->sge32[n].phys_addr)); } } } /*for max_cmd*/ dev_err(&instance->pdev->dev, "[%d]: Pending Internal cmds in FW : \n",instance->host->host_no); for (i = 0; i < max_cmd; i++) { cmd = instance->cmd_list[i]; if (cmd->sync_cmd == 1) dev_err(&instance->pdev->dev, "0x%08lx : ", (unsigned long)cmd->frame_phys_addr); } dev_err(&instance->pdev->dev, "[%d]: Dumping Done\n\n",instance->host->host_no); } u32 megasas_build_and_issue_cmd(struct megasas_instance *instance, struct scsi_cmnd *scmd) { struct megasas_cmd *cmd; u32 frame_count; cmd = megasas_get_cmd(instance); if (!cmd) return SCSI_MLQUEUE_HOST_BUSY; /* * Logical drive command */ if (megasas_cmd_type(scmd) == READ_WRITE_LDIO) frame_count = megasas_build_ldio(instance, scmd, cmd); else frame_count = megasas_build_dcdb(instance, scmd, cmd); if (!frame_count) goto out_return_cmd; cmd->scmd = scmd; megasas_priv(scmd)->cmd_priv = cmd; /* * Issue the command to the FW */ atomic_inc(&instance->fw_outstanding); 
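/*
 * Editor's note (illustrative): fire_cmd() is handed the frame count minus
 * one, i.e. the frames beyond the main frame. On xscale parts,
 * megasas_fire_cmd_xscale() ORs this count into the low bits of the
 * (frame_phys_addr >> 3) value written to the inbound queue port.
 */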
instance->instancet->fire_cmd(instance, cmd->frame_phys_addr, cmd->frame_count-1, instance->reg_set); return 0; out_return_cmd: megasas_return_cmd(instance, cmd); return SCSI_MLQUEUE_HOST_BUSY; } /** * megasas_queue_command - Queue entry point * @shost: adapter SCSI host * @scmd: SCSI command to be queued */ static int megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd) { struct megasas_instance *instance; struct MR_PRIV_DEVICE *mr_device_priv_data; u32 ld_tgt_id; instance = (struct megasas_instance *) scmd->device->host->hostdata; if (instance->unload == 1) { scmd->result = DID_NO_CONNECT << 16; scsi_done(scmd); return 0; } if (instance->issuepend_done == 0) return SCSI_MLQUEUE_HOST_BUSY; /* Check for an mpio path and adjust behavior */ if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) { if (megasas_check_mpio_paths(instance, scmd) == (DID_REQUEUE << 16)) { return SCSI_MLQUEUE_HOST_BUSY; } else { scmd->result = DID_NO_CONNECT << 16; scsi_done(scmd); return 0; } } mr_device_priv_data = scmd->device->hostdata; if (!mr_device_priv_data || (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)) { scmd->result = DID_NO_CONNECT << 16; scsi_done(scmd); return 0; } if (MEGASAS_IS_LOGICAL(scmd->device)) { ld_tgt_id = MEGASAS_TARGET_ID(scmd->device); if (instance->ld_tgtid_status[ld_tgt_id] == LD_TARGET_ID_DELETED) { scmd->result = DID_NO_CONNECT << 16; scsi_done(scmd); return 0; } } if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) return SCSI_MLQUEUE_HOST_BUSY; if (mr_device_priv_data->tm_busy) return SCSI_MLQUEUE_DEVICE_BUSY; scmd->result = 0; if (MEGASAS_IS_LOGICAL(scmd->device) && (scmd->device->id >= instance->fw_supported_vd_count || scmd->device->lun)) { scmd->result = DID_BAD_TARGET << 16; goto out_done; } if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) && MEGASAS_IS_LOGICAL(scmd->device) && (!instance->fw_sync_cache_support)) { scmd->result = DID_OK << 16; goto out_done; } return instance->instancet->build_and_issue_cmd(instance, scmd); out_done: scsi_done(scmd); return 0; } static struct megasas_instance *megasas_lookup_instance(u16 host_no) { int i; for (i = 0; i < megasas_mgmt_info.max_index; i++) { if ((megasas_mgmt_info.instance[i]) && (megasas_mgmt_info.instance[i]->host->host_no == host_no)) return megasas_mgmt_info.instance[i]; } return NULL; } /* * megasas_set_dynamic_target_properties - * Device property set by driver may not be static and it is required to be * updated after OCR * * set tm_capable. * set dma alignment (only for eedp protection enable vd). 
* * @sdev: OS provided scsi device * * Returns void */ void megasas_set_dynamic_target_properties(struct scsi_device *sdev, bool is_target_prop) { u16 pd_index = 0, ld; u32 device_id; struct megasas_instance *instance; struct fusion_context *fusion; struct MR_PRIV_DEVICE *mr_device_priv_data; struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync; struct MR_LD_RAID *raid; struct MR_DRV_RAID_MAP_ALL *local_map_ptr; instance = megasas_lookup_instance(sdev->host->host_no); fusion = instance->ctrl_context; mr_device_priv_data = sdev->hostdata; if (!fusion || !mr_device_priv_data) return; if (MEGASAS_IS_LOGICAL(sdev)) { device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id; local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)]; ld = MR_TargetIdToLdGet(device_id, local_map_ptr); if (ld >= instance->fw_supported_vd_count) return; raid = MR_LdRaidGet(ld, local_map_ptr); if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) blk_queue_update_dma_alignment(sdev->request_queue, 0x7); mr_device_priv_data->is_tm_capable = raid->capability.tmCapable; if (!raid->flags.isEPD) sdev->no_write_same = 1; } else if (instance->use_seqnum_jbod_fp) { pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id; pd_sync = (void *)fusion->pd_seq_sync [(instance->pd_seq_map_id - 1) & 1]; mr_device_priv_data->is_tm_capable = pd_sync->seq[pd_index].capability.tmCapable; } if (is_target_prop && instance->tgt_prop->reset_tmo) { /* * If FW provides a target reset timeout value, driver will use * it. If not set, fallback to default values. */ mr_device_priv_data->target_reset_tmo = min_t(u8, instance->max_reset_tmo, instance->tgt_prop->reset_tmo); mr_device_priv_data->task_abort_tmo = instance->task_abort_tmo; } else { mr_device_priv_data->target_reset_tmo = MEGASAS_DEFAULT_TM_TIMEOUT; mr_device_priv_data->task_abort_tmo = MEGASAS_DEFAULT_TM_TIMEOUT; } } /* * megasas_set_nvme_device_properties - * set nomerges=2 * set virtual page boundary = 4K (current mr_nvme_pg_size is 4K). * set maximum io transfer = MDTS of NVME device provided by MR firmware. * * MR firmware provides value in KB. Caller of this function converts * KB into bytes. * * e.g. MDTS=5 means 2^5 * NVMe page size; with a 4K page size the MR * firmware reports 128 (32 * 4K = 128K). * * @sdev: scsi device * @max_io_size: maximum io transfer size * */ static inline void megasas_set_nvme_device_properties(struct scsi_device *sdev, u32 max_io_size) { struct megasas_instance *instance; u32 mr_nvme_pg_size; instance = (struct megasas_instance *)sdev->host->hostdata; mr_nvme_pg_size = max_t(u32, instance->nvme_page_size, MR_DEFAULT_NVME_PAGE_SIZE); blk_queue_max_hw_sectors(sdev->request_queue, (max_io_size / 512)); blk_queue_flag_set(QUEUE_FLAG_NOMERGES, sdev->request_queue); blk_queue_virt_boundary(sdev->request_queue, mr_nvme_pg_size - 1); } /* * megasas_set_fw_assisted_qd - * set device queue depth to can_queue * set device queue depth to fw assisted qd * * @sdev: scsi device * @is_target_prop true, if fw provided target properties.
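 *
 * (Editor's example: a SATA system PD with no firmware-supplied queue depth
 * ends up with MEGASAS_SATA_QD; a firmware-reported device_qdepth is capped
 * at the host's can_queue below.)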
*/ static void megasas_set_fw_assisted_qd(struct scsi_device *sdev, bool is_target_prop) { u8 interface_type; u32 device_qd = MEGASAS_DEFAULT_CMD_PER_LUN; u32 tgt_device_qd; struct megasas_instance *instance; struct MR_PRIV_DEVICE *mr_device_priv_data; instance = megasas_lookup_instance(sdev->host->host_no); mr_device_priv_data = sdev->hostdata; interface_type = mr_device_priv_data->interface_type; switch (interface_type) { case SAS_PD: device_qd = MEGASAS_SAS_QD; break; case SATA_PD: device_qd = MEGASAS_SATA_QD; break; case NVME_PD: device_qd = MEGASAS_NVME_QD; break; } if (is_target_prop) { tgt_device_qd = le32_to_cpu(instance->tgt_prop->device_qdepth); if (tgt_device_qd) device_qd = min(instance->host->can_queue, (int)tgt_device_qd); } if (instance->enable_sdev_max_qd && interface_type != UNKNOWN_DRIVE) device_qd = instance->host->can_queue; scsi_change_queue_depth(sdev, device_qd); } /* * megasas_set_static_target_properties - * Device property set by driver are static and it is not required to be * updated after OCR. * * set io timeout * set device queue depth * set nvme device properties. see - megasas_set_nvme_device_properties * * @sdev: scsi device * @is_target_prop true, if fw provided target properties. */ static void megasas_set_static_target_properties(struct scsi_device *sdev, bool is_target_prop) { u32 max_io_size_kb = MR_DEFAULT_NVME_MDTS_KB; struct megasas_instance *instance; instance = megasas_lookup_instance(sdev->host->host_no); /* * The RAID firmware may require extended timeouts. */ blk_queue_rq_timeout(sdev->request_queue, scmd_timeout * HZ); /* max_io_size_kb will be set to non zero for * nvme based vd and syspd. */ if (is_target_prop) max_io_size_kb = le32_to_cpu(instance->tgt_prop->max_io_size_kb); if (instance->nvme_page_size && max_io_size_kb) megasas_set_nvme_device_properties(sdev, (max_io_size_kb << 10)); megasas_set_fw_assisted_qd(sdev, is_target_prop); } static int megasas_slave_configure(struct scsi_device *sdev) { u16 pd_index = 0; struct megasas_instance *instance; int ret_target_prop = DCMD_FAILED; bool is_target_prop = false; instance = megasas_lookup_instance(sdev->host->host_no); if (instance->pd_list_not_supported) { if (!MEGASAS_IS_LOGICAL(sdev) && sdev->type == TYPE_DISK) { pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id; if (instance->pd_list[pd_index].driveState != MR_PD_STATE_SYSTEM) return -ENXIO; } } mutex_lock(&instance->reset_mutex); /* Send DCMD to Firmware and cache the information */ if ((instance->pd_info) && !MEGASAS_IS_LOGICAL(sdev)) megasas_get_pd_info(instance, sdev); /* Some ventura firmware may not have instance->nvme_page_size set. * Do not send MR_DCMD_DRV_GET_TARGET_PROP */ if ((instance->tgt_prop) && (instance->nvme_page_size)) ret_target_prop = megasas_get_target_prop(instance, sdev); is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? 
true : false; megasas_set_static_target_properties(sdev, is_target_prop); /* This sdev property may change post OCR */ megasas_set_dynamic_target_properties(sdev, is_target_prop); mutex_unlock(&instance->reset_mutex); return 0; } static int megasas_slave_alloc(struct scsi_device *sdev) { u16 pd_index = 0, ld_tgt_id; struct megasas_instance *instance ; struct MR_PRIV_DEVICE *mr_device_priv_data; instance = megasas_lookup_instance(sdev->host->host_no); if (!MEGASAS_IS_LOGICAL(sdev)) { /* * Open the OS scan to the SYSTEM PD */ pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id; if ((instance->pd_list_not_supported || instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM)) { goto scan_target; } return -ENXIO; } else if (!MEGASAS_IS_LUN_VALID(sdev)) { sdev_printk(KERN_INFO, sdev, "%s: invalid LUN\n", __func__); return -ENXIO; } scan_target: mr_device_priv_data = kzalloc(sizeof(*mr_device_priv_data), GFP_KERNEL); if (!mr_device_priv_data) return -ENOMEM; if (MEGASAS_IS_LOGICAL(sdev)) { ld_tgt_id = MEGASAS_TARGET_ID(sdev); instance->ld_tgtid_status[ld_tgt_id] = LD_TARGET_ID_ACTIVE; if (megasas_dbg_lvl & LD_PD_DEBUG) sdev_printk(KERN_INFO, sdev, "LD target ID %d created.\n", ld_tgt_id); } sdev->hostdata = mr_device_priv_data; atomic_set(&mr_device_priv_data->r1_ldio_hint, instance->r1_ldio_hint_default); return 0; } static void megasas_slave_destroy(struct scsi_device *sdev) { u16 ld_tgt_id; struct megasas_instance *instance; instance = megasas_lookup_instance(sdev->host->host_no); if (MEGASAS_IS_LOGICAL(sdev)) { if (!MEGASAS_IS_LUN_VALID(sdev)) { sdev_printk(KERN_INFO, sdev, "%s: invalid LUN\n", __func__); return; } ld_tgt_id = MEGASAS_TARGET_ID(sdev); instance->ld_tgtid_status[ld_tgt_id] = LD_TARGET_ID_DELETED; if (megasas_dbg_lvl & LD_PD_DEBUG) sdev_printk(KERN_INFO, sdev, "LD target ID %d removed from OS stack\n", ld_tgt_id); } kfree(sdev->hostdata); sdev->hostdata = NULL; } /* * megasas_complete_outstanding_ioctls - Complete outstanding ioctls after a * kill adapter * @instance: Adapter soft state * */ static void megasas_complete_outstanding_ioctls(struct megasas_instance *instance) { int i; struct megasas_cmd *cmd_mfi; struct megasas_cmd_fusion *cmd_fusion; struct fusion_context *fusion = instance->ctrl_context; /* Find all outstanding ioctls */ if (fusion) { for (i = 0; i < instance->max_fw_cmds; i++) { cmd_fusion = fusion->cmd_list[i]; if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) { cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx]; if (cmd_mfi->sync_cmd && (cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)) { cmd_mfi->frame->hdr.cmd_status = MFI_STAT_WRONG_STATE; megasas_complete_cmd(instance, cmd_mfi, DID_OK); } } } } else { for (i = 0; i < instance->max_fw_cmds; i++) { cmd_mfi = instance->cmd_list[i]; if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT) megasas_complete_cmd(instance, cmd_mfi, DID_OK); } } } void megaraid_sas_kill_hba(struct megasas_instance *instance) { if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { dev_warn(&instance->pdev->dev, "Adapter already dead, skipping kill HBA\n"); return; } /* Set critical error to block I/O & ioctls in case caller didn't */ atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR); /* Wait 1 second to ensure IO or ioctls in build have posted */ msleep(1000); if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) || (instance->adapter_type != MFI_SERIES)) { if (!instance->requestorId) { 
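/*
 * Editor's note: skinny and non-MFI (fusion) parts take the stop request
 * through reg_set->doorbell and flush it with a dummy readl(); the
 * MFI_SERIES branch below writes inbound_doorbell instead.
 */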
writel(MFI_STOP_ADP, &instance->reg_set->doorbell); /* Flush */ readl(&instance->reg_set->doorbell); } if (instance->requestorId && instance->peerIsPresent) memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); } else { writel(MFI_STOP_ADP, &instance->reg_set->inbound_doorbell); } /* Complete outstanding ioctls when adapter is killed */ megasas_complete_outstanding_ioctls(instance); } /** * megasas_check_and_restore_queue_depth - Check if queue depth needs to be * restored to max value * @instance: Adapter soft state * */ void megasas_check_and_restore_queue_depth(struct megasas_instance *instance) { unsigned long flags; if (instance->flag & MEGASAS_FW_BUSY && time_after(jiffies, instance->last_time + 5 * HZ) && atomic_read(&instance->fw_outstanding) < instance->throttlequeuedepth + 1) { spin_lock_irqsave(instance->host->host_lock, flags); instance->flag &= ~MEGASAS_FW_BUSY; instance->host->can_queue = instance->cur_can_queue; spin_unlock_irqrestore(instance->host->host_lock, flags); } } /** * megasas_complete_cmd_dpc - Completes pending commands from the MFI reply queue * @instance_addr: Address of adapter soft state * * Tasklet to complete cmds */ static void megasas_complete_cmd_dpc(unsigned long instance_addr) { u32 producer; u32 consumer; u32 context; struct megasas_cmd *cmd; struct megasas_instance *instance = (struct megasas_instance *)instance_addr; unsigned long flags; /* If we have already declared adapter dead, do not complete cmds */ if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) return; spin_lock_irqsave(&instance->completion_lock, flags); producer = le32_to_cpu(*instance->producer); consumer = le32_to_cpu(*instance->consumer); while (consumer != producer) { context = le32_to_cpu(instance->reply_queue[consumer]); if (context >= instance->max_fw_cmds) { dev_err(&instance->pdev->dev, "Unexpected context value %x\n", context); BUG(); } cmd = instance->cmd_list[context]; megasas_complete_cmd(instance, cmd, DID_OK); consumer++; if (consumer == (instance->max_fw_cmds + 1)) { consumer = 0; } } *instance->consumer = cpu_to_le32(producer); spin_unlock_irqrestore(&instance->completion_lock, flags); /* * Check if we can restore can_queue */ megasas_check_and_restore_queue_depth(instance); } static void megasas_sriov_heartbeat_handler(struct timer_list *t); /** * megasas_start_timer - Initializes sriov heartbeat timer object * @instance: Adapter soft state * */ void megasas_start_timer(struct megasas_instance *instance) { struct timer_list *timer = &instance->sriov_heartbeat_timer; timer_setup(timer, megasas_sriov_heartbeat_handler, 0); timer->expires = jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF; add_timer(timer); } static void megasas_internal_reset_defer_cmds(struct megasas_instance *instance); static void process_fw_state_change_wq(struct work_struct *work); static void megasas_do_ocr(struct megasas_instance *instance) { if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) || (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) || (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) { *instance->consumer = cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN); } instance->instancet->disable_intr(instance); atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT); instance->issuepend_done = 0; atomic_set(&instance->fw_outstanding, 0); megasas_internal_reset_defer_cmds(instance); process_fw_state_change_wq(&instance->work_init); } static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance, int initial) { struct megasas_cmd *cmd; struct megasas_dcmd_frame
*dcmd; struct MR_LD_VF_AFFILIATION_111 *new_affiliation_111 = NULL; dma_addr_t new_affiliation_111_h; int ld, retval = 0; u8 thisVf; cmd = megasas_get_cmd(instance); if (!cmd) { dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation_111:" "Failed to get cmd for scsi%d\n", instance->host->host_no); return -ENOMEM; } dcmd = &cmd->frame->dcmd; if (!instance->vf_affiliation_111) { dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF " "affiliation for scsi%d\n", instance->host->host_no); megasas_return_cmd(instance, cmd); return -ENOMEM; } if (initial) memset(instance->vf_affiliation_111, 0, sizeof(struct MR_LD_VF_AFFILIATION_111)); else { new_affiliation_111 = dma_alloc_coherent(&instance->pdev->dev, sizeof(struct MR_LD_VF_AFFILIATION_111), &new_affiliation_111_h, GFP_KERNEL); if (!new_affiliation_111) { dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate " "memory for new affiliation for scsi%d\n", instance->host->host_no); megasas_return_cmd(instance, cmd); return -ENOMEM; } } memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = MFI_STAT_INVALID_STATUS; dcmd->sge_count = 1; dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH); dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_VF_AFFILIATION_111)); dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111); if (initial) dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->vf_affiliation_111_h); else dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(new_affiliation_111_h); dcmd->sgl.sge32[0].length = cpu_to_le32( sizeof(struct MR_LD_VF_AFFILIATION_111)); dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for " "scsi%d\n", instance->host->host_no); if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) { dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD" " failed with status 0x%x for scsi%d\n", dcmd->cmd_status, instance->host->host_no); retval = 1; /* Do a scan if we couldn't get affiliation */ goto out; } if (!initial) { thisVf = new_affiliation_111->thisVf; for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++) if (instance->vf_affiliation_111->map[ld].policy[thisVf] != new_affiliation_111->map[ld].policy[thisVf]) { dev_warn(&instance->pdev->dev, "SR-IOV: " "Got new LD/VF affiliation for scsi%d\n", instance->host->host_no); memcpy(instance->vf_affiliation_111, new_affiliation_111, sizeof(struct MR_LD_VF_AFFILIATION_111)); retval = 1; goto out; } } out: if (new_affiliation_111) { dma_free_coherent(&instance->pdev->dev, sizeof(struct MR_LD_VF_AFFILIATION_111), new_affiliation_111, new_affiliation_111_h); } megasas_return_cmd(instance, cmd); return retval; } static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance, int initial) { struct megasas_cmd *cmd; struct megasas_dcmd_frame *dcmd; struct MR_LD_VF_AFFILIATION *new_affiliation = NULL; struct MR_LD_VF_MAP *newmap = NULL, *savedmap = NULL; dma_addr_t new_affiliation_h; int i, j, retval = 0, found = 0, doscan = 0; u8 thisVf; cmd = megasas_get_cmd(instance); if (!cmd) { dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation12: " "Failed to get cmd for scsi%d\n", instance->host->host_no); return -ENOMEM; } dcmd = &cmd->frame->dcmd; if (!instance->vf_affiliation) { dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF " "affiliation for scsi%d\n", instance->host->host_no); megasas_return_cmd(instance, cmd); return -ENOMEM; } if (initial) memset(instance->vf_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) * 
sizeof(struct MR_LD_VF_AFFILIATION)); else { new_affiliation = dma_alloc_coherent(&instance->pdev->dev, (MAX_LOGICAL_DRIVES + 1) * sizeof(struct MR_LD_VF_AFFILIATION), &new_affiliation_h, GFP_KERNEL); if (!new_affiliation) { dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate " "memory for new affiliation for scsi%d\n", instance->host->host_no); megasas_return_cmd(instance, cmd); return -ENOMEM; } } memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = MFI_STAT_INVALID_STATUS; dcmd->sge_count = 1; dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH); dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) * sizeof(struct MR_LD_VF_AFFILIATION)); dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS); if (initial) dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->vf_affiliation_h); else dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(new_affiliation_h); dcmd->sgl.sge32[0].length = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) * sizeof(struct MR_LD_VF_AFFILIATION)); dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for " "scsi%d\n", instance->host->host_no); if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) { dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD" " failed with status 0x%x for scsi%d\n", dcmd->cmd_status, instance->host->host_no); retval = 1; /* Do a scan if we couldn't get affiliation */ goto out; } if (!initial) { if (!new_affiliation->ldCount) { dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF " "affiliation for passive path for scsi%d\n", instance->host->host_no); retval = 1; goto out; } newmap = new_affiliation->map; savedmap = instance->vf_affiliation->map; thisVf = new_affiliation->thisVf; for (i = 0 ; i < new_affiliation->ldCount; i++) { found = 0; for (j = 0; j < instance->vf_affiliation->ldCount; j++) { if (newmap->ref.targetId == savedmap->ref.targetId) { found = 1; if (newmap->policy[thisVf] != savedmap->policy[thisVf]) { doscan = 1; goto out; } } savedmap = (struct MR_LD_VF_MAP *) ((unsigned char *)savedmap + savedmap->size); } if (!found && newmap->policy[thisVf] != MR_LD_ACCESS_HIDDEN) { doscan = 1; goto out; } newmap = (struct MR_LD_VF_MAP *) ((unsigned char *)newmap + newmap->size); } newmap = new_affiliation->map; savedmap = instance->vf_affiliation->map; for (i = 0 ; i < instance->vf_affiliation->ldCount; i++) { found = 0; for (j = 0 ; j < new_affiliation->ldCount; j++) { if (savedmap->ref.targetId == newmap->ref.targetId) { found = 1; if (savedmap->policy[thisVf] != newmap->policy[thisVf]) { doscan = 1; goto out; } } newmap = (struct MR_LD_VF_MAP *) ((unsigned char *)newmap + newmap->size); } if (!found && savedmap->policy[thisVf] != MR_LD_ACCESS_HIDDEN) { doscan = 1; goto out; } savedmap = (struct MR_LD_VF_MAP *) ((unsigned char *)savedmap + savedmap->size); } } out: if (doscan) { dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF " "affiliation for scsi%d\n", instance->host->host_no); memcpy(instance->vf_affiliation, new_affiliation, new_affiliation->size); retval = 1; } if (new_affiliation) dma_free_coherent(&instance->pdev->dev, (MAX_LOGICAL_DRIVES + 1) * sizeof(struct MR_LD_VF_AFFILIATION), new_affiliation, new_affiliation_h); megasas_return_cmd(instance, cmd); return retval; } /* This function will get the current SR-IOV LD/VF affiliation */ static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance, int initial) { int retval; if (instance->PlasmaFW111) retval = megasas_get_ld_vf_affiliation_111(instance, initial); 
else retval = megasas_get_ld_vf_affiliation_12(instance, initial); return retval; } /* This function will tell FW to start the SR-IOV heartbeat */ int megasas_sriov_start_heartbeat(struct megasas_instance *instance, int initial) { struct megasas_cmd *cmd; struct megasas_dcmd_frame *dcmd; int retval = 0; cmd = megasas_get_cmd(instance); if (!cmd) { dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_sriov_start_heartbeat: " "Failed to get cmd for scsi%d\n", instance->host->host_no); return -ENOMEM; } dcmd = &cmd->frame->dcmd; if (initial) { instance->hb_host_mem = dma_alloc_coherent(&instance->pdev->dev, sizeof(struct MR_CTRL_HB_HOST_MEM), &instance->hb_host_mem_h, GFP_KERNEL); if (!instance->hb_host_mem) { dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate" " memory for heartbeat host memory for scsi%d\n", instance->host->host_no); retval = -ENOMEM; goto out; } } memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->mbox.s[0] = cpu_to_le16(sizeof(struct MR_CTRL_HB_HOST_MEM)); dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = MFI_STAT_INVALID_STATUS; dcmd->sge_count = 1; dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH); dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM)); dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC); megasas_set_dma_settings(instance, dcmd, instance->hb_host_mem_h, sizeof(struct MR_CTRL_HB_HOST_MEM)); dev_warn(&instance->pdev->dev, "SR-IOV: Starting heartbeat for scsi%d\n", instance->host->host_no); if ((instance->adapter_type != MFI_SERIES) && !instance->mask_interrupts) retval = megasas_issue_blocked_cmd(instance, cmd, MEGASAS_ROUTINE_WAIT_TIME_VF); else retval = megasas_issue_polled(instance, cmd); if (retval) { dev_warn(&instance->pdev->dev, "SR-IOV: MR_DCMD_CTRL_SHARED_HOST" "_MEM_ALLOC DCMD %s for scsi%d\n", (dcmd->cmd_status == MFI_STAT_INVALID_STATUS) ? "timed out" : "failed", instance->host->host_no); retval = 1; } out: megasas_return_cmd(instance, cmd); return retval; } /* Handler for SR-IOV heartbeat */ static void megasas_sriov_heartbeat_handler(struct timer_list *t) { struct megasas_instance *instance = from_timer(instance, t, sriov_heartbeat_timer); if (instance->hb_host_mem->HB.fwCounter != instance->hb_host_mem->HB.driverCounter) { instance->hb_host_mem->HB.driverCounter = instance->hb_host_mem->HB.fwCounter; mod_timer(&instance->sriov_heartbeat_timer, jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF); } else { dev_warn(&instance->pdev->dev, "SR-IOV: Heartbeat never " "completed for scsi%d\n", instance->host->host_no); schedule_work(&instance->work_init); } } /** * megasas_wait_for_outstanding - Wait for all outstanding cmds * @instance: Adapter soft state * * This function waits for up to MEGASAS_RESET_WAIT_TIME seconds for FW to * complete all its outstanding commands. Returns error if one or more IOs * are pending after this time period. It also marks the controller dead. 
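 * (Editor's note: as implemented below, if commands are still outstanding or
 * the firmware reports MFI_STATE_FAULT, up to three online controller resets
 * (OCR) are attempted before the adapter is killed.)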
*/ static int megasas_wait_for_outstanding(struct megasas_instance *instance) { int i, sl, outstanding; u32 reset_index; u32 wait_time = MEGASAS_RESET_WAIT_TIME; unsigned long flags; struct list_head clist_local; struct megasas_cmd *reset_cmd; u32 fw_state; if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { dev_info(&instance->pdev->dev, "%s:%d HBA is killed.\n", __func__, __LINE__); return FAILED; } if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { INIT_LIST_HEAD(&clist_local); spin_lock_irqsave(&instance->hba_lock, flags); list_splice_init(&instance->internal_reset_pending_q, &clist_local); spin_unlock_irqrestore(&instance->hba_lock, flags); dev_notice(&instance->pdev->dev, "HBA reset wait ...\n"); for (i = 0; i < wait_time; i++) { msleep(1000); if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) break; } if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { dev_notice(&instance->pdev->dev, "reset: Stopping HBA.\n"); atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR); return FAILED; } reset_index = 0; while (!list_empty(&clist_local)) { reset_cmd = list_entry((&clist_local)->next, struct megasas_cmd, list); list_del_init(&reset_cmd->list); if (reset_cmd->scmd) { reset_cmd->scmd->result = DID_REQUEUE << 16; dev_notice(&instance->pdev->dev, "%d:%p reset [%02x]\n", reset_index, reset_cmd, reset_cmd->scmd->cmnd[0]); scsi_done(reset_cmd->scmd); megasas_return_cmd(instance, reset_cmd); } else if (reset_cmd->sync_cmd) { dev_notice(&instance->pdev->dev, "%p synch cmds" "reset queue\n", reset_cmd); reset_cmd->cmd_status_drv = DCMD_INIT; instance->instancet->fire_cmd(instance, reset_cmd->frame_phys_addr, 0, instance->reg_set); } else { dev_notice(&instance->pdev->dev, "%p unexpected" "cmds lst\n", reset_cmd); } reset_index++; } return SUCCESS; } for (i = 0; i < resetwaittime; i++) { outstanding = atomic_read(&instance->fw_outstanding); if (!outstanding) break; if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) { dev_notice(&instance->pdev->dev, "[%2d]waiting for %d " "commands to complete\n",i,outstanding); /* * Call cmd completion routine. Cmd to be * be completed directly without depending on isr. */ megasas_complete_cmd_dpc((unsigned long)instance); } msleep(1000); } i = 0; outstanding = atomic_read(&instance->fw_outstanding); fw_state = instance->instancet->read_fw_status_reg(instance) & MFI_STATE_MASK; if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL))) goto no_outstanding; if (instance->disableOnlineCtrlReset) goto kill_hba_and_failed; do { if ((fw_state == MFI_STATE_FAULT) || atomic_read(&instance->fw_outstanding)) { dev_info(&instance->pdev->dev, "%s:%d waiting_for_outstanding: before issue OCR. 
FW state = 0x%x, outstanding 0x%x\n", __func__, __LINE__, fw_state, atomic_read(&instance->fw_outstanding)); if (i == 3) goto kill_hba_and_failed; megasas_do_ocr(instance); if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { dev_info(&instance->pdev->dev, "%s:%d OCR failed and HBA is killed.\n", __func__, __LINE__); return FAILED; } dev_info(&instance->pdev->dev, "%s:%d waiting_for_outstanding: after issue OCR.\n", __func__, __LINE__); for (sl = 0; sl < 10; sl++) msleep(500); outstanding = atomic_read(&instance->fw_outstanding); fw_state = instance->instancet->read_fw_status_reg(instance) & MFI_STATE_MASK; if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL))) goto no_outstanding; } i++; } while (i <= 3); no_outstanding: dev_info(&instance->pdev->dev, "%s:%d no more pending commands remain after reset handling.\n", __func__, __LINE__); return SUCCESS; kill_hba_and_failed: /* Reset not supported, kill adapter */ dev_info(&instance->pdev->dev, "%s:%d killing adapter scsi%d" " disableOnlineCtrlReset %d fw_outstanding %d \n", __func__, __LINE__, instance->host->host_no, instance->disableOnlineCtrlReset, atomic_read(&instance->fw_outstanding)); megasas_dump_pending_frames(instance); megaraid_sas_kill_hba(instance); return FAILED; } /** * megasas_generic_reset - Generic reset routine * @scmd: Mid-layer SCSI command * * This routine implements a generic reset handler for device, bus and host * reset requests. Device, bus and host specific reset handlers can use this * function after they do their specific tasks. */ static int megasas_generic_reset(struct scsi_cmnd *scmd) { int ret_val; struct megasas_instance *instance; instance = (struct megasas_instance *)scmd->device->host->hostdata; scmd_printk(KERN_NOTICE, scmd, "megasas: RESET cmd=%x retries=%x\n", scmd->cmnd[0], scmd->retries); if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { dev_err(&instance->pdev->dev, "cannot recover from previous reset failures\n"); return FAILED; } ret_val = megasas_wait_for_outstanding(instance); if (ret_val == SUCCESS) dev_notice(&instance->pdev->dev, "reset successful\n"); else dev_err(&instance->pdev->dev, "failed to do reset\n"); return ret_val; } /** * megasas_reset_timer - quiesce the adapter if required * @scmd: scsi cmnd * * Sets the FW busy flag and reduces the host->can_queue if the * cmd has not been completed within the timeout period. */ static enum scsi_timeout_action megasas_reset_timer(struct scsi_cmnd *scmd) { struct megasas_instance *instance; unsigned long flags; if (time_after(jiffies, scmd->jiffies_at_alloc + (scmd_timeout * 2) * HZ)) { return SCSI_EH_NOT_HANDLED; } instance = (struct megasas_instance *)scmd->device->host->hostdata; if (!(instance->flag & MEGASAS_FW_BUSY)) { /* FW is busy, throttle IO */ spin_lock_irqsave(instance->host->host_lock, flags); instance->host->can_queue = instance->throttlequeuedepth; instance->last_time = jiffies; instance->flag |= MEGASAS_FW_BUSY; spin_unlock_irqrestore(instance->host->host_lock, flags); } return SCSI_EH_RESET_TIMER; } /** * megasas_dump - This function will print hexdump of provided buffer. * @buf: Buffer to be dumped * @sz: Size in bytes * @format: Different formats of dumping e.g. format=n will * cause only 'n' 32 bit words to be dumped in a single * line. 
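 *
 * Example (taken from megasas_dump_fusion_io() below, shown here only as
 * a usage illustration): dump the MPI2 IO request frame, eight 32-bit
 * words per output line.
 *
 *	megasas_dump(cmd->io_request,
 *		     MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE, 8);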
*/ inline void megasas_dump(void *buf, int sz, int format) { int i; __le32 *buf_loc = (__le32 *)buf; for (i = 0; i < (sz / sizeof(__le32)); i++) { if ((i % format) == 0) { if (i != 0) printk(KERN_CONT "\n"); printk(KERN_CONT "%08x: ", (i * 4)); } printk(KERN_CONT "%08x ", le32_to_cpu(buf_loc[i])); } printk(KERN_CONT "\n"); } /** * megasas_dump_reg_set - This function will print hexdump of register set * @reg_set: Register set to be dumped */ inline void megasas_dump_reg_set(void __iomem *reg_set) { unsigned int i, sz = 256; u32 __iomem *reg = (u32 __iomem *)reg_set; for (i = 0; i < (sz / sizeof(u32)); i++) printk("%08x: %08x\n", (i * 4), readl(&reg[i])); } /** * megasas_dump_fusion_io - This function will print key details * of SCSI IO * @scmd: SCSI command pointer of SCSI IO */ void megasas_dump_fusion_io(struct scsi_cmnd *scmd) { struct megasas_cmd_fusion *cmd = megasas_priv(scmd)->cmd_priv; union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; struct megasas_instance *instance; instance = (struct megasas_instance *)scmd->device->host->hostdata; scmd_printk(KERN_INFO, scmd, "scmd: (0x%p) retries: 0x%x allowed: 0x%x\n", scmd, scmd->retries, scmd->allowed); scsi_print_command(scmd); if (cmd) { req_desc = (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)cmd->request_desc; scmd_printk(KERN_INFO, scmd, "Request descriptor details:\n"); scmd_printk(KERN_INFO, scmd, "RequestFlags:0x%x MSIxIndex:0x%x SMID:0x%x LMID:0x%x DevHandle:0x%x\n", req_desc->SCSIIO.RequestFlags, req_desc->SCSIIO.MSIxIndex, req_desc->SCSIIO.SMID, req_desc->SCSIIO.LMID, req_desc->SCSIIO.DevHandle); printk(KERN_INFO "IO request frame:\n"); megasas_dump(cmd->io_request, MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE, 8); printk(KERN_INFO "Chain frame:\n"); megasas_dump(cmd->sg_frame, instance->max_chain_frame_sz, 8); } } /* * megasas_dump_sys_regs - This function will dump system registers through * sysfs. * @reg_set: Pointer to System register set. * @buf: Buffer to which output is to be written. * @return: Number of bytes written to buffer. 
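 *
 * Usage sketch (this is how the dump_system_regs sysfs handler below
 * consumes it; shown for orientation only):
 *
 *	return megasas_dump_sys_regs(instance->reg_set, buf);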
*/ static inline ssize_t megasas_dump_sys_regs(void __iomem *reg_set, char *buf) { unsigned int i, sz = 256; int bytes_wrote = 0; char *loc = (char *)buf; u32 __iomem *reg = (u32 __iomem *)reg_set; for (i = 0; i < sz / sizeof(u32); i++) { bytes_wrote += scnprintf(loc + bytes_wrote, PAGE_SIZE - bytes_wrote, "%08x: %08x\n", (i * 4), readl(&reg[i])); } return bytes_wrote; } /** * megasas_reset_bus_host - Bus & host reset handler entry point * @scmd: Mid-layer SCSI command */ static int megasas_reset_bus_host(struct scsi_cmnd *scmd) { int ret; struct megasas_instance *instance; instance = (struct megasas_instance *)scmd->device->host->hostdata; scmd_printk(KERN_INFO, scmd, "OCR is requested due to IO timeout!!\n"); scmd_printk(KERN_INFO, scmd, "SCSI host state: %d SCSI host busy: %d FW outstanding: %d\n", scmd->device->host->shost_state, scsi_host_busy(scmd->device->host), atomic_read(&instance->fw_outstanding)); /* * First wait for all commands to complete */ if (instance->adapter_type == MFI_SERIES) { ret = megasas_generic_reset(scmd); } else { megasas_dump_fusion_io(scmd); ret = megasas_reset_fusion(scmd->device->host, SCSIIO_TIMEOUT_OCR); } return ret; } /** * megasas_task_abort - Issues task abort request to firmware * (supported only for fusion adapters) * @scmd: SCSI command pointer */ static int megasas_task_abort(struct scsi_cmnd *scmd) { int ret; struct megasas_instance *instance; instance = (struct megasas_instance *)scmd->device->host->hostdata; if (instance->adapter_type != MFI_SERIES) ret = megasas_task_abort_fusion(scmd); else { sdev_printk(KERN_NOTICE, scmd->device, "TASK ABORT not supported\n"); ret = FAILED; } return ret; } /** * megasas_reset_target: Issues target reset request to firmware * (supported only for fusion adapters) * @scmd: SCSI command pointer */ static int megasas_reset_target(struct scsi_cmnd *scmd) { int ret; struct megasas_instance *instance; instance = (struct megasas_instance *)scmd->device->host->hostdata; if (instance->adapter_type != MFI_SERIES) ret = megasas_reset_target_fusion(scmd); else { sdev_printk(KERN_NOTICE, scmd->device, "TARGET RESET not supported\n"); ret = FAILED; } return ret; } /** * megasas_bios_param - Returns disk geometry for a disk * @sdev: device handle * @bdev: block device * @capacity: drive capacity * @geom: geometry parameters */ static int megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[]) { int heads; int sectors; sector_t cylinders; unsigned long tmp; /* Default heads (64) & sectors (32) */ heads = 64; sectors = 32; tmp = heads * sectors; cylinders = capacity; sector_div(cylinders, tmp); /* * Handle extended translation size for logical drives > 1Gb */ if (capacity >= 0x200000) { heads = 255; sectors = 63; tmp = heads*sectors; cylinders = capacity; sector_div(cylinders, tmp); } geom[0] = heads; geom[1] = sectors; geom[2] = cylinders; return 0; } static void megasas_map_queues(struct Scsi_Host *shost) { struct megasas_instance *instance; int qoff = 0, offset; struct blk_mq_queue_map *map; instance = (struct megasas_instance *)shost->hostdata; if (shost->nr_hw_queues == 1) return; offset = instance->low_latency_index_start; /* Setup Default hctx */ map = &shost->tag_set.map[HCTX_TYPE_DEFAULT]; map->nr_queues = instance->msix_vectors - offset; map->queue_offset = 0; blk_mq_pci_map_queues(map, instance->pdev, offset); qoff += map->nr_queues; offset += map->nr_queues; /* we never use READ queue, so can't cheat blk-mq */ shost->tag_set.map[HCTX_TYPE_READ].nr_queues = 0; /* Setup 
Poll hctx */ map = &shost->tag_set.map[HCTX_TYPE_POLL]; map->nr_queues = instance->iopoll_q_count; if (map->nr_queues) { /* * The poll queue(s) doesn't have an IRQ (and hence IRQ * affinity), so use the regular blk-mq cpu mapping */ map->queue_offset = qoff; blk_mq_map_queues(map); } } static void megasas_aen_polling(struct work_struct *work); /** * megasas_service_aen - Processes an event notification * @instance: Adapter soft state * @cmd: AEN command completed by the ISR * * For AEN, driver sends a command down to FW that is held by the FW till an * event occurs. When an event of interest occurs, FW completes the command * that it was previously holding. * * This routines sends SIGIO signal to processes that have registered with the * driver for AEN. */ static void megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd) { unsigned long flags; /* * Don't signal app if it is just an aborted previously registered aen */ if ((!cmd->abort_aen) && (instance->unload == 0)) { spin_lock_irqsave(&poll_aen_lock, flags); megasas_poll_wait_aen = 1; spin_unlock_irqrestore(&poll_aen_lock, flags); wake_up(&megasas_poll_wait); kill_fasync(&megasas_async_queue, SIGIO, POLL_IN); } else cmd->abort_aen = 0; instance->aen_cmd = NULL; megasas_return_cmd(instance, cmd); if ((instance->unload == 0) && ((instance->issuepend_done == 1))) { struct megasas_aen_event *ev; ev = kzalloc(sizeof(*ev), GFP_ATOMIC); if (!ev) { dev_err(&instance->pdev->dev, "megasas_service_aen: out of memory\n"); } else { ev->instance = instance; instance->ev = ev; INIT_DELAYED_WORK(&ev->hotplug_work, megasas_aen_polling); schedule_delayed_work(&ev->hotplug_work, 0); } } } static ssize_t fw_crash_buffer_store(struct device *cdev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(cdev); struct megasas_instance *instance = (struct megasas_instance *) shost->hostdata; int val = 0; if (kstrtoint(buf, 0, &val) != 0) return -EINVAL; mutex_lock(&instance->crashdump_lock); instance->fw_crash_buffer_offset = val; mutex_unlock(&instance->crashdump_lock); return strlen(buf); } static ssize_t fw_crash_buffer_show(struct device *cdev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(cdev); struct megasas_instance *instance = (struct megasas_instance *) shost->hostdata; u32 size; unsigned long dmachunk = CRASH_DMA_BUF_SIZE; unsigned long chunk_left_bytes; unsigned long src_addr; u32 buff_offset; mutex_lock(&instance->crashdump_lock); buff_offset = instance->fw_crash_buffer_offset; if (!instance->crash_dump_buf || !((instance->fw_crash_state == AVAILABLE) || (instance->fw_crash_state == COPYING))) { dev_err(&instance->pdev->dev, "Firmware crash dump is not available\n"); mutex_unlock(&instance->crashdump_lock); return -EINVAL; } if (buff_offset > (instance->fw_crash_buffer_size * dmachunk)) { dev_err(&instance->pdev->dev, "Firmware crash dump offset is out of range\n"); mutex_unlock(&instance->crashdump_lock); return 0; } size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset; chunk_left_bytes = dmachunk - (buff_offset % dmachunk); size = (size > chunk_left_bytes) ? chunk_left_bytes : size; size = (size >= PAGE_SIZE) ? 
(PAGE_SIZE - 1) : size; src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] + (buff_offset % dmachunk); memcpy(buf, (void *)src_addr, size); mutex_unlock(&instance->crashdump_lock); return size; } static ssize_t fw_crash_buffer_size_show(struct device *cdev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(cdev); struct megasas_instance *instance = (struct megasas_instance *) shost->hostdata; return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long) ((instance->fw_crash_buffer_size) * 1024 * 1024)/PAGE_SIZE); } static ssize_t fw_crash_state_store(struct device *cdev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(cdev); struct megasas_instance *instance = (struct megasas_instance *) shost->hostdata; int val = 0; if (kstrtoint(buf, 0, &val) != 0) return -EINVAL; if ((val <= AVAILABLE || val > COPY_ERROR)) { dev_err(&instance->pdev->dev, "application updates invalid " "firmware crash state\n"); return -EINVAL; } instance->fw_crash_state = val; if ((val == COPIED) || (val == COPY_ERROR)) { mutex_lock(&instance->crashdump_lock); megasas_free_host_crash_buffer(instance); mutex_unlock(&instance->crashdump_lock); if (val == COPY_ERROR) dev_info(&instance->pdev->dev, "application failed to " "copy Firmware crash dump\n"); else dev_info(&instance->pdev->dev, "Firmware crash dump " "copied successfully\n"); } return strlen(buf); } static ssize_t fw_crash_state_show(struct device *cdev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(cdev); struct megasas_instance *instance = (struct megasas_instance *) shost->hostdata; return snprintf(buf, PAGE_SIZE, "%d\n", instance->fw_crash_state); } static ssize_t page_size_show(struct device *cdev, struct device_attribute *attr, char *buf) { return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)PAGE_SIZE - 1); } static ssize_t ldio_outstanding_show(struct device *cdev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(cdev); struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata; return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->ldio_outstanding)); } static ssize_t fw_cmds_outstanding_show(struct device *cdev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(cdev); struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata; return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->fw_outstanding)); } static ssize_t enable_sdev_max_qd_show(struct device *cdev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(cdev); struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata; return snprintf(buf, PAGE_SIZE, "%d\n", instance->enable_sdev_max_qd); } static ssize_t enable_sdev_max_qd_store(struct device *cdev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(cdev); struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata; u32 val = 0; bool is_target_prop; int ret_target_prop = DCMD_FAILED; struct scsi_device *sdev; if (kstrtou32(buf, 0, &val) != 0) { pr_err("megasas: could not set enable_sdev_max_qd\n"); return -EINVAL; } mutex_lock(&instance->reset_mutex); if (val) instance->enable_sdev_max_qd = true; else instance->enable_sdev_max_qd = false; shost_for_each_device(sdev, shost) { ret_target_prop = megasas_get_target_prop(instance, sdev); is_target_prop = 
(ret_target_prop == DCMD_SUCCESS) ? true : false; megasas_set_fw_assisted_qd(sdev, is_target_prop); } mutex_unlock(&instance->reset_mutex); return strlen(buf); } static ssize_t dump_system_regs_show(struct device *cdev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(cdev); struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata; return megasas_dump_sys_regs(instance->reg_set, buf); } static ssize_t raid_map_id_show(struct device *cdev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(cdev); struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata; return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)instance->map_id); } static DEVICE_ATTR_RW(fw_crash_buffer); static DEVICE_ATTR_RO(fw_crash_buffer_size); static DEVICE_ATTR_RW(fw_crash_state); static DEVICE_ATTR_RO(page_size); static DEVICE_ATTR_RO(ldio_outstanding); static DEVICE_ATTR_RO(fw_cmds_outstanding); static DEVICE_ATTR_RW(enable_sdev_max_qd); static DEVICE_ATTR_RO(dump_system_regs); static DEVICE_ATTR_RO(raid_map_id); static struct attribute *megaraid_host_attrs[] = { &dev_attr_fw_crash_buffer_size.attr, &dev_attr_fw_crash_buffer.attr, &dev_attr_fw_crash_state.attr, &dev_attr_page_size.attr, &dev_attr_ldio_outstanding.attr, &dev_attr_fw_cmds_outstanding.attr, &dev_attr_enable_sdev_max_qd.attr, &dev_attr_dump_system_regs.attr, &dev_attr_raid_map_id.attr, NULL, }; ATTRIBUTE_GROUPS(megaraid_host); /* * Scsi host template for megaraid_sas driver */ static const struct scsi_host_template megasas_template = { .module = THIS_MODULE, .name = "Avago SAS based MegaRAID driver", .proc_name = "megaraid_sas", .slave_configure = megasas_slave_configure, .slave_alloc = megasas_slave_alloc, .slave_destroy = megasas_slave_destroy, .queuecommand = megasas_queue_command, .eh_target_reset_handler = megasas_reset_target, .eh_abort_handler = megasas_task_abort, .eh_host_reset_handler = megasas_reset_bus_host, .eh_timed_out = megasas_reset_timer, .shost_groups = megaraid_host_groups, .bios_param = megasas_bios_param, .map_queues = megasas_map_queues, .mq_poll = megasas_blk_mq_poll, .change_queue_depth = scsi_change_queue_depth, .max_segment_size = 0xffffffff, .cmd_size = sizeof(struct megasas_cmd_priv), }; /** * megasas_complete_int_cmd - Completes an internal command * @instance: Adapter soft state * @cmd: Command to be completed * * The megasas_issue_blocked_cmd() function waits for a command to complete * after it issues a command. This function wakes up that waiting routine by * calling wake_up() on the wait queue. */ static void megasas_complete_int_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd) { if (cmd->cmd_status_drv == DCMD_INIT) cmd->cmd_status_drv = (cmd->frame->io.cmd_status == MFI_STAT_OK) ? DCMD_SUCCESS : DCMD_FAILED; wake_up(&instance->int_cmd_wait_q); } /** * megasas_complete_abort - Completes aborting a command * @instance: Adapter soft state * @cmd: Cmd that was issued to abort another cmd * * The megasas_issue_blocked_abort_cmd() function waits on abort_cmd_wait_q * after it issues an abort on a previously issued command. This function * wakes up all functions waiting on the same wait queue. 
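 *
 * Hand-shake sketch (an illustrative summary of the code below, not an
 * additional interface): the issuer sleeps on instance->abort_cmd_wait_q
 * and this completion path wakes it once the abort frame has returned.
 *
 *	cmd->sync_cmd = 0;
 *	cmd->cmd_status_drv = DCMD_SUCCESS;
 *	wake_up(&instance->abort_cmd_wait_q);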
*/ static void megasas_complete_abort(struct megasas_instance *instance, struct megasas_cmd *cmd) { if (cmd->sync_cmd) { cmd->sync_cmd = 0; cmd->cmd_status_drv = DCMD_SUCCESS; wake_up(&instance->abort_cmd_wait_q); } } static void megasas_set_ld_removed_by_fw(struct megasas_instance *instance) { uint i; for (i = 0; (i < MEGASAS_MAX_LD_IDS); i++) { if (instance->ld_ids_prev[i] != 0xff && instance->ld_ids_from_raidmap[i] == 0xff) { if (megasas_dbg_lvl & LD_PD_DEBUG) dev_info(&instance->pdev->dev, "LD target ID %d removed from RAID map\n", i); instance->ld_tgtid_status[i] = LD_TARGET_ID_DELETED; } } } /** * megasas_complete_cmd - Completes a command * @instance: Adapter soft state * @cmd: Command to be completed * @alt_status: If non-zero, use this value as status to * SCSI mid-layer instead of the value returned * by the FW. This should be used if caller wants * an alternate status (as in the case of aborted * commands) */ void megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, u8 alt_status) { int exception = 0; struct megasas_header *hdr = &cmd->frame->hdr; unsigned long flags; struct fusion_context *fusion = instance->ctrl_context; u32 opcode, status; /* flag for the retry reset */ cmd->retry_for_fw_reset = 0; if (cmd->scmd) megasas_priv(cmd->scmd)->cmd_priv = NULL; switch (hdr->cmd) { case MFI_CMD_INVALID: /* Some older 1068 controller FW may keep a pended MR_DCMD_CTRL_EVENT_GET_INFO left over from the main kernel when booting the kdump kernel. Ignore this command to prevent a kernel panic on shutdown of the kdump kernel. */ dev_warn(&instance->pdev->dev, "MFI_CMD_INVALID command " "completed\n"); dev_warn(&instance->pdev->dev, "If you have a controller " "other than PERC5, please upgrade your firmware\n"); break; case MFI_CMD_PD_SCSI_IO: case MFI_CMD_LD_SCSI_IO: /* * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been * issued either through an IO path or an IOCTL path. If it * was via IOCTL, we will send it to internal completion. 
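 * cmd->sync_cmd is the IOCTL marker: when it is set the frame is finished
 * through megasas_complete_int_cmd() and handling stops here, otherwise
 * control falls through to the LD read/write status handling below.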
*/ if (cmd->sync_cmd) { cmd->sync_cmd = 0; megasas_complete_int_cmd(instance, cmd); break; } fallthrough; case MFI_CMD_LD_READ: case MFI_CMD_LD_WRITE: if (alt_status) { cmd->scmd->result = alt_status << 16; exception = 1; } if (exception) { atomic_dec(&instance->fw_outstanding); scsi_dma_unmap(cmd->scmd); scsi_done(cmd->scmd); megasas_return_cmd(instance, cmd); break; } switch (hdr->cmd_status) { case MFI_STAT_OK: cmd->scmd->result = DID_OK << 16; break; case MFI_STAT_SCSI_IO_FAILED: case MFI_STAT_LD_INIT_IN_PROGRESS: cmd->scmd->result = (DID_ERROR << 16) | hdr->scsi_status; break; case MFI_STAT_SCSI_DONE_WITH_ERROR: cmd->scmd->result = (DID_OK << 16) | hdr->scsi_status; if (hdr->scsi_status == SAM_STAT_CHECK_CONDITION) { memset(cmd->scmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); memcpy(cmd->scmd->sense_buffer, cmd->sense, hdr->sense_len); } break; case MFI_STAT_LD_OFFLINE: case MFI_STAT_DEVICE_NOT_FOUND: cmd->scmd->result = DID_BAD_TARGET << 16; break; default: dev_printk(KERN_DEBUG, &instance->pdev->dev, "MFI FW status %#x\n", hdr->cmd_status); cmd->scmd->result = DID_ERROR << 16; break; } atomic_dec(&instance->fw_outstanding); scsi_dma_unmap(cmd->scmd); scsi_done(cmd->scmd); megasas_return_cmd(instance, cmd); break; case MFI_CMD_SMP: case MFI_CMD_STP: case MFI_CMD_NVME: case MFI_CMD_TOOLBOX: megasas_complete_int_cmd(instance, cmd); break; case MFI_CMD_DCMD: opcode = le32_to_cpu(cmd->frame->dcmd.opcode); /* Check for LD map update */ if ((opcode == MR_DCMD_LD_MAP_GET_INFO) && (cmd->frame->dcmd.mbox.b[1] == 1)) { fusion->fast_path_io = 0; spin_lock_irqsave(instance->host->host_lock, flags); status = cmd->frame->hdr.cmd_status; instance->map_update_cmd = NULL; if (status != MFI_STAT_OK) { if (status != MFI_STAT_NOT_FOUND) dev_warn(&instance->pdev->dev, "map syncfailed, status = 0x%x\n", cmd->frame->hdr.cmd_status); else { megasas_return_cmd(instance, cmd); spin_unlock_irqrestore( instance->host->host_lock, flags); break; } } megasas_return_cmd(instance, cmd); /* * Set fast path IO to ZERO. * Validate Map will set proper value. * Meanwhile all IOs will go as LD IO. 
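 * In other words, fast_path_io stays 0 unless MR_ValidateMapInfo()
 * accepts the freshly fetched map; only then is map_id bumped to the new
 * generation and fast path IO re-enabled.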
*/ if (status == MFI_STAT_OK && (MR_ValidateMapInfo(instance, (instance->map_id + 1)))) { instance->map_id++; fusion->fast_path_io = 1; } else { fusion->fast_path_io = 0; } if (instance->adapter_type >= INVADER_SERIES) megasas_set_ld_removed_by_fw(instance); megasas_sync_map_info(instance); spin_unlock_irqrestore(instance->host->host_lock, flags); break; } if (opcode == MR_DCMD_CTRL_EVENT_GET_INFO || opcode == MR_DCMD_CTRL_EVENT_GET) { spin_lock_irqsave(&poll_aen_lock, flags); megasas_poll_wait_aen = 0; spin_unlock_irqrestore(&poll_aen_lock, flags); } /* FW has an updated PD sequence */ if ((opcode == MR_DCMD_SYSTEM_PD_MAP_GET_INFO) && (cmd->frame->dcmd.mbox.b[0] == 1)) { spin_lock_irqsave(instance->host->host_lock, flags); status = cmd->frame->hdr.cmd_status; instance->jbod_seq_cmd = NULL; megasas_return_cmd(instance, cmd); if (status == MFI_STAT_OK) { instance->pd_seq_map_id++; /* Re-register a pd sync seq num cmd */ if (megasas_sync_pd_seq_num(instance, true)) instance->use_seqnum_jbod_fp = false; } else instance->use_seqnum_jbod_fp = false; spin_unlock_irqrestore(instance->host->host_lock, flags); break; } /* * See if got an event notification */ if (opcode == MR_DCMD_CTRL_EVENT_WAIT) megasas_service_aen(instance, cmd); else megasas_complete_int_cmd(instance, cmd); break; case MFI_CMD_ABORT: /* * Cmd issued to abort another cmd returned */ megasas_complete_abort(instance, cmd); break; default: dev_info(&instance->pdev->dev, "Unknown command completed! [0x%X]\n", hdr->cmd); megasas_complete_int_cmd(instance, cmd); break; } } /** * megasas_issue_pending_cmds_again - issue all pending cmds * in FW again because of the fw reset * @instance: Adapter soft state */ static inline void megasas_issue_pending_cmds_again(struct megasas_instance *instance) { struct megasas_cmd *cmd; struct list_head clist_local; union megasas_evt_class_locale class_locale; unsigned long flags; u32 seq_num; INIT_LIST_HEAD(&clist_local); spin_lock_irqsave(&instance->hba_lock, flags); list_splice_init(&instance->internal_reset_pending_q, &clist_local); spin_unlock_irqrestore(&instance->hba_lock, flags); while (!list_empty(&clist_local)) { cmd = list_entry((&clist_local)->next, struct megasas_cmd, list); list_del_init(&cmd->list); if (cmd->sync_cmd || cmd->scmd) { dev_notice(&instance->pdev->dev, "command %p, %p:%d" "detected to be pending while HBA reset\n", cmd, cmd->scmd, cmd->sync_cmd); cmd->retry_for_fw_reset++; if (cmd->retry_for_fw_reset == 3) { dev_notice(&instance->pdev->dev, "cmd %p, %p:%d" "was tried multiple times during reset." 
"Shutting down the HBA\n", cmd, cmd->scmd, cmd->sync_cmd); instance->instancet->disable_intr(instance); atomic_set(&instance->fw_reset_no_pci_access, 1); megaraid_sas_kill_hba(instance); return; } } if (cmd->sync_cmd == 1) { if (cmd->scmd) { dev_notice(&instance->pdev->dev, "unexpected" "cmd attached to internal command!\n"); } dev_notice(&instance->pdev->dev, "%p synchronous cmd" "on the internal reset queue," "issue it again.\n", cmd); cmd->cmd_status_drv = DCMD_INIT; instance->instancet->fire_cmd(instance, cmd->frame_phys_addr, 0, instance->reg_set); } else if (cmd->scmd) { dev_notice(&instance->pdev->dev, "%p scsi cmd [%02x]" "detected on the internal queue, issue again.\n", cmd, cmd->scmd->cmnd[0]); atomic_inc(&instance->fw_outstanding); instance->instancet->fire_cmd(instance, cmd->frame_phys_addr, cmd->frame_count-1, instance->reg_set); } else { dev_notice(&instance->pdev->dev, "%p unexpected cmd on the" "internal reset defer list while re-issue!!\n", cmd); } } if (instance->aen_cmd) { dev_notice(&instance->pdev->dev, "aen_cmd in def process\n"); megasas_return_cmd(instance, instance->aen_cmd); instance->aen_cmd = NULL; } /* * Initiate AEN (Asynchronous Event Notification) */ seq_num = instance->last_seq_num; class_locale.members.reserved = 0; class_locale.members.locale = MR_EVT_LOCALE_ALL; class_locale.members.class = MR_EVT_CLASS_DEBUG; megasas_register_aen(instance, seq_num, class_locale.word); } /* * Move the internal reset pending commands to a deferred queue. * * We move the commands pending at internal reset time to a * pending queue. This queue would be flushed after successful * completion of the internal reset sequence. if the internal reset * did not complete in time, the kernel reset handler would flush * these commands. */ static void megasas_internal_reset_defer_cmds(struct megasas_instance *instance) { struct megasas_cmd *cmd; int i; u16 max_cmd = instance->max_fw_cmds; u32 defer_index; unsigned long flags; defer_index = 0; spin_lock_irqsave(&instance->mfi_pool_lock, flags); for (i = 0; i < max_cmd; i++) { cmd = instance->cmd_list[i]; if (cmd->sync_cmd == 1 || cmd->scmd) { dev_notice(&instance->pdev->dev, "moving cmd[%d]:%p:%d:%p" "on the defer queue as internal\n", defer_index, cmd, cmd->sync_cmd, cmd->scmd); if (!list_empty(&cmd->list)) { dev_notice(&instance->pdev->dev, "ERROR while" " moving this cmd:%p, %d %p, it was" "discovered on some list?\n", cmd, cmd->sync_cmd, cmd->scmd); list_del_init(&cmd->list); } defer_index++; list_add_tail(&cmd->list, &instance->internal_reset_pending_q); } } spin_unlock_irqrestore(&instance->mfi_pool_lock, flags); } static void process_fw_state_change_wq(struct work_struct *work) { struct megasas_instance *instance = container_of(work, struct megasas_instance, work_init); u32 wait; unsigned long flags; if (atomic_read(&instance->adprecovery) != MEGASAS_ADPRESET_SM_INFAULT) { dev_notice(&instance->pdev->dev, "error, recovery st %x\n", atomic_read(&instance->adprecovery)); return ; } if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) { dev_notice(&instance->pdev->dev, "FW detected to be in fault" "state, restarting it...\n"); instance->instancet->disable_intr(instance); atomic_set(&instance->fw_outstanding, 0); atomic_set(&instance->fw_reset_no_pci_access, 1); instance->instancet->adp_reset(instance, instance->reg_set); atomic_set(&instance->fw_reset_no_pci_access, 0); dev_notice(&instance->pdev->dev, "FW restarted successfully," "initiating next stage...\n"); dev_notice(&instance->pdev->dev, "HBA recovery state 
machine," "state 2 starting...\n"); /* waiting for about 20 second before start the second init */ for (wait = 0; wait < 30; wait++) { msleep(1000); } if (megasas_transition_to_ready(instance, 1)) { dev_notice(&instance->pdev->dev, "adapter not ready\n"); atomic_set(&instance->fw_reset_no_pci_access, 1); megaraid_sas_kill_hba(instance); return ; } if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) || (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) || (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR) ) { *instance->consumer = *instance->producer; } else { *instance->consumer = 0; *instance->producer = 0; } megasas_issue_init_mfi(instance); spin_lock_irqsave(&instance->hba_lock, flags); atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL); spin_unlock_irqrestore(&instance->hba_lock, flags); instance->instancet->enable_intr(instance); megasas_issue_pending_cmds_again(instance); instance->issuepend_done = 1; } } /** * megasas_deplete_reply_queue - Processes all completed commands * @instance: Adapter soft state * @alt_status: Alternate status to be returned to * SCSI mid-layer instead of the status * returned by the FW * Note: this must be called with hba lock held */ static int megasas_deplete_reply_queue(struct megasas_instance *instance, u8 alt_status) { u32 mfiStatus; u32 fw_state; if (instance->instancet->check_reset(instance, instance->reg_set) == 1) return IRQ_HANDLED; mfiStatus = instance->instancet->clear_intr(instance); if (mfiStatus == 0) { /* Hardware may not set outbound_intr_status in MSI-X mode */ if (!instance->msix_vectors) return IRQ_NONE; } instance->mfiStatus = mfiStatus; if ((mfiStatus & MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE)) { fw_state = instance->instancet->read_fw_status_reg( instance) & MFI_STATE_MASK; if (fw_state != MFI_STATE_FAULT) { dev_notice(&instance->pdev->dev, "fw state:%x\n", fw_state); } if ((fw_state == MFI_STATE_FAULT) && (instance->disableOnlineCtrlReset == 0)) { dev_notice(&instance->pdev->dev, "wait adp restart\n"); if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) || (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) || (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) { *instance->consumer = cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN); } instance->instancet->disable_intr(instance); atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT); instance->issuepend_done = 0; atomic_set(&instance->fw_outstanding, 0); megasas_internal_reset_defer_cmds(instance); dev_notice(&instance->pdev->dev, "fwState=%x, stage:%d\n", fw_state, atomic_read(&instance->adprecovery)); schedule_work(&instance->work_init); return IRQ_HANDLED; } else { dev_notice(&instance->pdev->dev, "fwstate:%x, dis_OCR=%x\n", fw_state, instance->disableOnlineCtrlReset); } } tasklet_schedule(&instance->isr_tasklet); return IRQ_HANDLED; } /** * megasas_isr - isr entry point * @irq: IRQ number * @devp: IRQ context address */ static irqreturn_t megasas_isr(int irq, void *devp) { struct megasas_irq_context *irq_context = devp; struct megasas_instance *instance = irq_context->instance; unsigned long flags; irqreturn_t rc; if (atomic_read(&instance->fw_reset_no_pci_access)) return IRQ_HANDLED; spin_lock_irqsave(&instance->hba_lock, flags); rc = megasas_deplete_reply_queue(instance, DID_OK); spin_unlock_irqrestore(&instance->hba_lock, flags); return rc; } /** * megasas_transition_to_ready - Move the FW to READY state * @instance: Adapter soft state * @ocr: Adapter reset state * * During the initialization, FW passes can potentially be in any one of * 
several possible states. If the FW in operational, waiting-for-handshake * states, driver must take steps to bring it to ready state. Otherwise, it * has to wait for the ready state. */ int megasas_transition_to_ready(struct megasas_instance *instance, int ocr) { int i; u8 max_wait; u32 fw_state; u32 abs_state, curr_abs_state; abs_state = instance->instancet->read_fw_status_reg(instance); fw_state = abs_state & MFI_STATE_MASK; if (fw_state != MFI_STATE_READY) dev_info(&instance->pdev->dev, "Waiting for FW to come to ready" " state\n"); while (fw_state != MFI_STATE_READY) { switch (fw_state) { case MFI_STATE_FAULT: dev_printk(KERN_ERR, &instance->pdev->dev, "FW in FAULT state, Fault code:0x%x subcode:0x%x func:%s\n", abs_state & MFI_STATE_FAULT_CODE, abs_state & MFI_STATE_FAULT_SUBCODE, __func__); if (ocr) { max_wait = MEGASAS_RESET_WAIT_TIME; break; } else { dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n"); megasas_dump_reg_set(instance->reg_set); return -ENODEV; } case MFI_STATE_WAIT_HANDSHAKE: /* * Set the CLR bit in inbound doorbell */ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) || (instance->adapter_type != MFI_SERIES)) writel( MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG, &instance->reg_set->doorbell); else writel( MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG, &instance->reg_set->inbound_doorbell); max_wait = MEGASAS_RESET_WAIT_TIME; break; case MFI_STATE_BOOT_MESSAGE_PENDING: if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) || (instance->adapter_type != MFI_SERIES)) writel(MFI_INIT_HOTPLUG, &instance->reg_set->doorbell); else writel(MFI_INIT_HOTPLUG, &instance->reg_set->inbound_doorbell); max_wait = MEGASAS_RESET_WAIT_TIME; break; case MFI_STATE_OPERATIONAL: /* * Bring it to READY state; assuming max wait 10 secs */ instance->instancet->disable_intr(instance); if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) || (instance->adapter_type != MFI_SERIES)) { writel(MFI_RESET_FLAGS, &instance->reg_set->doorbell); if (instance->adapter_type != MFI_SERIES) { for (i = 0; i < (10 * 1000); i += 20) { if (megasas_readl( instance, &instance-> reg_set-> doorbell) & 1) msleep(20); else break; } } } else writel(MFI_RESET_FLAGS, &instance->reg_set->inbound_doorbell); max_wait = MEGASAS_RESET_WAIT_TIME; break; case MFI_STATE_UNDEFINED: /* * This state should not last for more than 2 seconds */ max_wait = MEGASAS_RESET_WAIT_TIME; break; case MFI_STATE_BB_INIT: max_wait = MEGASAS_RESET_WAIT_TIME; break; case MFI_STATE_FW_INIT: max_wait = MEGASAS_RESET_WAIT_TIME; break; case MFI_STATE_FW_INIT_2: max_wait = MEGASAS_RESET_WAIT_TIME; break; case MFI_STATE_DEVICE_SCAN: max_wait = MEGASAS_RESET_WAIT_TIME; break; case MFI_STATE_FLUSH_CACHE: max_wait = MEGASAS_RESET_WAIT_TIME; break; default: dev_printk(KERN_DEBUG, &instance->pdev->dev, "Unknown state 0x%x\n", fw_state); dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n"); megasas_dump_reg_set(instance->reg_set); return -ENODEV; } /* * The cur_state should not last for more than max_wait secs */ for (i = 0; i < max_wait * 50; i++) { curr_abs_state = instance->instancet-> read_fw_status_reg(instance); if (abs_state == curr_abs_state) { msleep(20); } else break; } /* * Return error if fw_state hasn't changed after max_wait */ if (curr_abs_state == abs_state) { dev_printk(KERN_DEBUG, 
&instance->pdev->dev, "FW state [%d] hasn't changed " "in %d secs\n", fw_state, max_wait); dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n"); megasas_dump_reg_set(instance->reg_set); return -ENODEV; } abs_state = curr_abs_state; fw_state = curr_abs_state & MFI_STATE_MASK; } dev_info(&instance->pdev->dev, "FW now in Ready state\n"); return 0; } /** * megasas_teardown_frame_pool - Destroy the cmd frame DMA pool * @instance: Adapter soft state */ static void megasas_teardown_frame_pool(struct megasas_instance *instance) { int i; u16 max_cmd = instance->max_mfi_cmds; struct megasas_cmd *cmd; if (!instance->frame_dma_pool) return; /* * Return all frames to pool */ for (i = 0; i < max_cmd; i++) { cmd = instance->cmd_list[i]; if (cmd->frame) dma_pool_free(instance->frame_dma_pool, cmd->frame, cmd->frame_phys_addr); if (cmd->sense) dma_pool_free(instance->sense_dma_pool, cmd->sense, cmd->sense_phys_addr); } /* * Now destroy the pool itself */ dma_pool_destroy(instance->frame_dma_pool); dma_pool_destroy(instance->sense_dma_pool); instance->frame_dma_pool = NULL; instance->sense_dma_pool = NULL; } /** * megasas_create_frame_pool - Creates DMA pool for cmd frames * @instance: Adapter soft state * * Each command packet has an embedded DMA memory buffer that is used for * filling MFI frame and the SG list that immediately follows the frame. This * function creates those DMA memory buffers for each command packet by using * PCI pool facility. */ static int megasas_create_frame_pool(struct megasas_instance *instance) { int i; u16 max_cmd; u32 frame_count; struct megasas_cmd *cmd; max_cmd = instance->max_mfi_cmds; /* * For MFI controllers. * max_num_sge = 60 * max_sge_sz = 16 byte (sizeof megasas_sge_skinny) * Total 960 byte (15 MFI frame of 64 byte) * * Fusion adapter require only 3 extra frame. * max_num_sge = 16 (defined as MAX_IOCTL_SGE) * max_sge_sz = 12 byte (sizeof megasas_sge64) * Total 192 byte (3 MFI frame of 64 byte) */ frame_count = (instance->adapter_type == MFI_SERIES) ? (15 + 1) : (3 + 1); instance->mfi_frame_size = MEGAMFI_FRAME_SIZE * frame_count; /* * Use DMA pool facility provided by PCI layer */ instance->frame_dma_pool = dma_pool_create("megasas frame pool", &instance->pdev->dev, instance->mfi_frame_size, 256, 0); if (!instance->frame_dma_pool) { dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup frame pool\n"); return -ENOMEM; } instance->sense_dma_pool = dma_pool_create("megasas sense pool", &instance->pdev->dev, 128, 4, 0); if (!instance->sense_dma_pool) { dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup sense pool\n"); dma_pool_destroy(instance->frame_dma_pool); instance->frame_dma_pool = NULL; return -ENOMEM; } /* * Allocate and attach a frame to each of the commands in cmd_list. 
* By making cmd->index as the context instead of the &cmd, we can * always use 32bit context regardless of the architecture */ for (i = 0; i < max_cmd; i++) { cmd = instance->cmd_list[i]; cmd->frame = dma_pool_zalloc(instance->frame_dma_pool, GFP_KERNEL, &cmd->frame_phys_addr); cmd->sense = dma_pool_alloc(instance->sense_dma_pool, GFP_KERNEL, &cmd->sense_phys_addr); /* * megasas_teardown_frame_pool() takes care of freeing * whatever has been allocated */ if (!cmd->frame || !cmd->sense) { dev_printk(KERN_DEBUG, &instance->pdev->dev, "dma_pool_alloc failed\n"); megasas_teardown_frame_pool(instance); return -ENOMEM; } cmd->frame->io.context = cpu_to_le32(cmd->index); cmd->frame->io.pad_0 = 0; if ((instance->adapter_type == MFI_SERIES) && reset_devices) cmd->frame->hdr.cmd = MFI_CMD_INVALID; } return 0; } /** * megasas_free_cmds - Free all the cmds in the free cmd pool * @instance: Adapter soft state */ void megasas_free_cmds(struct megasas_instance *instance) { int i; /* First free the MFI frame pool */ megasas_teardown_frame_pool(instance); /* Free all the commands in the cmd_list */ for (i = 0; i < instance->max_mfi_cmds; i++) kfree(instance->cmd_list[i]); /* Free the cmd_list buffer itself */ kfree(instance->cmd_list); instance->cmd_list = NULL; INIT_LIST_HEAD(&instance->cmd_pool); } /** * megasas_alloc_cmds - Allocates the command packets * @instance: Adapter soft state * * Each command that is issued to the FW, whether IO commands from the OS or * internal commands like IOCTLs, are wrapped in local data structure called * megasas_cmd. The frame embedded in this megasas_cmd is actually issued to * the FW. * * Each frame has a 32-bit field called context (tag). This context is used * to get back the megasas_cmd from the frame when a frame gets completed in * the ISR. Typically the address of the megasas_cmd itself would be used as * the context. But we wanted to keep the differences between 32 and 64 bit * systems to the mininum. We always use 32 bit integers for the context. In * this driver, the 32 bit values are the indices into an array cmd_list. * This array is used only to look up the megasas_cmd given the context. The * free commands themselves are maintained in a linked list called cmd_pool. */ int megasas_alloc_cmds(struct megasas_instance *instance) { int i; int j; u16 max_cmd; struct megasas_cmd *cmd; max_cmd = instance->max_mfi_cmds; /* * instance->cmd_list is an array of struct megasas_cmd pointers. * Allocate the dynamic array first and then allocate individual * commands. */ instance->cmd_list = kcalloc(max_cmd, sizeof(struct megasas_cmd*), GFP_KERNEL); if (!instance->cmd_list) { dev_printk(KERN_DEBUG, &instance->pdev->dev, "out of memory\n"); return -ENOMEM; } for (i = 0; i < max_cmd; i++) { instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd), GFP_KERNEL); if (!instance->cmd_list[i]) { for (j = 0; j < i; j++) kfree(instance->cmd_list[j]); kfree(instance->cmd_list); instance->cmd_list = NULL; return -ENOMEM; } } for (i = 0; i < max_cmd; i++) { cmd = instance->cmd_list[i]; memset(cmd, 0, sizeof(struct megasas_cmd)); cmd->index = i; cmd->scmd = NULL; cmd->instance = instance; list_add_tail(&cmd->list, &instance->cmd_pool); } /* * Create a frame pool and assign one frame to each cmd */ if (megasas_create_frame_pool(instance)) { dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n"); megasas_free_cmds(instance); return -ENOMEM; } return 0; } /* * dcmd_timeout_ocr_possible - Check if OCR is possible based on Driver/FW state. 
* @instance: Adapter soft state * * Return 0 for only Fusion adapter, if driver load/unload is not in progress * or FW is not under OCR. */ inline int dcmd_timeout_ocr_possible(struct megasas_instance *instance) { if (instance->adapter_type == MFI_SERIES) return KILL_ADAPTER; else if (instance->unload || test_bit(MEGASAS_FUSION_OCR_NOT_POSSIBLE, &instance->reset_flags)) return IGNORE_TIMEOUT; else return INITIATE_OCR; } static void megasas_get_pd_info(struct megasas_instance *instance, struct scsi_device *sdev) { int ret; struct megasas_cmd *cmd; struct megasas_dcmd_frame *dcmd; struct MR_PRIV_DEVICE *mr_device_priv_data; u16 device_id = 0; device_id = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id; cmd = megasas_get_cmd(instance); if (!cmd) { dev_err(&instance->pdev->dev, "Failed to get cmd %s\n", __func__); return; } dcmd = &cmd->frame->dcmd; memset(instance->pd_info, 0, sizeof(*instance->pd_info)); memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->mbox.s[0] = cpu_to_le16(device_id); dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0xFF; dcmd->sge_count = 1; dcmd->flags = MFI_FRAME_DIR_READ; dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_PD_INFO)); dcmd->opcode = cpu_to_le32(MR_DCMD_PD_GET_INFO); megasas_set_dma_settings(instance, dcmd, instance->pd_info_h, sizeof(struct MR_PD_INFO)); if ((instance->adapter_type != MFI_SERIES) && !instance->mask_interrupts) ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); else ret = megasas_issue_polled(instance, cmd); switch (ret) { case DCMD_SUCCESS: mr_device_priv_data = sdev->hostdata; le16_to_cpus((u16 *)&instance->pd_info->state.ddf.pdType); mr_device_priv_data->interface_type = instance->pd_info->state.ddf.pdType.intf; break; case DCMD_TIMEOUT: switch (dcmd_timeout_ocr_possible(instance)) { case INITIATE_OCR: cmd->flags |= DRV_DCMD_SKIP_REFIRE; mutex_unlock(&instance->reset_mutex); megasas_reset_fusion(instance->host, MFI_IO_TIMEOUT_OCR); mutex_lock(&instance->reset_mutex); break; case KILL_ADAPTER: megaraid_sas_kill_hba(instance); break; case IGNORE_TIMEOUT: dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", __func__, __LINE__); break; } break; } if (ret != DCMD_TIMEOUT) megasas_return_cmd(instance, cmd); return; } /* * megasas_get_pd_list_info - Returns FW's pd_list structure * @instance: Adapter soft state * @pd_list: pd_list structure * * Issues an internal command (DCMD) to get the FW's controller PD * list structure. This information is mainly used to find out SYSTEM * supported by the FW. 
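 *
 * Request sketch (for orientation only; it mirrors the frame built in the
 * function body below):
 *
 *	dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
 *	dcmd->opcode    = cpu_to_le32(MR_DCMD_PD_LIST_QUERY);
 *	megasas_set_dma_settings(instance, dcmd, instance->pd_list_buf_h,
 *				 MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));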
*/ static int megasas_get_pd_list(struct megasas_instance *instance) { int ret = 0, pd_index = 0; struct megasas_cmd *cmd; struct megasas_dcmd_frame *dcmd; struct MR_PD_LIST *ci; struct MR_PD_ADDRESS *pd_addr; if (instance->pd_list_not_supported) { dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY " "not supported by firmware\n"); return ret; } ci = instance->pd_list_buf; cmd = megasas_get_cmd(instance); if (!cmd) { dev_printk(KERN_DEBUG, &instance->pdev->dev, "(get_pd_list): Failed to get cmd\n"); return -ENOMEM; } dcmd = &cmd->frame->dcmd; memset(ci, 0, sizeof(*ci)); memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST; dcmd->mbox.b[1] = 0; dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = MFI_STAT_INVALID_STATUS; dcmd->sge_count = 1; dcmd->flags = MFI_FRAME_DIR_READ; dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST)); dcmd->opcode = cpu_to_le32(MR_DCMD_PD_LIST_QUERY); megasas_set_dma_settings(instance, dcmd, instance->pd_list_buf_h, (MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST))); if ((instance->adapter_type != MFI_SERIES) && !instance->mask_interrupts) ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); else ret = megasas_issue_polled(instance, cmd); switch (ret) { case DCMD_FAILED: dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY " "failed/not supported by firmware\n"); if (instance->adapter_type != MFI_SERIES) megaraid_sas_kill_hba(instance); else instance->pd_list_not_supported = 1; break; case DCMD_TIMEOUT: switch (dcmd_timeout_ocr_possible(instance)) { case INITIATE_OCR: cmd->flags |= DRV_DCMD_SKIP_REFIRE; /* * DCMD failed from AEN path. * AEN path already hold reset_mutex to avoid PCI access * while OCR is in progress. */ mutex_unlock(&instance->reset_mutex); megasas_reset_fusion(instance->host, MFI_IO_TIMEOUT_OCR); mutex_lock(&instance->reset_mutex); break; case KILL_ADAPTER: megaraid_sas_kill_hba(instance); break; case IGNORE_TIMEOUT: dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d \n", __func__, __LINE__); break; } break; case DCMD_SUCCESS: pd_addr = ci->addr; if (megasas_dbg_lvl & LD_PD_DEBUG) dev_info(&instance->pdev->dev, "%s, sysPD count: 0x%x\n", __func__, le32_to_cpu(ci->count)); if ((le32_to_cpu(ci->count) > (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL))) break; memset(instance->local_pd_list, 0, MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)); for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) { instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].tid = le16_to_cpu(pd_addr->deviceId); instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveType = pd_addr->scsiDevType; instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveState = MR_PD_STATE_SYSTEM; if (megasas_dbg_lvl & LD_PD_DEBUG) dev_info(&instance->pdev->dev, "PD%d: targetID: 0x%03x deviceType:0x%x\n", pd_index, le16_to_cpu(pd_addr->deviceId), pd_addr->scsiDevType); pd_addr++; } memcpy(instance->pd_list, instance->local_pd_list, sizeof(instance->pd_list)); break; } if (ret != DCMD_TIMEOUT) megasas_return_cmd(instance, cmd); return ret; } /* * megasas_get_ld_list_info - Returns FW's ld_list structure * @instance: Adapter soft state * @ld_list: ld_list structure * * Issues an internal command (DCMD) to get the FW's controller PD * list structure. This information is mainly used to find out SYSTEM * supported by the FW. 
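 *
 * Request sketch (mirrors the code below): a single mailbox byte selects
 * the extended (more than 64 VDs) list when the controller supports it.
 *
 *	if (instance->supportmax256vd)
 *		dcmd->mbox.b[0] = 1;
 *	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST);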
*/ static int megasas_get_ld_list(struct megasas_instance *instance) { int ret = 0, ld_index = 0, ids = 0; struct megasas_cmd *cmd; struct megasas_dcmd_frame *dcmd; struct MR_LD_LIST *ci; dma_addr_t ci_h = 0; u32 ld_count; ci = instance->ld_list_buf; ci_h = instance->ld_list_buf_h; cmd = megasas_get_cmd(instance); if (!cmd) { dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_list: Failed to get cmd\n"); return -ENOMEM; } dcmd = &cmd->frame->dcmd; memset(ci, 0, sizeof(*ci)); memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); if (instance->supportmax256vd) dcmd->mbox.b[0] = 1; dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = MFI_STAT_INVALID_STATUS; dcmd->sge_count = 1; dcmd->flags = MFI_FRAME_DIR_READ; dcmd->timeout = 0; dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_LIST)); dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST); dcmd->pad_0 = 0; megasas_set_dma_settings(instance, dcmd, ci_h, sizeof(struct MR_LD_LIST)); if ((instance->adapter_type != MFI_SERIES) && !instance->mask_interrupts) ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); else ret = megasas_issue_polled(instance, cmd); ld_count = le32_to_cpu(ci->ldCount); switch (ret) { case DCMD_FAILED: megaraid_sas_kill_hba(instance); break; case DCMD_TIMEOUT: switch (dcmd_timeout_ocr_possible(instance)) { case INITIATE_OCR: cmd->flags |= DRV_DCMD_SKIP_REFIRE; /* * DCMD failed from AEN path. * AEN path already hold reset_mutex to avoid PCI access * while OCR is in progress. */ mutex_unlock(&instance->reset_mutex); megasas_reset_fusion(instance->host, MFI_IO_TIMEOUT_OCR); mutex_lock(&instance->reset_mutex); break; case KILL_ADAPTER: megaraid_sas_kill_hba(instance); break; case IGNORE_TIMEOUT: dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", __func__, __LINE__); break; } break; case DCMD_SUCCESS: if (megasas_dbg_lvl & LD_PD_DEBUG) dev_info(&instance->pdev->dev, "%s, LD count: 0x%x\n", __func__, ld_count); if (ld_count > instance->fw_supported_vd_count) break; memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT); for (ld_index = 0; ld_index < ld_count; ld_index++) { if (ci->ldList[ld_index].state != 0) { ids = ci->ldList[ld_index].ref.targetId; instance->ld_ids[ids] = ci->ldList[ld_index].ref.targetId; if (megasas_dbg_lvl & LD_PD_DEBUG) dev_info(&instance->pdev->dev, "LD%d: targetID: 0x%03x\n", ld_index, ids); } } break; } if (ret != DCMD_TIMEOUT) megasas_return_cmd(instance, cmd); return ret; } /** * megasas_ld_list_query - Returns FW's ld_list structure * @instance: Adapter soft state * @query_type: ld_list structure type * * Issues an internal command (DCMD) to get the FW's controller PD * list structure. This information is mainly used to find out SYSTEM * supported by the FW. 
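 *
 * Note on the fallback (taken from the handling below): if the firmware
 * rejects MR_DCMD_LD_LIST_QUERY the driver transparently falls back to
 * the older megasas_get_ld_list() path, so callers see a single interface.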
*/ static int megasas_ld_list_query(struct megasas_instance *instance, u8 query_type) { int ret = 0, ld_index = 0, ids = 0; struct megasas_cmd *cmd; struct megasas_dcmd_frame *dcmd; struct MR_LD_TARGETID_LIST *ci; dma_addr_t ci_h = 0; u32 tgtid_count; ci = instance->ld_targetid_list_buf; ci_h = instance->ld_targetid_list_buf_h; cmd = megasas_get_cmd(instance); if (!cmd) { dev_warn(&instance->pdev->dev, "megasas_ld_list_query: Failed to get cmd\n"); return -ENOMEM; } dcmd = &cmd->frame->dcmd; memset(ci, 0, sizeof(*ci)); memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->mbox.b[0] = query_type; if (instance->supportmax256vd) dcmd->mbox.b[2] = 1; dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = MFI_STAT_INVALID_STATUS; dcmd->sge_count = 1; dcmd->flags = MFI_FRAME_DIR_READ; dcmd->timeout = 0; dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST)); dcmd->opcode = cpu_to_le32(MR_DCMD_LD_LIST_QUERY); dcmd->pad_0 = 0; megasas_set_dma_settings(instance, dcmd, ci_h, sizeof(struct MR_LD_TARGETID_LIST)); if ((instance->adapter_type != MFI_SERIES) && !instance->mask_interrupts) ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); else ret = megasas_issue_polled(instance, cmd); switch (ret) { case DCMD_FAILED: dev_info(&instance->pdev->dev, "DCMD not supported by firmware - %s %d\n", __func__, __LINE__); ret = megasas_get_ld_list(instance); break; case DCMD_TIMEOUT: switch (dcmd_timeout_ocr_possible(instance)) { case INITIATE_OCR: cmd->flags |= DRV_DCMD_SKIP_REFIRE; /* * DCMD failed from AEN path. * AEN path already hold reset_mutex to avoid PCI access * while OCR is in progress. */ mutex_unlock(&instance->reset_mutex); megasas_reset_fusion(instance->host, MFI_IO_TIMEOUT_OCR); mutex_lock(&instance->reset_mutex); break; case KILL_ADAPTER: megaraid_sas_kill_hba(instance); break; case IGNORE_TIMEOUT: dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", __func__, __LINE__); break; } break; case DCMD_SUCCESS: tgtid_count = le32_to_cpu(ci->count); if (megasas_dbg_lvl & LD_PD_DEBUG) dev_info(&instance->pdev->dev, "%s, LD count: 0x%x\n", __func__, tgtid_count); if ((tgtid_count > (instance->fw_supported_vd_count))) break; memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); for (ld_index = 0; ld_index < tgtid_count; ld_index++) { ids = ci->targetId[ld_index]; instance->ld_ids[ids] = ci->targetId[ld_index]; if (megasas_dbg_lvl & LD_PD_DEBUG) dev_info(&instance->pdev->dev, "LD%d: targetID: 0x%03x\n", ld_index, ci->targetId[ld_index]); } break; } if (ret != DCMD_TIMEOUT) megasas_return_cmd(instance, cmd); return ret; } /** * megasas_host_device_list_query * dcmd.opcode - MR_DCMD_CTRL_DEVICE_LIST_GET * dcmd.mbox - reserved * dcmd.sge IN - ptr to return MR_HOST_DEVICE_LIST structure * Desc: This DCMD will return the combined device list * Status: MFI_STAT_OK - List returned successfully * MFI_STAT_INVALID_CMD - Firmware support for the feature has been * disabled * @instance: Adapter soft state * @is_probe: Driver probe check * Return: 0 if DCMD succeeded * non-zero if failed */ static int megasas_host_device_list_query(struct megasas_instance *instance, bool is_probe) { int ret, i, target_id; struct megasas_cmd *cmd; struct megasas_dcmd_frame *dcmd; struct MR_HOST_DEVICE_LIST *ci; u32 count; dma_addr_t ci_h; ci = instance->host_device_list_buf; ci_h = instance->host_device_list_buf_h; cmd = megasas_get_cmd(instance); if (!cmd) { dev_warn(&instance->pdev->dev, "%s: failed to get cmd\n", __func__); return -ENOMEM; } dcmd = &cmd->frame->dcmd; memset(ci, 0, sizeof(*ci)); 
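	/*
	 * Build the MR_DCMD_CTRL_DEVICE_LIST_GET frame: mbox.b[0]
	 * distinguishes the initial probe (0) from a later rescan (1), and
	 * the response lands in host_device_list_buf through the DMA
	 * settings programmed below.
	 */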
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->mbox.b[0] = is_probe ? 0 : 1; dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = MFI_STAT_INVALID_STATUS; dcmd->sge_count = 1; dcmd->flags = MFI_FRAME_DIR_READ; dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = cpu_to_le32(HOST_DEVICE_LIST_SZ); dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_DEVICE_LIST_GET); megasas_set_dma_settings(instance, dcmd, ci_h, HOST_DEVICE_LIST_SZ); if (!instance->mask_interrupts) { ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); } else { ret = megasas_issue_polled(instance, cmd); cmd->flags |= DRV_DCMD_SKIP_REFIRE; } switch (ret) { case DCMD_SUCCESS: /* Fill the internal pd_list and ld_ids array based on * targetIds returned by FW */ count = le32_to_cpu(ci->count); if (count > (MEGASAS_MAX_PD + MAX_LOGICAL_DRIVES_EXT)) break; if (megasas_dbg_lvl & LD_PD_DEBUG) dev_info(&instance->pdev->dev, "%s, Device count: 0x%x\n", __func__, count); memset(instance->local_pd_list, 0, MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)); memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT); for (i = 0; i < count; i++) { target_id = le16_to_cpu(ci->host_device_list[i].target_id); if (ci->host_device_list[i].flags.u.bits.is_sys_pd) { instance->local_pd_list[target_id].tid = target_id; instance->local_pd_list[target_id].driveType = ci->host_device_list[i].scsi_type; instance->local_pd_list[target_id].driveState = MR_PD_STATE_SYSTEM; if (megasas_dbg_lvl & LD_PD_DEBUG) dev_info(&instance->pdev->dev, "Device %d: PD targetID: 0x%03x deviceType:0x%x\n", i, target_id, ci->host_device_list[i].scsi_type); } else { instance->ld_ids[target_id] = target_id; if (megasas_dbg_lvl & LD_PD_DEBUG) dev_info(&instance->pdev->dev, "Device %d: LD targetID: 0x%03x\n", i, target_id); } } memcpy(instance->pd_list, instance->local_pd_list, sizeof(instance->pd_list)); break; case DCMD_TIMEOUT: switch (dcmd_timeout_ocr_possible(instance)) { case INITIATE_OCR: cmd->flags |= DRV_DCMD_SKIP_REFIRE; mutex_unlock(&instance->reset_mutex); megasas_reset_fusion(instance->host, MFI_IO_TIMEOUT_OCR); mutex_lock(&instance->reset_mutex); break; case KILL_ADAPTER: megaraid_sas_kill_hba(instance); break; case IGNORE_TIMEOUT: dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", __func__, __LINE__); break; } break; case DCMD_FAILED: dev_err(&instance->pdev->dev, "%s: MR_DCMD_CTRL_DEVICE_LIST_GET failed\n", __func__); break; } if (ret != DCMD_TIMEOUT) megasas_return_cmd(instance, cmd); return ret; } /* * megasas_update_ext_vd_details : Update details w.r.t Extended VD * instance : Controller's instance */ static void megasas_update_ext_vd_details(struct megasas_instance *instance) { struct fusion_context *fusion; u32 ventura_map_sz = 0; fusion = instance->ctrl_context; /* For MFI based controllers return dummy success */ if (!fusion) return; instance->supportmax256vd = instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs; /* Below is additional check to address future FW enhancement */ if (instance->ctrl_info_buf->max_lds > 64) instance->supportmax256vd = 1; instance->drv_supported_vd_count = MEGASAS_MAX_LD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL; instance->drv_supported_pd_count = MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL; if (instance->supportmax256vd) { instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT; instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; } else { instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES; instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; } dev_info(&instance->pdev->dev, "FW provided 
supportMaxExtLDs: %d\tmax_lds: %d\n", instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs ? 1 : 0, instance->ctrl_info_buf->max_lds); if (instance->max_raid_mapsize) { ventura_map_sz = instance->max_raid_mapsize * MR_MIN_MAP_SIZE; /* 64k */ fusion->current_map_sz = ventura_map_sz; fusion->max_map_sz = ventura_map_sz; } else { fusion->old_map_sz = struct_size_t(struct MR_FW_RAID_MAP, ldSpanMap, instance->fw_supported_vd_count); fusion->new_map_sz = sizeof(struct MR_FW_RAID_MAP_EXT); fusion->max_map_sz = max(fusion->old_map_sz, fusion->new_map_sz); if (instance->supportmax256vd) fusion->current_map_sz = fusion->new_map_sz; else fusion->current_map_sz = fusion->old_map_sz; } /* irrespective of FW raid maps, driver raid map is constant */ fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP_ALL); } /* * dcmd.opcode - MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES * dcmd.hdr.length - number of bytes to read * dcmd.sge - Ptr to MR_SNAPDUMP_PROPERTIES * Desc: Fill in snapdump properties * Status: MFI_STAT_OK- Command successful */ void megasas_get_snapdump_properties(struct megasas_instance *instance) { int ret = 0; struct megasas_cmd *cmd; struct megasas_dcmd_frame *dcmd; struct MR_SNAPDUMP_PROPERTIES *ci; dma_addr_t ci_h = 0; ci = instance->snapdump_prop; ci_h = instance->snapdump_prop_h; if (!ci) return; cmd = megasas_get_cmd(instance); if (!cmd) { dev_dbg(&instance->pdev->dev, "Failed to get a free cmd\n"); return; } dcmd = &cmd->frame->dcmd; memset(ci, 0, sizeof(*ci)); memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = MFI_STAT_INVALID_STATUS; dcmd->sge_count = 1; dcmd->flags = MFI_FRAME_DIR_READ; dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_SNAPDUMP_PROPERTIES)); dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES); megasas_set_dma_settings(instance, dcmd, ci_h, sizeof(struct MR_SNAPDUMP_PROPERTIES)); if (!instance->mask_interrupts) { ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); } else { ret = megasas_issue_polled(instance, cmd); cmd->flags |= DRV_DCMD_SKIP_REFIRE; } switch (ret) { case DCMD_SUCCESS: instance->snapdump_wait_time = min_t(u8, ci->trigger_min_num_sec_before_ocr, MEGASAS_MAX_SNAP_DUMP_WAIT_TIME); break; case DCMD_TIMEOUT: switch (dcmd_timeout_ocr_possible(instance)) { case INITIATE_OCR: cmd->flags |= DRV_DCMD_SKIP_REFIRE; mutex_unlock(&instance->reset_mutex); megasas_reset_fusion(instance->host, MFI_IO_TIMEOUT_OCR); mutex_lock(&instance->reset_mutex); break; case KILL_ADAPTER: megaraid_sas_kill_hba(instance); break; case IGNORE_TIMEOUT: dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", __func__, __LINE__); break; } } if (ret != DCMD_TIMEOUT) megasas_return_cmd(instance, cmd); } /** * megasas_get_ctrl_info - Returns FW's controller structure * @instance: Adapter soft state * * Issues an internal command (DCMD) to get the FW's controller structure. * This information is mainly used to find out the maximum IO transfer per * command supported by the FW. 
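 * The same reply is also used to cache several feature flags in the
 * instance: extended VD support, iMR vs. MR detection, JBOD
 * sequence-number fast path, NVMe pass-through, PCI lane margining,
 * snapdump wait time and the task abort/reset timeouts.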
*/ int megasas_get_ctrl_info(struct megasas_instance *instance) { int ret = 0; struct megasas_cmd *cmd; struct megasas_dcmd_frame *dcmd; struct megasas_ctrl_info *ci; dma_addr_t ci_h = 0; ci = instance->ctrl_info_buf; ci_h = instance->ctrl_info_buf_h; cmd = megasas_get_cmd(instance); if (!cmd) { dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a free cmd\n"); return -ENOMEM; } dcmd = &cmd->frame->dcmd; memset(ci, 0, sizeof(*ci)); memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = MFI_STAT_INVALID_STATUS; dcmd->sge_count = 1; dcmd->flags = MFI_FRAME_DIR_READ; dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_ctrl_info)); dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO); dcmd->mbox.b[0] = 1; megasas_set_dma_settings(instance, dcmd, ci_h, sizeof(struct megasas_ctrl_info)); if ((instance->adapter_type != MFI_SERIES) && !instance->mask_interrupts) { ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); } else { ret = megasas_issue_polled(instance, cmd); cmd->flags |= DRV_DCMD_SKIP_REFIRE; } switch (ret) { case DCMD_SUCCESS: /* Save required controller information in * CPU endianness format. */ le32_to_cpus((u32 *)&ci->properties.OnOffProperties); le16_to_cpus((u16 *)&ci->properties.on_off_properties2); le32_to_cpus((u32 *)&ci->adapterOperations2); le32_to_cpus((u32 *)&ci->adapterOperations3); le16_to_cpus((u16 *)&ci->adapter_operations4); le32_to_cpus((u32 *)&ci->adapter_operations5); /* Update the latest Ext VD info. * From Init path, store current firmware details. * From OCR path, detect any firmware properties changes. * in case of Firmware upgrade without system reboot. */ megasas_update_ext_vd_details(instance); instance->support_seqnum_jbod_fp = ci->adapterOperations3.useSeqNumJbodFP; instance->support_morethan256jbod = ci->adapter_operations4.support_pd_map_target_id; instance->support_nvme_passthru = ci->adapter_operations4.support_nvme_passthru; instance->support_pci_lane_margining = ci->adapter_operations5.support_pci_lane_margining; instance->task_abort_tmo = ci->TaskAbortTO; instance->max_reset_tmo = ci->MaxResetTO; /*Check whether controller is iMR or MR */ instance->is_imr = (ci->memory_size ? 0 : 1); instance->snapdump_wait_time = (ci->properties.on_off_properties2.enable_snap_dump ? MEGASAS_DEFAULT_SNAP_DUMP_WAIT_TIME : 0); instance->enable_fw_dev_list = ci->properties.on_off_properties2.enable_fw_dev_list; dev_info(&instance->pdev->dev, "controller type\t: %s(%dMB)\n", instance->is_imr ? "iMR" : "MR", le16_to_cpu(ci->memory_size)); instance->disableOnlineCtrlReset = ci->properties.OnOffProperties.disableOnlineCtrlReset; instance->secure_jbod_support = ci->adapterOperations3.supportSecurityonJBOD; dev_info(&instance->pdev->dev, "Online Controller Reset(OCR)\t: %s\n", instance->disableOnlineCtrlReset ? "Disabled" : "Enabled"); dev_info(&instance->pdev->dev, "Secure JBOD support\t: %s\n", instance->secure_jbod_support ? "Yes" : "No"); dev_info(&instance->pdev->dev, "NVMe passthru support\t: %s\n", instance->support_nvme_passthru ? "Yes" : "No"); dev_info(&instance->pdev->dev, "FW provided TM TaskAbort/Reset timeout\t: %d secs/%d secs\n", instance->task_abort_tmo, instance->max_reset_tmo); dev_info(&instance->pdev->dev, "JBOD sequence map support\t: %s\n", instance->support_seqnum_jbod_fp ? "Yes" : "No"); dev_info(&instance->pdev->dev, "PCI Lane Margining support\t: %s\n", instance->support_pci_lane_margining ? 
"Yes" : "No"); break; case DCMD_TIMEOUT: switch (dcmd_timeout_ocr_possible(instance)) { case INITIATE_OCR: cmd->flags |= DRV_DCMD_SKIP_REFIRE; mutex_unlock(&instance->reset_mutex); megasas_reset_fusion(instance->host, MFI_IO_TIMEOUT_OCR); mutex_lock(&instance->reset_mutex); break; case KILL_ADAPTER: megaraid_sas_kill_hba(instance); break; case IGNORE_TIMEOUT: dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", __func__, __LINE__); break; } break; case DCMD_FAILED: megaraid_sas_kill_hba(instance); break; } if (ret != DCMD_TIMEOUT) megasas_return_cmd(instance, cmd); return ret; } /* * megasas_set_crash_dump_params - Sends address of crash dump DMA buffer * to firmware * * @instance: Adapter soft state * @crash_buf_state - tell FW to turn ON/OFF crash dump feature MR_CRASH_BUF_TURN_OFF = 0 MR_CRASH_BUF_TURN_ON = 1 * @return 0 on success non-zero on failure. * Issues an internal command (DCMD) to set parameters for crash dump feature. * Driver will send address of crash dump DMA buffer and set mbox to tell FW * that driver supports crash dump feature. This DCMD will be sent only if * crash dump feature is supported by the FW. * */ int megasas_set_crash_dump_params(struct megasas_instance *instance, u8 crash_buf_state) { int ret = 0; struct megasas_cmd *cmd; struct megasas_dcmd_frame *dcmd; cmd = megasas_get_cmd(instance); if (!cmd) { dev_err(&instance->pdev->dev, "Failed to get a free cmd\n"); return -ENOMEM; } dcmd = &cmd->frame->dcmd; memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->mbox.b[0] = crash_buf_state; dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = MFI_STAT_INVALID_STATUS; dcmd->sge_count = 1; dcmd->flags = MFI_FRAME_DIR_NONE; dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = cpu_to_le32(CRASH_DMA_BUF_SIZE); dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS); megasas_set_dma_settings(instance, dcmd, instance->crash_dump_h, CRASH_DMA_BUF_SIZE); if ((instance->adapter_type != MFI_SERIES) && !instance->mask_interrupts) ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); else ret = megasas_issue_polled(instance, cmd); if (ret == DCMD_TIMEOUT) { switch (dcmd_timeout_ocr_possible(instance)) { case INITIATE_OCR: cmd->flags |= DRV_DCMD_SKIP_REFIRE; megasas_reset_fusion(instance->host, MFI_IO_TIMEOUT_OCR); break; case KILL_ADAPTER: megaraid_sas_kill_hba(instance); break; case IGNORE_TIMEOUT: dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", __func__, __LINE__); break; } } else megasas_return_cmd(instance, cmd); return ret; } /** * megasas_issue_init_mfi - Initializes the FW * @instance: Adapter soft state * * Issues the INIT MFI cmd */ static int megasas_issue_init_mfi(struct megasas_instance *instance) { __le32 context; struct megasas_cmd *cmd; struct megasas_init_frame *init_frame; struct megasas_init_queue_info *initq_info; dma_addr_t init_frame_h; dma_addr_t initq_info_h; /* * Prepare a init frame. Note the init frame points to queue info * structure. Each frame has SGL allocated after first 64 bytes. For * this frame - since we don't need any SGL - we use SGL's space as * queue info structure * * We will not get a NULL command below. We just created the pool. 
*/ cmd = megasas_get_cmd(instance); init_frame = (struct megasas_init_frame *)cmd->frame; initq_info = (struct megasas_init_queue_info *) ((unsigned long)init_frame + 64); init_frame_h = cmd->frame_phys_addr; initq_info_h = init_frame_h + 64; context = init_frame->context; memset(init_frame, 0, MEGAMFI_FRAME_SIZE); memset(initq_info, 0, sizeof(struct megasas_init_queue_info)); init_frame->context = context; initq_info->reply_queue_entries = cpu_to_le32(instance->max_fw_cmds + 1); initq_info->reply_queue_start_phys_addr_lo = cpu_to_le32(instance->reply_queue_h); initq_info->producer_index_phys_addr_lo = cpu_to_le32(instance->producer_h); initq_info->consumer_index_phys_addr_lo = cpu_to_le32(instance->consumer_h); init_frame->cmd = MFI_CMD_INIT; init_frame->cmd_status = MFI_STAT_INVALID_STATUS; init_frame->queue_info_new_phys_addr_lo = cpu_to_le32(lower_32_bits(initq_info_h)); init_frame->queue_info_new_phys_addr_hi = cpu_to_le32(upper_32_bits(initq_info_h)); init_frame->data_xfer_len = cpu_to_le32(sizeof(struct megasas_init_queue_info)); /* * disable the intr before firing the init frame to FW */ instance->instancet->disable_intr(instance); /* * Issue the init frame in polled mode */ if (megasas_issue_polled(instance, cmd)) { dev_err(&instance->pdev->dev, "Failed to init firmware\n"); megasas_return_cmd(instance, cmd); goto fail_fw_init; } megasas_return_cmd(instance, cmd); return 0; fail_fw_init: return -EINVAL; } static u32 megasas_init_adapter_mfi(struct megasas_instance *instance) { u32 context_sz; u32 reply_q_sz; /* * Get various operational parameters from status register */ instance->max_fw_cmds = instance->instancet->read_fw_status_reg(instance) & 0x00FFFF; /* * Reduce the max supported cmds by 1. This is to ensure that the * reply_q_sz (1 more than the max cmd that driver may send) * does not exceed max cmds that the FW can support */ instance->max_fw_cmds = instance->max_fw_cmds-1; instance->max_mfi_cmds = instance->max_fw_cmds; instance->max_num_sge = (instance->instancet->read_fw_status_reg(instance) & 0xFF0000) >> 0x10; /* * For MFI skinny adapters, MEGASAS_SKINNY_INT_CMDS commands * are reserved for IOCTL + driver's internal DCMDs. */ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) { instance->max_scsi_cmds = (instance->max_fw_cmds - MEGASAS_SKINNY_INT_CMDS); sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS); } else { instance->max_scsi_cmds = (instance->max_fw_cmds - MEGASAS_INT_CMDS); sema_init(&instance->ioctl_sem, (MEGASAS_MFI_IOCTL_CMDS)); } instance->cur_can_queue = instance->max_scsi_cmds; /* * Create a pool of commands */ if (megasas_alloc_cmds(instance)) goto fail_alloc_cmds; /* * Allocate memory for reply queue. Length of reply queue should * be _one_ more than the maximum commands handled by the firmware. * * Note: When FW completes commands, it places corresponding contex * values in this circular reply queue. This circular queue is a fairly * typical producer-consumer queue. FW is the producer (of completed * commands) and the driver is the consumer. 
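 * The queue therefore needs one 32-bit context slot per possible
 * outstanding command, plus one; e.g. with max_fw_cmds = 1008 after the
 * -1 adjustment above, reply_q_sz works out to 1009 * 4 = 4036 bytes.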
*/ context_sz = sizeof(u32); reply_q_sz = context_sz * (instance->max_fw_cmds + 1); instance->reply_queue = dma_alloc_coherent(&instance->pdev->dev, reply_q_sz, &instance->reply_queue_h, GFP_KERNEL); if (!instance->reply_queue) { dev_printk(KERN_DEBUG, &instance->pdev->dev, "Out of DMA mem for reply queue\n"); goto fail_reply_queue; } if (megasas_issue_init_mfi(instance)) goto fail_fw_init; if (megasas_get_ctrl_info(instance)) { dev_err(&instance->pdev->dev, "(%d): Could get controller info " "Fail from %s %d\n", instance->unique_id, __func__, __LINE__); goto fail_fw_init; } instance->fw_support_ieee = 0; instance->fw_support_ieee = (instance->instancet->read_fw_status_reg(instance) & 0x04000000); dev_notice(&instance->pdev->dev, "megasas_init_mfi: fw_support_ieee=%d", instance->fw_support_ieee); if (instance->fw_support_ieee) instance->flag_ieee = 1; return 0; fail_fw_init: dma_free_coherent(&instance->pdev->dev, reply_q_sz, instance->reply_queue, instance->reply_queue_h); fail_reply_queue: megasas_free_cmds(instance); fail_alloc_cmds: return 1; } static void megasas_setup_irq_poll(struct megasas_instance *instance) { struct megasas_irq_context *irq_ctx; u32 count, i; count = instance->msix_vectors > 0 ? instance->msix_vectors : 1; /* Initialize IRQ poll */ for (i = 0; i < count; i++) { irq_ctx = &instance->irq_context[i]; irq_ctx->os_irq = pci_irq_vector(instance->pdev, i); irq_ctx->irq_poll_scheduled = false; irq_poll_init(&irq_ctx->irqpoll, instance->threshold_reply_count, megasas_irqpoll); } } /* * megasas_setup_irqs_ioapic - register legacy interrupts. * @instance: Adapter soft state * * Do not enable interrupt, only setup ISRs. * * Return 0 on success. */ static int megasas_setup_irqs_ioapic(struct megasas_instance *instance) { struct pci_dev *pdev; pdev = instance->pdev; instance->irq_context[0].instance = instance; instance->irq_context[0].MSIxIndex = 0; snprintf(instance->irq_context->name, MEGASAS_MSIX_NAME_LEN, "%s%u", "megasas", instance->host->host_no); if (request_irq(pci_irq_vector(pdev, 0), instance->instancet->service_isr, IRQF_SHARED, instance->irq_context->name, &instance->irq_context[0])) { dev_err(&instance->pdev->dev, "Failed to register IRQ from %s %d\n", __func__, __LINE__); return -1; } instance->perf_mode = MR_LATENCY_PERF_MODE; instance->low_latency_index_start = 0; return 0; } /** * megasas_setup_irqs_msix - register MSI-x interrupts. * @instance: Adapter soft state * @is_probe: Driver probe check * * Do not enable interrupt, only setup ISRs. * * Return 0 on success. 
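 * If any vector fails to register, the vectors requested so far are
 * freed again and, at probe time, the driver falls back to a single
 * legacy (IO-APIC) interrupt via megasas_setup_irqs_ioapic().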
*/ static int megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe) { int i, j; struct pci_dev *pdev; pdev = instance->pdev; /* Try MSI-x */ for (i = 0; i < instance->msix_vectors; i++) { instance->irq_context[i].instance = instance; instance->irq_context[i].MSIxIndex = i; snprintf(instance->irq_context[i].name, MEGASAS_MSIX_NAME_LEN, "%s%u-msix%u", "megasas", instance->host->host_no, i); if (request_irq(pci_irq_vector(pdev, i), instance->instancet->service_isr, 0, instance->irq_context[i].name, &instance->irq_context[i])) { dev_err(&instance->pdev->dev, "Failed to register IRQ for vector %d.\n", i); for (j = 0; j < i; j++) { if (j < instance->low_latency_index_start) irq_update_affinity_hint( pci_irq_vector(pdev, j), NULL); free_irq(pci_irq_vector(pdev, j), &instance->irq_context[j]); } /* Retry irq register for IO_APIC*/ instance->msix_vectors = 0; instance->msix_load_balance = false; if (is_probe) { pci_free_irq_vectors(instance->pdev); return megasas_setup_irqs_ioapic(instance); } else { return -1; } } } return 0; } /* * megasas_destroy_irqs- unregister interrupts. * @instance: Adapter soft state * return: void */ static void megasas_destroy_irqs(struct megasas_instance *instance) { int i; int count; struct megasas_irq_context *irq_ctx; count = instance->msix_vectors > 0 ? instance->msix_vectors : 1; if (instance->adapter_type != MFI_SERIES) { for (i = 0; i < count; i++) { irq_ctx = &instance->irq_context[i]; irq_poll_disable(&irq_ctx->irqpoll); } } if (instance->msix_vectors) for (i = 0; i < instance->msix_vectors; i++) { if (i < instance->low_latency_index_start) irq_update_affinity_hint( pci_irq_vector(instance->pdev, i), NULL); free_irq(pci_irq_vector(instance->pdev, i), &instance->irq_context[i]); } else free_irq(pci_irq_vector(instance->pdev, 0), &instance->irq_context[0]); } /** * megasas_setup_jbod_map - setup jbod map for FP seq_number. * @instance: Adapter soft state * * Return 0 on success. 
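 * On completion instance->use_seqnum_jbod_fp indicates whether the JBOD
 * sequence-number fast path could be enabled, i.e. whether the per-map
 * buffers were allocated and both megasas_sync_pd_seq_num() calls
 * succeeded.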
*/ void megasas_setup_jbod_map(struct megasas_instance *instance) { int i; struct fusion_context *fusion = instance->ctrl_context; size_t pd_seq_map_sz; pd_seq_map_sz = struct_size_t(struct MR_PD_CFG_SEQ_NUM_SYNC, seq, MAX_PHYSICAL_DEVICES); instance->use_seqnum_jbod_fp = instance->support_seqnum_jbod_fp; if (reset_devices || !fusion || !instance->support_seqnum_jbod_fp) { dev_info(&instance->pdev->dev, "JBOD sequence map is disabled %s %d\n", __func__, __LINE__); instance->use_seqnum_jbod_fp = false; return; } if (fusion->pd_seq_sync[0]) goto skip_alloc; for (i = 0; i < JBOD_MAPS_COUNT; i++) { fusion->pd_seq_sync[i] = dma_alloc_coherent (&instance->pdev->dev, pd_seq_map_sz, &fusion->pd_seq_phys[i], GFP_KERNEL); if (!fusion->pd_seq_sync[i]) { dev_err(&instance->pdev->dev, "Failed to allocate memory from %s %d\n", __func__, __LINE__); if (i == 1) { dma_free_coherent(&instance->pdev->dev, pd_seq_map_sz, fusion->pd_seq_sync[0], fusion->pd_seq_phys[0]); fusion->pd_seq_sync[0] = NULL; } instance->use_seqnum_jbod_fp = false; return; } } skip_alloc: if (!megasas_sync_pd_seq_num(instance, false) && !megasas_sync_pd_seq_num(instance, true)) instance->use_seqnum_jbod_fp = true; else instance->use_seqnum_jbod_fp = false; } static void megasas_setup_reply_map(struct megasas_instance *instance) { const struct cpumask *mask; unsigned int queue, cpu, low_latency_index_start; low_latency_index_start = instance->low_latency_index_start; for (queue = low_latency_index_start; queue < instance->msix_vectors; queue++) { mask = pci_irq_get_affinity(instance->pdev, queue); if (!mask) goto fallback; for_each_cpu(cpu, mask) instance->reply_map[cpu] = queue; } return; fallback: queue = low_latency_index_start; for_each_possible_cpu(cpu) { instance->reply_map[cpu] = queue; if (queue == (instance->msix_vectors - 1)) queue = low_latency_index_start; else queue++; } } /** * megasas_get_device_list - Get the PD and LD device list from FW. * @instance: Adapter soft state * @return: Success or failure * * Issue DCMDs to Firmware to get the PD and LD list. * Based on the FW support, driver sends the HOST_DEVICE_LIST or combination * of PD_LIST/LD_LIST_QUERY DCMDs to get the device list. 
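 * i.e. if instance->enable_fw_dev_list is set, a single
 * MR_DCMD_CTRL_DEVICE_LIST_GET covers both PDs and LDs; otherwise
 * megasas_get_pd_list() and megasas_ld_list_query() are issued in turn.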
*/ static int megasas_get_device_list(struct megasas_instance *instance) { if (instance->enable_fw_dev_list) { if (megasas_host_device_list_query(instance, true)) return FAILED; } else { if (megasas_get_pd_list(instance) < 0) { dev_err(&instance->pdev->dev, "failed to get PD list\n"); return FAILED; } if (megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) { dev_err(&instance->pdev->dev, "failed to get LD list\n"); return FAILED; } } return SUCCESS; } /** * megasas_set_high_iops_queue_affinity_and_hint - Set affinity and hint * for high IOPS queues * @instance: Adapter soft state * return: void */ static inline void megasas_set_high_iops_queue_affinity_and_hint(struct megasas_instance *instance) { int i; unsigned int irq; const struct cpumask *mask; if (instance->perf_mode == MR_BALANCED_PERF_MODE) { mask = cpumask_of_node(dev_to_node(&instance->pdev->dev)); for (i = 0; i < instance->low_latency_index_start; i++) { irq = pci_irq_vector(instance->pdev, i); irq_set_affinity_and_hint(irq, mask); } } } static int __megasas_alloc_irq_vectors(struct megasas_instance *instance) { int i, irq_flags; struct irq_affinity desc = { .pre_vectors = instance->low_latency_index_start }; struct irq_affinity *descp = &desc; irq_flags = PCI_IRQ_MSIX; if (instance->smp_affinity_enable) irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES; else descp = NULL; /* Do not allocate msix vectors for poll_queues. * msix_vectors is always within a range of FW supported reply queue. */ i = pci_alloc_irq_vectors_affinity(instance->pdev, instance->low_latency_index_start, instance->msix_vectors - instance->iopoll_q_count, irq_flags, descp); return i; } /** * megasas_alloc_irq_vectors - Allocate IRQ vectors/enable MSI-x vectors * @instance: Adapter soft state * return: void */ static void megasas_alloc_irq_vectors(struct megasas_instance *instance) { int i; unsigned int num_msix_req; instance->iopoll_q_count = 0; if ((instance->adapter_type != MFI_SERIES) && poll_queues) { instance->perf_mode = MR_LATENCY_PERF_MODE; instance->low_latency_index_start = 1; /* reserve for default and non-mananged pre-vector. 
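 * (vector 0 is kept outside the managed-affinity set for internal and
 * management commands; the poll_queues, when enabled, are carved out of
 * the remaining vectors in the check below)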
*/ if (instance->msix_vectors > (poll_queues + 2)) instance->iopoll_q_count = poll_queues; else instance->iopoll_q_count = 0; num_msix_req = num_online_cpus() + instance->low_latency_index_start; instance->msix_vectors = min(num_msix_req, instance->msix_vectors); } i = __megasas_alloc_irq_vectors(instance); if (((instance->perf_mode == MR_BALANCED_PERF_MODE) || instance->iopoll_q_count) && (i != (instance->msix_vectors - instance->iopoll_q_count))) { if (instance->msix_vectors) pci_free_irq_vectors(instance->pdev); /* Disable Balanced IOPS mode and try realloc vectors */ instance->perf_mode = MR_LATENCY_PERF_MODE; instance->low_latency_index_start = 1; num_msix_req = num_online_cpus() + instance->low_latency_index_start; instance->msix_vectors = min(num_msix_req, instance->msix_vectors); instance->iopoll_q_count = 0; i = __megasas_alloc_irq_vectors(instance); } dev_info(&instance->pdev->dev, "requested/available msix %d/%d poll_queue %d\n", instance->msix_vectors - instance->iopoll_q_count, i, instance->iopoll_q_count); if (i > 0) instance->msix_vectors = i; else instance->msix_vectors = 0; if (instance->smp_affinity_enable) megasas_set_high_iops_queue_affinity_and_hint(instance); } /** * megasas_init_fw - Initializes the FW * @instance: Adapter soft state * * This is the main function for initializing firmware */ static int megasas_init_fw(struct megasas_instance *instance) { u32 max_sectors_1; u32 max_sectors_2, tmp_sectors, msix_enable; u32 scratch_pad_1, scratch_pad_2, scratch_pad_3, status_reg; resource_size_t base_addr; void *base_addr_phys; struct megasas_ctrl_info *ctrl_info = NULL; unsigned long bar_list; int i, j, loop; struct IOV_111 *iovPtr; struct fusion_context *fusion; bool intr_coalescing; unsigned int num_msix_req; u16 lnksta, speed; fusion = instance->ctrl_context; /* Find first memory bar */ bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM); instance->bar = find_first_bit(&bar_list, BITS_PER_LONG); if (pci_request_selected_regions(instance->pdev, 1<<instance->bar, "megasas: LSI")) { dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n"); return -EBUSY; } base_addr = pci_resource_start(instance->pdev, instance->bar); instance->reg_set = ioremap(base_addr, 8192); if (!instance->reg_set) { dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to map IO mem\n"); goto fail_ioremap; } base_addr_phys = &base_addr; dev_printk(KERN_DEBUG, &instance->pdev->dev, "BAR:0x%lx BAR's base_addr(phys):%pa mapped virt_addr:0x%p\n", instance->bar, base_addr_phys, instance->reg_set); if (instance->adapter_type != MFI_SERIES) instance->instancet = &megasas_instance_template_fusion; else { switch (instance->pdev->device) { case PCI_DEVICE_ID_LSI_SAS1078R: case PCI_DEVICE_ID_LSI_SAS1078DE: instance->instancet = &megasas_instance_template_ppc; break; case PCI_DEVICE_ID_LSI_SAS1078GEN2: case PCI_DEVICE_ID_LSI_SAS0079GEN2: instance->instancet = &megasas_instance_template_gen2; break; case PCI_DEVICE_ID_LSI_SAS0073SKINNY: case PCI_DEVICE_ID_LSI_SAS0071SKINNY: instance->instancet = &megasas_instance_template_skinny; break; case PCI_DEVICE_ID_LSI_SAS1064R: case PCI_DEVICE_ID_DELL_PERC5: default: instance->instancet = &megasas_instance_template_xscale; instance->pd_list_not_supported = 1; break; } } if (megasas_transition_to_ready(instance, 0)) { dev_info(&instance->pdev->dev, "Failed to transition controller to ready from %s!\n", __func__); if (instance->adapter_type != MFI_SERIES) { status_reg = instance->instancet->read_fw_status_reg( instance); if (status_reg & 
MFI_RESET_ADAPTER) { if (megasas_adp_reset_wait_for_ready (instance, true, 0) == FAILED) goto fail_ready_state; } else { goto fail_ready_state; } } else { atomic_set(&instance->fw_reset_no_pci_access, 1); instance->instancet->adp_reset (instance, instance->reg_set); atomic_set(&instance->fw_reset_no_pci_access, 0); /*waiting for about 30 second before retry*/ ssleep(30); if (megasas_transition_to_ready(instance, 0)) goto fail_ready_state; } dev_info(&instance->pdev->dev, "FW restarted successfully from %s!\n", __func__); } megasas_init_ctrl_params(instance); if (megasas_set_dma_mask(instance)) goto fail_ready_state; if (megasas_alloc_ctrl_mem(instance)) goto fail_alloc_dma_buf; if (megasas_alloc_ctrl_dma_buffers(instance)) goto fail_alloc_dma_buf; fusion = instance->ctrl_context; if (instance->adapter_type >= VENTURA_SERIES) { scratch_pad_2 = megasas_readl(instance, &instance->reg_set->outbound_scratch_pad_2); instance->max_raid_mapsize = ((scratch_pad_2 >> MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) & MR_MAX_RAID_MAP_SIZE_MASK); } instance->enable_sdev_max_qd = enable_sdev_max_qd; switch (instance->adapter_type) { case VENTURA_SERIES: fusion->pcie_bw_limitation = true; break; case AERO_SERIES: fusion->r56_div_offload = true; break; default: break; } /* Check if MSI-X is supported while in ready state */ msix_enable = (instance->instancet->read_fw_status_reg(instance) & 0x4000000) >> 0x1a; if (msix_enable && !msix_disable) { scratch_pad_1 = megasas_readl (instance, &instance->reg_set->outbound_scratch_pad_1); /* Check max MSI-X vectors */ if (fusion) { if (instance->adapter_type == THUNDERBOLT_SERIES) { /* Thunderbolt Series*/ instance->msix_vectors = (scratch_pad_1 & MR_MAX_REPLY_QUEUES_OFFSET) + 1; } else { instance->msix_vectors = ((scratch_pad_1 & MR_MAX_REPLY_QUEUES_EXT_OFFSET) >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1; /* * For Invader series, > 8 MSI-x vectors * supported by FW/HW implies combined * reply queue mode is enabled. * For Ventura series, > 16 MSI-x vectors * supported by FW/HW implies combined * reply queue mode is enabled. */ switch (instance->adapter_type) { case INVADER_SERIES: if (instance->msix_vectors > 8) instance->msix_combined = true; break; case AERO_SERIES: case VENTURA_SERIES: if (instance->msix_vectors > 16) instance->msix_combined = true; break; } if (rdpq_enable) instance->is_rdpq = (scratch_pad_1 & MR_RDPQ_MODE_OFFSET) ? 1 : 0; if (instance->adapter_type >= INVADER_SERIES && !instance->msix_combined) { instance->msix_load_balance = true; instance->smp_affinity_enable = false; } /* Save 1-15 reply post index address to local memory * Index 0 is already saved from reg offset * MPI2_REPLY_POST_HOST_INDEX_OFFSET */ for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY; loop++) { instance->reply_post_host_index_addr[loop] = (u32 __iomem *) ((u8 __iomem *)instance->reg_set + MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET + (loop * 0x10)); } } dev_info(&instance->pdev->dev, "firmware supports msix\t: (%d)", instance->msix_vectors); if (msix_vectors) instance->msix_vectors = min(msix_vectors, instance->msix_vectors); } else /* MFI adapters */ instance->msix_vectors = 1; /* * For Aero (if some conditions are met), driver will configure a * few additional reply queues with interrupt coalescing enabled. * These queues with interrupt coalescing enabled are called * High IOPS queues and rest of reply queues (based on number of * logical CPUs) are termed as Low latency queues. 
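 * (a High IOPS queue is simply a reply queue whose interrupt coalescing
 * stays enabled; MR_HIGH_IOPS_QUEUE_COUNT of them are set aside when
 * MR_BALANCED_PERF_MODE is chosen below)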
* * Total Number of reply queues = High IOPS queues + low latency queues * * For rest of fusion adapters, 1 additional reply queue will be * reserved for management commands, rest of reply queues * (based on number of logical CPUs) will be used for IOs and * referenced as IO queues. * Total Number of reply queues = 1 + IO queues * * MFI adapters supports single MSI-x so single reply queue * will be used for IO and management commands. */ intr_coalescing = (scratch_pad_1 & MR_INTR_COALESCING_SUPPORT_OFFSET) ? true : false; if (intr_coalescing && (num_online_cpus() >= MR_HIGH_IOPS_QUEUE_COUNT) && (instance->msix_vectors == MEGASAS_MAX_MSIX_QUEUES)) instance->perf_mode = MR_BALANCED_PERF_MODE; else instance->perf_mode = MR_LATENCY_PERF_MODE; if (instance->adapter_type == AERO_SERIES) { pcie_capability_read_word(instance->pdev, PCI_EXP_LNKSTA, &lnksta); speed = lnksta & PCI_EXP_LNKSTA_CLS; /* * For Aero, if PCIe link speed is <16 GT/s, then driver should operate * in latency perf mode and enable R1 PCI bandwidth algorithm */ if (speed < 0x4) { instance->perf_mode = MR_LATENCY_PERF_MODE; fusion->pcie_bw_limitation = true; } /* * Performance mode settings provided through module parameter-perf_mode will * take affect only for: * 1. Aero family of adapters. * 2. When user sets module parameter- perf_mode in range of 0-2. */ if ((perf_mode >= MR_BALANCED_PERF_MODE) && (perf_mode <= MR_LATENCY_PERF_MODE)) instance->perf_mode = perf_mode; /* * If intr coalescing is not supported by controller FW, then IOPS * and Balanced modes are not feasible. */ if (!intr_coalescing) instance->perf_mode = MR_LATENCY_PERF_MODE; } if (instance->perf_mode == MR_BALANCED_PERF_MODE) instance->low_latency_index_start = MR_HIGH_IOPS_QUEUE_COUNT; else instance->low_latency_index_start = 1; num_msix_req = num_online_cpus() + instance->low_latency_index_start; instance->msix_vectors = min(num_msix_req, instance->msix_vectors); megasas_alloc_irq_vectors(instance); if (!instance->msix_vectors) instance->msix_load_balance = false; } /* * MSI-X host index 0 is common for all adapter. * It is used for all MPT based Adapters. */ if (instance->msix_combined) { instance->reply_post_host_index_addr[0] = (u32 *)((u8 *)instance->reg_set + MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET); } else { instance->reply_post_host_index_addr[0] = (u32 *)((u8 *)instance->reg_set + MPI2_REPLY_POST_HOST_INDEX_OFFSET); } if (!instance->msix_vectors) { i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY); if (i < 0) goto fail_init_adapter; } megasas_setup_reply_map(instance); dev_info(&instance->pdev->dev, "current msix/online cpus\t: (%d/%d)\n", instance->msix_vectors, (unsigned int)num_online_cpus()); dev_info(&instance->pdev->dev, "RDPQ mode\t: (%s)\n", instance->is_rdpq ? "enabled" : "disabled"); tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet, (unsigned long)instance); /* * Below are default value for legacy Firmware. 
* non-fusion based controllers */ instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES; instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; /* Get operational params, sge flags, send init cmd to controller */ if (instance->instancet->init_adapter(instance)) goto fail_init_adapter; if (instance->adapter_type >= VENTURA_SERIES) { scratch_pad_3 = megasas_readl(instance, &instance->reg_set->outbound_scratch_pad_3); if ((scratch_pad_3 & MR_NVME_PAGE_SIZE_MASK) >= MR_DEFAULT_NVME_PAGE_SHIFT) instance->nvme_page_size = (1 << (scratch_pad_3 & MR_NVME_PAGE_SIZE_MASK)); dev_info(&instance->pdev->dev, "NVME page size\t: (%d)\n", instance->nvme_page_size); } if (instance->msix_vectors ? megasas_setup_irqs_msix(instance, 1) : megasas_setup_irqs_ioapic(instance)) goto fail_init_adapter; if (instance->adapter_type != MFI_SERIES) megasas_setup_irq_poll(instance); instance->instancet->enable_intr(instance); dev_info(&instance->pdev->dev, "INIT adapter done\n"); megasas_setup_jbod_map(instance); if (megasas_get_device_list(instance) != SUCCESS) { dev_err(&instance->pdev->dev, "%s: megasas_get_device_list failed\n", __func__); goto fail_get_ld_pd_list; } /* stream detection initialization */ if (instance->adapter_type >= VENTURA_SERIES) { fusion->stream_detect_by_ld = kcalloc(MAX_LOGICAL_DRIVES_EXT, sizeof(struct LD_STREAM_DETECT *), GFP_KERNEL); if (!fusion->stream_detect_by_ld) { dev_err(&instance->pdev->dev, "unable to allocate stream detection for pool of LDs\n"); goto fail_get_ld_pd_list; } for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) { fusion->stream_detect_by_ld[i] = kzalloc(sizeof(struct LD_STREAM_DETECT), GFP_KERNEL); if (!fusion->stream_detect_by_ld[i]) { dev_err(&instance->pdev->dev, "unable to allocate stream detect by LD\n "); for (j = 0; j < i; ++j) kfree(fusion->stream_detect_by_ld[j]); kfree(fusion->stream_detect_by_ld); fusion->stream_detect_by_ld = NULL; goto fail_get_ld_pd_list; } fusion->stream_detect_by_ld[i]->mru_bit_map = MR_STREAM_BITMAP; } } /* * Compute the max allowed sectors per IO: The controller info has two * limits on max sectors. Driver should use the minimum of these two. * * 1 << stripe_sz_ops.min = max sectors per strip * * Note that older firmwares ( < FW ver 30) didn't report information * to calculate max_sectors_1. So the number ended up as zero always. */ tmp_sectors = 0; ctrl_info = instance->ctrl_info_buf; max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) * le16_to_cpu(ctrl_info->max_strips_per_io); max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size); tmp_sectors = min_t(u32, max_sectors_1, max_sectors_2); instance->peerIsPresent = ctrl_info->cluster.peerIsPresent; instance->passive = ctrl_info->cluster.passive; memcpy(instance->clusterId, ctrl_info->clusterId, sizeof(instance->clusterId)); instance->UnevenSpanSupport = ctrl_info->adapterOperations2.supportUnevenSpans; if (instance->UnevenSpanSupport) { struct fusion_context *fusion = instance->ctrl_context; if (MR_ValidateMapInfo(instance, instance->map_id)) fusion->fast_path_io = 1; else fusion->fast_path_io = 0; } if (ctrl_info->host_interface.SRIOV) { instance->requestorId = ctrl_info->iov.requestorId; if (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) { if (!ctrl_info->adapterOperations2.activePassive) instance->PlasmaFW111 = 1; dev_info(&instance->pdev->dev, "SR-IOV: firmware type: %s\n", instance->PlasmaFW111 ? 
"1.11" : "new"); if (instance->PlasmaFW111) { iovPtr = (struct IOV_111 *) ((unsigned char *)ctrl_info + IOV_111_OFFSET); instance->requestorId = iovPtr->requestorId; } } dev_info(&instance->pdev->dev, "SRIOV: VF requestorId %d\n", instance->requestorId); } instance->crash_dump_fw_support = ctrl_info->adapterOperations3.supportCrashDump; instance->crash_dump_drv_support = (instance->crash_dump_fw_support && instance->crash_dump_buf); if (instance->crash_dump_drv_support) megasas_set_crash_dump_params(instance, MR_CRASH_BUF_TURN_OFF); else { if (instance->crash_dump_buf) dma_free_coherent(&instance->pdev->dev, CRASH_DMA_BUF_SIZE, instance->crash_dump_buf, instance->crash_dump_h); instance->crash_dump_buf = NULL; } if (instance->snapdump_wait_time) { megasas_get_snapdump_properties(instance); dev_info(&instance->pdev->dev, "Snap dump wait time\t: %d\n", instance->snapdump_wait_time); } dev_info(&instance->pdev->dev, "pci id\t\t: (0x%04x)/(0x%04x)/(0x%04x)/(0x%04x)\n", le16_to_cpu(ctrl_info->pci.vendor_id), le16_to_cpu(ctrl_info->pci.device_id), le16_to_cpu(ctrl_info->pci.sub_vendor_id), le16_to_cpu(ctrl_info->pci.sub_device_id)); dev_info(&instance->pdev->dev, "unevenspan support : %s\n", instance->UnevenSpanSupport ? "yes" : "no"); dev_info(&instance->pdev->dev, "firmware crash dump : %s\n", instance->crash_dump_drv_support ? "yes" : "no"); dev_info(&instance->pdev->dev, "JBOD sequence map : %s\n", instance->use_seqnum_jbod_fp ? "enabled" : "disabled"); instance->max_sectors_per_req = instance->max_num_sge * SGE_BUFFER_SIZE / 512; if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors)) instance->max_sectors_per_req = tmp_sectors; /* Check for valid throttlequeuedepth module parameter */ if (throttlequeuedepth && throttlequeuedepth <= instance->max_scsi_cmds) instance->throttlequeuedepth = throttlequeuedepth; else instance->throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH; if ((resetwaittime < 1) || (resetwaittime > MEGASAS_RESET_WAIT_TIME)) resetwaittime = MEGASAS_RESET_WAIT_TIME; if ((scmd_timeout < 10) || (scmd_timeout > MEGASAS_DEFAULT_CMD_TIMEOUT)) scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT; /* Launch SR-IOV heartbeat timer */ if (instance->requestorId) { if (!megasas_sriov_start_heartbeat(instance, 1)) { megasas_start_timer(instance); } else { instance->skip_heartbeat_timer_del = 1; goto fail_get_ld_pd_list; } } /* * Create and start watchdog thread which will monitor * controller state every 1 sec and trigger OCR when * it enters fault state */ if (instance->adapter_type != MFI_SERIES) if (megasas_fusion_start_watchdog(instance) != SUCCESS) goto fail_start_watchdog; return 0; fail_start_watchdog: if (instance->requestorId && !instance->skip_heartbeat_timer_del) del_timer_sync(&instance->sriov_heartbeat_timer); fail_get_ld_pd_list: instance->instancet->disable_intr(instance); megasas_destroy_irqs(instance); fail_init_adapter: if (instance->msix_vectors) pci_free_irq_vectors(instance->pdev); instance->msix_vectors = 0; fail_alloc_dma_buf: megasas_free_ctrl_dma_buffers(instance); megasas_free_ctrl_mem(instance); fail_ready_state: iounmap(instance->reg_set); fail_ioremap: pci_release_selected_regions(instance->pdev, 1<<instance->bar); dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__); return -EINVAL; } /** * megasas_release_mfi - Reverses the FW initialization * @instance: Adapter soft state */ static void megasas_release_mfi(struct megasas_instance *instance) { u32 reply_q_sz = sizeof(u32) *(instance->max_mfi_cmds + 1); if (instance->reply_queue) 
dma_free_coherent(&instance->pdev->dev, reply_q_sz, instance->reply_queue, instance->reply_queue_h); megasas_free_cmds(instance); iounmap(instance->reg_set); pci_release_selected_regions(instance->pdev, 1<<instance->bar); } /** * megasas_get_seq_num - Gets latest event sequence numbers * @instance: Adapter soft state * @eli: FW event log sequence numbers information * * FW maintains a log of all events in a non-volatile area. Upper layers would * usually find out the latest sequence number of the events, the seq number at * the boot etc. They would "read" all the events below the latest seq number * by issuing a direct fw cmd (DCMD). For the future events (beyond latest seq * number), they would subsribe to AEN (asynchronous event notification) and * wait for the events to happen. */ static int megasas_get_seq_num(struct megasas_instance *instance, struct megasas_evt_log_info *eli) { struct megasas_cmd *cmd; struct megasas_dcmd_frame *dcmd; struct megasas_evt_log_info *el_info; dma_addr_t el_info_h = 0; int ret; cmd = megasas_get_cmd(instance); if (!cmd) { return -ENOMEM; } dcmd = &cmd->frame->dcmd; el_info = dma_alloc_coherent(&instance->pdev->dev, sizeof(struct megasas_evt_log_info), &el_info_h, GFP_KERNEL); if (!el_info) { megasas_return_cmd(instance, cmd); return -ENOMEM; } memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0x0; dcmd->sge_count = 1; dcmd->flags = MFI_FRAME_DIR_READ; dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_log_info)); dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_GET_INFO); megasas_set_dma_settings(instance, dcmd, el_info_h, sizeof(struct megasas_evt_log_info)); ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); if (ret != DCMD_SUCCESS) { dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__); goto dcmd_failed; } /* * Copy the data back into callers buffer */ eli->newest_seq_num = el_info->newest_seq_num; eli->oldest_seq_num = el_info->oldest_seq_num; eli->clear_seq_num = el_info->clear_seq_num; eli->shutdown_seq_num = el_info->shutdown_seq_num; eli->boot_seq_num = el_info->boot_seq_num; dcmd_failed: dma_free_coherent(&instance->pdev->dev, sizeof(struct megasas_evt_log_info), el_info, el_info_h); megasas_return_cmd(instance, cmd); return ret; } /** * megasas_register_aen - Registers for asynchronous event notification * @instance: Adapter soft state * @seq_num: The starting sequence number * @class_locale_word: Class of the event * * This function subscribes for AEN for events beyond the @seq_num. It requests * to be notified if and only if the event is of type @class_locale */ static int megasas_register_aen(struct megasas_instance *instance, u32 seq_num, u32 class_locale_word) { int ret_val; struct megasas_cmd *cmd; struct megasas_dcmd_frame *dcmd; union megasas_evt_class_locale curr_aen; union megasas_evt_class_locale prev_aen; /* * If there an AEN pending already (aen_cmd), check if the * class_locale of that pending AEN is inclusive of the new * AEN request we currently have. If it is, then we don't have * to do anything. In other words, whichever events the current * AEN request is subscribing to, have already been subscribed * to. 
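 * For example, a pending registration made with class PROGRESS (-1)
 * already includes a later request for any numerically higher class, as
 * long as the requested locale bits are already part of the pending
 * locale bitmap.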
* * If the old_cmd is _not_ inclusive, then we have to abort * that command, form a class_locale that is superset of both * old and current and re-issue to the FW */ curr_aen.word = class_locale_word; if (instance->aen_cmd) { prev_aen.word = le32_to_cpu(instance->aen_cmd->frame->dcmd.mbox.w[1]); if ((curr_aen.members.class < MFI_EVT_CLASS_DEBUG) || (curr_aen.members.class > MFI_EVT_CLASS_DEAD)) { dev_info(&instance->pdev->dev, "%s %d out of range class %d send by application\n", __func__, __LINE__, curr_aen.members.class); return 0; } /* * A class whose enum value is smaller is inclusive of all * higher values. If a PROGRESS (= -1) was previously * registered, then a new registration requests for higher * classes need not be sent to FW. They are automatically * included. * * Locale numbers don't have such hierarchy. They are bitmap * values */ if ((prev_aen.members.class <= curr_aen.members.class) && !((prev_aen.members.locale & curr_aen.members.locale) ^ curr_aen.members.locale)) { /* * Previously issued event registration includes * current request. Nothing to do. */ return 0; } else { curr_aen.members.locale |= prev_aen.members.locale; if (prev_aen.members.class < curr_aen.members.class) curr_aen.members.class = prev_aen.members.class; instance->aen_cmd->abort_aen = 1; ret_val = megasas_issue_blocked_abort_cmd(instance, instance-> aen_cmd, 30); if (ret_val) { dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to abort " "previous AEN command\n"); return ret_val; } } } cmd = megasas_get_cmd(instance); if (!cmd) return -ENOMEM; dcmd = &cmd->frame->dcmd; memset(instance->evt_detail, 0, sizeof(struct megasas_evt_detail)); /* * Prepare DCMD for aen registration */ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0x0; dcmd->sge_count = 1; dcmd->flags = MFI_FRAME_DIR_READ; dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_detail)); dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_WAIT); dcmd->mbox.w[0] = cpu_to_le32(seq_num); instance->last_seq_num = seq_num; dcmd->mbox.w[1] = cpu_to_le32(curr_aen.word); megasas_set_dma_settings(instance, dcmd, instance->evt_detail_h, sizeof(struct megasas_evt_detail)); if (instance->aen_cmd != NULL) { megasas_return_cmd(instance, cmd); return 0; } /* * Store reference to the cmd used to register for AEN. When an * application wants us to register for AEN, we have to abort this * cmd and re-register with a new EVENT LOCALE supplied by that app */ instance->aen_cmd = cmd; /* * Issue the aen registration frame */ instance->instancet->issue_dcmd(instance, cmd); return 0; } /* megasas_get_target_prop - Send DCMD with below details to firmware. * * This DCMD will fetch few properties of LD/system PD defined * in MR_TARGET_DEV_PROPERTIES. eg. Queue Depth, MDTS value. * * DCMD send by drivers whenever new target is added to the OS. * * dcmd.opcode - MR_DCMD_DEV_GET_TARGET_PROP * dcmd.mbox.b[0] - DCMD is to be fired for LD or system PD. * 0 = system PD, 1 = LD. * dcmd.mbox.s[1] - TargetID for LD/system PD. * dcmd.sge IN - Pointer to return MR_TARGET_DEV_PROPERTIES. * * @instance: Adapter soft state * @sdev: OS provided scsi device * * Returns 0 on success non-zero on failure. 
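 * The target ID is computed from the OS scsi_device as
 * (channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL + sdev->id.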
*/ int megasas_get_target_prop(struct megasas_instance *instance, struct scsi_device *sdev) { int ret; struct megasas_cmd *cmd; struct megasas_dcmd_frame *dcmd; u16 targetId = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id; cmd = megasas_get_cmd(instance); if (!cmd) { dev_err(&instance->pdev->dev, "Failed to get cmd %s\n", __func__); return -ENOMEM; } dcmd = &cmd->frame->dcmd; memset(instance->tgt_prop, 0, sizeof(*instance->tgt_prop)); memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->mbox.b[0] = MEGASAS_IS_LOGICAL(sdev); dcmd->mbox.s[1] = cpu_to_le16(targetId); dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0xFF; dcmd->sge_count = 1; dcmd->flags = MFI_FRAME_DIR_READ; dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES)); dcmd->opcode = cpu_to_le32(MR_DCMD_DRV_GET_TARGET_PROP); megasas_set_dma_settings(instance, dcmd, instance->tgt_prop_h, sizeof(struct MR_TARGET_PROPERTIES)); if ((instance->adapter_type != MFI_SERIES) && !instance->mask_interrupts) ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); else ret = megasas_issue_polled(instance, cmd); switch (ret) { case DCMD_TIMEOUT: switch (dcmd_timeout_ocr_possible(instance)) { case INITIATE_OCR: cmd->flags |= DRV_DCMD_SKIP_REFIRE; mutex_unlock(&instance->reset_mutex); megasas_reset_fusion(instance->host, MFI_IO_TIMEOUT_OCR); mutex_lock(&instance->reset_mutex); break; case KILL_ADAPTER: megaraid_sas_kill_hba(instance); break; case IGNORE_TIMEOUT: dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", __func__, __LINE__); break; } break; default: megasas_return_cmd(instance, cmd); } if (ret != DCMD_SUCCESS) dev_err(&instance->pdev->dev, "return from %s %d return value %d\n", __func__, __LINE__, ret); return ret; } /** * megasas_start_aen - Subscribes to AEN during driver load time * @instance: Adapter soft state */ static int megasas_start_aen(struct megasas_instance *instance) { struct megasas_evt_log_info eli; union megasas_evt_class_locale class_locale; /* * Get the latest sequence number from FW */ memset(&eli, 0, sizeof(eli)); if (megasas_get_seq_num(instance, &eli)) return -1; /* * Register AEN with FW for latest sequence number plus 1 */ class_locale.members.reserved = 0; class_locale.members.locale = MR_EVT_LOCALE_ALL; class_locale.members.class = MR_EVT_CLASS_DEBUG; return megasas_register_aen(instance, le32_to_cpu(eli.newest_seq_num) + 1, class_locale.word); } /** * megasas_io_attach - Attaches this driver to SCSI mid-layer * @instance: Adapter soft state */ static int megasas_io_attach(struct megasas_instance *instance) { struct Scsi_Host *host = instance->host; /* * Export parameters required by SCSI mid-layer */ host->unique_id = instance->unique_id; host->can_queue = instance->max_scsi_cmds; host->this_id = instance->init_id; host->sg_tablesize = instance->max_num_sge; if (instance->fw_support_ieee) instance->max_sectors_per_req = MEGASAS_MAX_SECTORS_IEEE; /* * Check if the module parameter value for max_sectors can be used */ if (max_sectors && max_sectors < instance->max_sectors_per_req) instance->max_sectors_per_req = max_sectors; else { if (max_sectors) { if (((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1078GEN2) || (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0079GEN2)) && (max_sectors <= MEGASAS_MAX_SECTORS)) { instance->max_sectors_per_req = max_sectors; } else { dev_info(&instance->pdev->dev, "max_sectors should be > 0" "and <= %d (or < 1MB for GEN2 controller)\n", instance->max_sectors_per_req); } } } host->max_sectors = 
instance->max_sectors_per_req; host->cmd_per_lun = MEGASAS_DEFAULT_CMD_PER_LUN; host->max_channel = MEGASAS_MAX_CHANNELS - 1; host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL; host->max_lun = MEGASAS_MAX_LUN; host->max_cmd_len = 16; /* Use shared host tagset only for fusion adaptors * if there are managed interrupts (smp affinity enabled case). * Single msix_vectors in kdump, so shared host tag is also disabled. */ host->host_tagset = 0; host->nr_hw_queues = 1; if ((instance->adapter_type != MFI_SERIES) && (instance->msix_vectors > instance->low_latency_index_start) && host_tagset_enable && instance->smp_affinity_enable) { host->host_tagset = 1; host->nr_hw_queues = instance->msix_vectors - instance->low_latency_index_start + instance->iopoll_q_count; if (instance->iopoll_q_count) host->nr_maps = 3; } else { instance->iopoll_q_count = 0; } dev_info(&instance->pdev->dev, "Max firmware commands: %d shared with default " "hw_queues = %d poll_queues %d\n", instance->max_fw_cmds, host->nr_hw_queues - instance->iopoll_q_count, instance->iopoll_q_count); /* * Notify the mid-layer about the new controller */ if (scsi_add_host(host, &instance->pdev->dev)) { dev_err(&instance->pdev->dev, "Failed to add host from %s %d\n", __func__, __LINE__); return -ENODEV; } return 0; } /** * megasas_set_dma_mask - Set DMA mask for supported controllers * * @instance: Adapter soft state * Description: * * For Ventura, driver/FW will operate in 63bit DMA addresses. * * For invader- * By default, driver/FW will operate in 32bit DMA addresses * for consistent DMA mapping but if 32 bit consistent * DMA mask fails, driver will try with 63 bit consistent * mask provided FW is true 63bit DMA capable * * For older controllers(Thunderbolt and MFI based adapters)- * driver/FW will operate in 32 bit consistent DMA addresses. */ static int megasas_set_dma_mask(struct megasas_instance *instance) { u64 consistent_mask; struct pci_dev *pdev; u32 scratch_pad_1; pdev = instance->pdev; consistent_mask = (instance->adapter_type >= VENTURA_SERIES) ? DMA_BIT_MASK(63) : DMA_BIT_MASK(32); if (IS_DMA64) { if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(63)) && dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) goto fail_set_dma_mask; if ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) && (dma_set_coherent_mask(&pdev->dev, consistent_mask) && dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))) { /* * If 32 bit DMA mask fails, then try for 64 bit mask * for FW capable of handling 64 bit DMA. */ scratch_pad_1 = megasas_readl (instance, &instance->reg_set->outbound_scratch_pad_1); if (!(scratch_pad_1 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET)) goto fail_set_dma_mask; else if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(63))) goto fail_set_dma_mask; } } else if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) goto fail_set_dma_mask; if (pdev->dev.coherent_dma_mask == DMA_BIT_MASK(32)) instance->consistent_mask_64bit = false; else instance->consistent_mask_64bit = true; dev_info(&pdev->dev, "%s bit DMA mask and %s bit consistent mask\n", ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) ? "63" : "32"), (instance->consistent_mask_64bit ? "63" : "32")); return 0; fail_set_dma_mask: dev_err(&pdev->dev, "Failed to set DMA mask\n"); return -1; } /* * megasas_set_adapter_type - Set adapter type. 
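 * The PCI device ID alone selects the series; any device not listed
 * below (including the Dell PERC5) falls back to MFI_SERIES.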
* Supported controllers can be divided in * different categories- * enum MR_ADAPTER_TYPE { * MFI_SERIES = 1, * THUNDERBOLT_SERIES = 2, * INVADER_SERIES = 3, * VENTURA_SERIES = 4, * AERO_SERIES = 5, * }; * @instance: Adapter soft state * return: void */ static inline void megasas_set_adapter_type(struct megasas_instance *instance) { if ((instance->pdev->vendor == PCI_VENDOR_ID_DELL) && (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5)) { instance->adapter_type = MFI_SERIES; } else { switch (instance->pdev->device) { case PCI_DEVICE_ID_LSI_AERO_10E1: case PCI_DEVICE_ID_LSI_AERO_10E2: case PCI_DEVICE_ID_LSI_AERO_10E5: case PCI_DEVICE_ID_LSI_AERO_10E6: instance->adapter_type = AERO_SERIES; break; case PCI_DEVICE_ID_LSI_VENTURA: case PCI_DEVICE_ID_LSI_CRUSADER: case PCI_DEVICE_ID_LSI_HARPOON: case PCI_DEVICE_ID_LSI_TOMCAT: case PCI_DEVICE_ID_LSI_VENTURA_4PORT: case PCI_DEVICE_ID_LSI_CRUSADER_4PORT: instance->adapter_type = VENTURA_SERIES; break; case PCI_DEVICE_ID_LSI_FUSION: case PCI_DEVICE_ID_LSI_PLASMA: instance->adapter_type = THUNDERBOLT_SERIES; break; case PCI_DEVICE_ID_LSI_INVADER: case PCI_DEVICE_ID_LSI_INTRUDER: case PCI_DEVICE_ID_LSI_INTRUDER_24: case PCI_DEVICE_ID_LSI_CUTLASS_52: case PCI_DEVICE_ID_LSI_CUTLASS_53: case PCI_DEVICE_ID_LSI_FURY: instance->adapter_type = INVADER_SERIES; break; default: /* For all other supported controllers */ instance->adapter_type = MFI_SERIES; break; } } } static inline int megasas_alloc_mfi_ctrl_mem(struct megasas_instance *instance) { instance->producer = dma_alloc_coherent(&instance->pdev->dev, sizeof(u32), &instance->producer_h, GFP_KERNEL); instance->consumer = dma_alloc_coherent(&instance->pdev->dev, sizeof(u32), &instance->consumer_h, GFP_KERNEL); if (!instance->producer || !instance->consumer) { dev_err(&instance->pdev->dev, "Failed to allocate memory for producer, consumer\n"); return -1; } *instance->producer = 0; *instance->consumer = 0; return 0; } /** * megasas_alloc_ctrl_mem - Allocate per controller memory for core data * structures which are not common across MFI * adapters and fusion adapters. * For MFI based adapters, allocate producer and * consumer buffers. For fusion adapters, allocate * memory for fusion context. 
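 * In every case a per-CPU reply_map array is allocated as well; it is
 * filled in later by megasas_setup_reply_map() to pick a reply queue
 * for each CPU.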
* @instance: Adapter soft state * return: 0 for SUCCESS */ static int megasas_alloc_ctrl_mem(struct megasas_instance *instance) { instance->reply_map = kcalloc(nr_cpu_ids, sizeof(unsigned int), GFP_KERNEL); if (!instance->reply_map) return -ENOMEM; switch (instance->adapter_type) { case MFI_SERIES: if (megasas_alloc_mfi_ctrl_mem(instance)) return -ENOMEM; break; case AERO_SERIES: case VENTURA_SERIES: case THUNDERBOLT_SERIES: case INVADER_SERIES: if (megasas_alloc_fusion_context(instance)) return -ENOMEM; break; } return 0; } /* * megasas_free_ctrl_mem - Free fusion context for fusion adapters and * producer, consumer buffers for MFI adapters * * @instance - Adapter soft instance * */ static inline void megasas_free_ctrl_mem(struct megasas_instance *instance) { kfree(instance->reply_map); if (instance->adapter_type == MFI_SERIES) { if (instance->producer) dma_free_coherent(&instance->pdev->dev, sizeof(u32), instance->producer, instance->producer_h); if (instance->consumer) dma_free_coherent(&instance->pdev->dev, sizeof(u32), instance->consumer, instance->consumer_h); } else { megasas_free_fusion_context(instance); } } /** * megasas_alloc_ctrl_dma_buffers - Allocate consistent DMA buffers during * driver load time * * @instance: Adapter soft instance * * @return: O for SUCCESS */ static inline int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance) { struct pci_dev *pdev = instance->pdev; struct fusion_context *fusion = instance->ctrl_context; instance->evt_detail = dma_alloc_coherent(&pdev->dev, sizeof(struct megasas_evt_detail), &instance->evt_detail_h, GFP_KERNEL); if (!instance->evt_detail) { dev_err(&instance->pdev->dev, "Failed to allocate event detail buffer\n"); return -ENOMEM; } if (fusion) { fusion->ioc_init_request = dma_alloc_coherent(&pdev->dev, sizeof(struct MPI2_IOC_INIT_REQUEST), &fusion->ioc_init_request_phys, GFP_KERNEL); if (!fusion->ioc_init_request) { dev_err(&pdev->dev, "Failed to allocate ioc init request\n"); return -ENOMEM; } instance->snapdump_prop = dma_alloc_coherent(&pdev->dev, sizeof(struct MR_SNAPDUMP_PROPERTIES), &instance->snapdump_prop_h, GFP_KERNEL); if (!instance->snapdump_prop) dev_err(&pdev->dev, "Failed to allocate snapdump properties buffer\n"); instance->host_device_list_buf = dma_alloc_coherent(&pdev->dev, HOST_DEVICE_LIST_SZ, &instance->host_device_list_buf_h, GFP_KERNEL); if (!instance->host_device_list_buf) { dev_err(&pdev->dev, "Failed to allocate targetid list buffer\n"); return -ENOMEM; } } instance->pd_list_buf = dma_alloc_coherent(&pdev->dev, MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), &instance->pd_list_buf_h, GFP_KERNEL); if (!instance->pd_list_buf) { dev_err(&pdev->dev, "Failed to allocate PD list buffer\n"); return -ENOMEM; } instance->ctrl_info_buf = dma_alloc_coherent(&pdev->dev, sizeof(struct megasas_ctrl_info), &instance->ctrl_info_buf_h, GFP_KERNEL); if (!instance->ctrl_info_buf) { dev_err(&pdev->dev, "Failed to allocate controller info buffer\n"); return -ENOMEM; } instance->ld_list_buf = dma_alloc_coherent(&pdev->dev, sizeof(struct MR_LD_LIST), &instance->ld_list_buf_h, GFP_KERNEL); if (!instance->ld_list_buf) { dev_err(&pdev->dev, "Failed to allocate LD list buffer\n"); return -ENOMEM; } instance->ld_targetid_list_buf = dma_alloc_coherent(&pdev->dev, sizeof(struct MR_LD_TARGETID_LIST), &instance->ld_targetid_list_buf_h, GFP_KERNEL); if (!instance->ld_targetid_list_buf) { dev_err(&pdev->dev, "Failed to allocate LD targetid list buffer\n"); return -ENOMEM; } if (!reset_devices) { instance->system_info_buf = 
dma_alloc_coherent(&pdev->dev, sizeof(struct MR_DRV_SYSTEM_INFO), &instance->system_info_h, GFP_KERNEL); instance->pd_info = dma_alloc_coherent(&pdev->dev, sizeof(struct MR_PD_INFO), &instance->pd_info_h, GFP_KERNEL); instance->tgt_prop = dma_alloc_coherent(&pdev->dev, sizeof(struct MR_TARGET_PROPERTIES), &instance->tgt_prop_h, GFP_KERNEL); instance->crash_dump_buf = dma_alloc_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE, &instance->crash_dump_h, GFP_KERNEL); if (!instance->system_info_buf) dev_err(&instance->pdev->dev, "Failed to allocate system info buffer\n"); if (!instance->pd_info) dev_err(&instance->pdev->dev, "Failed to allocate pd_info buffer\n"); if (!instance->tgt_prop) dev_err(&instance->pdev->dev, "Failed to allocate tgt_prop buffer\n"); if (!instance->crash_dump_buf) dev_err(&instance->pdev->dev, "Failed to allocate crash dump buffer\n"); } return 0; } /* * megasas_free_ctrl_dma_buffers - Free consistent DMA buffers allocated * during driver load time * * @instance- Adapter soft instance * */ static inline void megasas_free_ctrl_dma_buffers(struct megasas_instance *instance) { struct pci_dev *pdev = instance->pdev; struct fusion_context *fusion = instance->ctrl_context; if (instance->evt_detail) dma_free_coherent(&pdev->dev, sizeof(struct megasas_evt_detail), instance->evt_detail, instance->evt_detail_h); if (fusion && fusion->ioc_init_request) dma_free_coherent(&pdev->dev, sizeof(struct MPI2_IOC_INIT_REQUEST), fusion->ioc_init_request, fusion->ioc_init_request_phys); if (instance->pd_list_buf) dma_free_coherent(&pdev->dev, MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), instance->pd_list_buf, instance->pd_list_buf_h); if (instance->ld_list_buf) dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_LIST), instance->ld_list_buf, instance->ld_list_buf_h); if (instance->ld_targetid_list_buf) dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_TARGETID_LIST), instance->ld_targetid_list_buf, instance->ld_targetid_list_buf_h); if (instance->ctrl_info_buf) dma_free_coherent(&pdev->dev, sizeof(struct megasas_ctrl_info), instance->ctrl_info_buf, instance->ctrl_info_buf_h); if (instance->system_info_buf) dma_free_coherent(&pdev->dev, sizeof(struct MR_DRV_SYSTEM_INFO), instance->system_info_buf, instance->system_info_h); if (instance->pd_info) dma_free_coherent(&pdev->dev, sizeof(struct MR_PD_INFO), instance->pd_info, instance->pd_info_h); if (instance->tgt_prop) dma_free_coherent(&pdev->dev, sizeof(struct MR_TARGET_PROPERTIES), instance->tgt_prop, instance->tgt_prop_h); if (instance->crash_dump_buf) dma_free_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE, instance->crash_dump_buf, instance->crash_dump_h); if (instance->snapdump_prop) dma_free_coherent(&pdev->dev, sizeof(struct MR_SNAPDUMP_PROPERTIES), instance->snapdump_prop, instance->snapdump_prop_h); if (instance->host_device_list_buf) dma_free_coherent(&pdev->dev, HOST_DEVICE_LIST_SZ, instance->host_device_list_buf, instance->host_device_list_buf_h); } /* * megasas_init_ctrl_params - Initialize controller's instance * parameters before FW init * @instance - Adapter soft instance * @return - void */ static inline void megasas_init_ctrl_params(struct megasas_instance *instance) { instance->fw_crash_state = UNAVAILABLE; megasas_poll_wait_aen = 0; instance->issuepend_done = 1; atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL); /* * Initialize locks and queues */ INIT_LIST_HEAD(&instance->cmd_pool); INIT_LIST_HEAD(&instance->internal_reset_pending_q); atomic_set(&instance->fw_outstanding, 0); atomic64_set(&instance->total_io_count, 0); 
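	/*
	 * Everything below sets up the wait queues, locks and deferred
	 * work (fusion OCR or MFI firmware-state-change handling) that
	 * the reset and event paths rely on later.
	 */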
init_waitqueue_head(&instance->int_cmd_wait_q); init_waitqueue_head(&instance->abort_cmd_wait_q); mutex_init(&instance->crashdump_lock); spin_lock_init(&instance->mfi_pool_lock); spin_lock_init(&instance->hba_lock); spin_lock_init(&instance->stream_lock); spin_lock_init(&instance->completion_lock); mutex_init(&instance->reset_mutex); if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) instance->flag_ieee = 1; instance->flag = 0; instance->unload = 1; instance->last_time = 0; instance->disableOnlineCtrlReset = 1; instance->UnevenSpanSupport = 0; instance->smp_affinity_enable = smp_affinity_enable ? true : false; instance->msix_load_balance = false; if (instance->adapter_type != MFI_SERIES) INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq); else INIT_WORK(&instance->work_init, process_fw_state_change_wq); } /** * megasas_probe_one - PCI hotplug entry point * @pdev: PCI device structure * @id: PCI ids of supported hotplugged adapter */ static int megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) { int rval, pos; struct Scsi_Host *host; struct megasas_instance *instance; u16 control = 0; switch (pdev->device) { case PCI_DEVICE_ID_LSI_AERO_10E0: case PCI_DEVICE_ID_LSI_AERO_10E3: case PCI_DEVICE_ID_LSI_AERO_10E4: case PCI_DEVICE_ID_LSI_AERO_10E7: dev_err(&pdev->dev, "Adapter is in non secure mode\n"); return 1; case PCI_DEVICE_ID_LSI_AERO_10E1: case PCI_DEVICE_ID_LSI_AERO_10E5: dev_info(&pdev->dev, "Adapter is in configurable secure mode\n"); break; } /* Reset MSI-X in the kdump kernel */ if (reset_devices) { pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX); if (pos) { pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS, &control); if (control & PCI_MSIX_FLAGS_ENABLE) { dev_info(&pdev->dev, "resetting MSI-X\n"); pci_write_config_word(pdev, pos + PCI_MSIX_FLAGS, control & ~PCI_MSIX_FLAGS_ENABLE); } } } /* * PCI prepping: enable device set bus mastering and dma mask */ rval = pci_enable_device_mem(pdev); if (rval) { return rval; } pci_set_master(pdev); host = scsi_host_alloc(&megasas_template, sizeof(struct megasas_instance)); if (!host) { dev_printk(KERN_DEBUG, &pdev->dev, "scsi_host_alloc failed\n"); goto fail_alloc_instance; } instance = (struct megasas_instance *)host->hostdata; memset(instance, 0, sizeof(*instance)); atomic_set(&instance->fw_reset_no_pci_access, 0); /* * Initialize PCI related and misc parameters */ instance->pdev = pdev; instance->host = host; instance->unique_id = pci_dev_id(pdev); instance->init_id = MEGASAS_DEFAULT_INIT_ID; megasas_set_adapter_type(instance); /* * Initialize MFI Firmware */ if (megasas_init_fw(instance)) goto fail_init_mfi; if (instance->requestorId) { if (instance->PlasmaFW111) { instance->vf_affiliation_111 = dma_alloc_coherent(&pdev->dev, sizeof(struct MR_LD_VF_AFFILIATION_111), &instance->vf_affiliation_111_h, GFP_KERNEL); if (!instance->vf_affiliation_111) dev_warn(&pdev->dev, "Can't allocate " "memory for VF affiliation buffer\n"); } else { instance->vf_affiliation = dma_alloc_coherent(&pdev->dev, (MAX_LOGICAL_DRIVES + 1) * sizeof(struct MR_LD_VF_AFFILIATION), &instance->vf_affiliation_h, GFP_KERNEL); if (!instance->vf_affiliation) dev_warn(&pdev->dev, "Can't allocate " "memory for VF affiliation buffer\n"); } } /* * Store instance in PCI softstate */ pci_set_drvdata(pdev, instance); /* * Add this controller to megasas_mgmt_info structure so that it * can be exported to management applications */ megasas_mgmt_info.count++; 
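	/*
	 * The management ioctl path later resolves the host_no supplied
	 * by an application back to this instance through the global
	 * megasas_mgmt_info array (see the megasas_lookup_instance()
	 * calls in megasas_mgmt_ioctl_fw() and megasas_mgmt_ioctl_aen()).
	 */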
megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = instance; megasas_mgmt_info.max_index++; /* * Register with SCSI mid-layer */ if (megasas_io_attach(instance)) goto fail_io_attach; instance->unload = 0; /* * Trigger SCSI to scan our drives */ if (!instance->enable_fw_dev_list || (instance->host_device_list_buf->count > 0)) scsi_scan_host(host); /* * Initiate AEN (Asynchronous Event Notification) */ if (megasas_start_aen(instance)) { dev_printk(KERN_DEBUG, &pdev->dev, "start aen failed\n"); goto fail_start_aen; } megasas_setup_debugfs(instance); /* Get current SR-IOV LD/VF affiliation */ if (instance->requestorId) megasas_get_ld_vf_affiliation(instance, 1); return 0; fail_start_aen: instance->unload = 1; scsi_remove_host(instance->host); fail_io_attach: megasas_mgmt_info.count--; megasas_mgmt_info.max_index--; megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL; if (instance->requestorId && !instance->skip_heartbeat_timer_del) del_timer_sync(&instance->sriov_heartbeat_timer); instance->instancet->disable_intr(instance); megasas_destroy_irqs(instance); if (instance->adapter_type != MFI_SERIES) megasas_release_fusion(instance); else megasas_release_mfi(instance); if (instance->msix_vectors) pci_free_irq_vectors(instance->pdev); instance->msix_vectors = 0; if (instance->fw_crash_state != UNAVAILABLE) megasas_free_host_crash_buffer(instance); if (instance->adapter_type != MFI_SERIES) megasas_fusion_stop_watchdog(instance); fail_init_mfi: scsi_host_put(host); fail_alloc_instance: pci_disable_device(pdev); return -ENODEV; } /** * megasas_flush_cache - Requests FW to flush all its caches * @instance: Adapter soft state */ static void megasas_flush_cache(struct megasas_instance *instance) { struct megasas_cmd *cmd; struct megasas_dcmd_frame *dcmd; if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) return; cmd = megasas_get_cmd(instance); if (!cmd) return; dcmd = &cmd->frame->dcmd; memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0x0; dcmd->sge_count = 0; dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE); dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = 0; dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH); dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE; if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) != DCMD_SUCCESS) { dev_err(&instance->pdev->dev, "return from %s %d\n", __func__, __LINE__); return; } megasas_return_cmd(instance, cmd); } /** * megasas_shutdown_controller - Instructs FW to shutdown the controller * @instance: Adapter soft state * @opcode: Shutdown/Hibernate */ static void megasas_shutdown_controller(struct megasas_instance *instance, u32 opcode) { struct megasas_cmd *cmd; struct megasas_dcmd_frame *dcmd; if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) return; cmd = megasas_get_cmd(instance); if (!cmd) return; if (instance->aen_cmd) megasas_issue_blocked_abort_cmd(instance, instance->aen_cmd, MFI_IO_TIMEOUT_SECS); if (instance->map_update_cmd) megasas_issue_blocked_abort_cmd(instance, instance->map_update_cmd, MFI_IO_TIMEOUT_SECS); if (instance->jbod_seq_cmd) megasas_issue_blocked_abort_cmd(instance, instance->jbod_seq_cmd, MFI_IO_TIMEOUT_SECS); dcmd = &cmd->frame->dcmd; memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0x0; dcmd->sge_count = 0; dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE); dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = 0; dcmd->opcode = cpu_to_le32(opcode); if 
(megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) != DCMD_SUCCESS) { dev_err(&instance->pdev->dev, "return from %s %d\n", __func__, __LINE__); return; } megasas_return_cmd(instance, cmd); } /** * megasas_suspend - driver suspend entry point * @dev: Device structure */ static int __maybe_unused megasas_suspend(struct device *dev) { struct megasas_instance *instance; instance = dev_get_drvdata(dev); if (!instance) return 0; instance->unload = 1; dev_info(dev, "%s is called\n", __func__); /* Shutdown SR-IOV heartbeat timer */ if (instance->requestorId && !instance->skip_heartbeat_timer_del) del_timer_sync(&instance->sriov_heartbeat_timer); /* Stop the FW fault detection watchdog */ if (instance->adapter_type != MFI_SERIES) megasas_fusion_stop_watchdog(instance); megasas_flush_cache(instance); megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN); /* cancel the delayed work if this work still in queue */ if (instance->ev != NULL) { struct megasas_aen_event *ev = instance->ev; cancel_delayed_work_sync(&ev->hotplug_work); instance->ev = NULL; } tasklet_kill(&instance->isr_tasklet); pci_set_drvdata(instance->pdev, instance); instance->instancet->disable_intr(instance); megasas_destroy_irqs(instance); if (instance->msix_vectors) pci_free_irq_vectors(instance->pdev); return 0; } /** * megasas_resume- driver resume entry point * @dev: Device structure */ static int __maybe_unused megasas_resume(struct device *dev) { int rval; struct Scsi_Host *host; struct megasas_instance *instance; u32 status_reg; instance = dev_get_drvdata(dev); if (!instance) return 0; host = instance->host; dev_info(dev, "%s is called\n", __func__); /* * We expect the FW state to be READY */ if (megasas_transition_to_ready(instance, 0)) { dev_info(&instance->pdev->dev, "Failed to transition controller to ready from %s!\n", __func__); if (instance->adapter_type != MFI_SERIES) { status_reg = instance->instancet->read_fw_status_reg(instance); if (!(status_reg & MFI_RESET_ADAPTER) || ((megasas_adp_reset_wait_for_ready (instance, true, 0)) == FAILED)) goto fail_ready_state; } else { atomic_set(&instance->fw_reset_no_pci_access, 1); instance->instancet->adp_reset (instance, instance->reg_set); atomic_set(&instance->fw_reset_no_pci_access, 0); /* waiting for about 30 seconds before retry */ ssleep(30); if (megasas_transition_to_ready(instance, 0)) goto fail_ready_state; } dev_info(&instance->pdev->dev, "FW restarted successfully from %s!\n", __func__); } if (megasas_set_dma_mask(instance)) goto fail_set_dma_mask; /* * Initialize MFI Firmware */ atomic_set(&instance->fw_outstanding, 0); atomic_set(&instance->ldio_outstanding, 0); /* Now re-enable MSI-X */ if (instance->msix_vectors) megasas_alloc_irq_vectors(instance); if (!instance->msix_vectors) { rval = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY); if (rval < 0) goto fail_reenable_msix; } megasas_setup_reply_map(instance); if (instance->adapter_type != MFI_SERIES) { megasas_reset_reply_desc(instance); if (megasas_ioc_init_fusion(instance)) { megasas_free_cmds(instance); megasas_free_cmds_fusion(instance); goto fail_init_mfi; } if (!megasas_get_map_info(instance)) megasas_sync_map_info(instance); } else { *instance->producer = 0; *instance->consumer = 0; if (megasas_issue_init_mfi(instance)) goto fail_init_mfi; } if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) goto fail_init_mfi; tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet, (unsigned long)instance); if (instance->msix_vectors ? 
megasas_setup_irqs_msix(instance, 0) : megasas_setup_irqs_ioapic(instance)) goto fail_init_mfi; if (instance->adapter_type != MFI_SERIES) megasas_setup_irq_poll(instance); /* Re-launch SR-IOV heartbeat timer */ if (instance->requestorId) { if (!megasas_sriov_start_heartbeat(instance, 0)) megasas_start_timer(instance); else { instance->skip_heartbeat_timer_del = 1; goto fail_init_mfi; } } instance->instancet->enable_intr(instance); megasas_setup_jbod_map(instance); instance->unload = 0; /* * Initiate AEN (Asynchronous Event Notification) */ if (megasas_start_aen(instance)) dev_err(&instance->pdev->dev, "Start AEN failed\n"); /* Re-launch FW fault watchdog */ if (instance->adapter_type != MFI_SERIES) if (megasas_fusion_start_watchdog(instance) != SUCCESS) goto fail_start_watchdog; return 0; fail_start_watchdog: if (instance->requestorId && !instance->skip_heartbeat_timer_del) del_timer_sync(&instance->sriov_heartbeat_timer); fail_init_mfi: megasas_free_ctrl_dma_buffers(instance); megasas_free_ctrl_mem(instance); scsi_host_put(host); fail_reenable_msix: fail_set_dma_mask: fail_ready_state: return -ENODEV; } static inline int megasas_wait_for_adapter_operational(struct megasas_instance *instance) { int wait_time = MEGASAS_RESET_WAIT_TIME * 2; int i; u8 adp_state; for (i = 0; i < wait_time; i++) { adp_state = atomic_read(&instance->adprecovery); if ((adp_state == MEGASAS_HBA_OPERATIONAL) || (adp_state == MEGASAS_HW_CRITICAL_ERROR)) break; if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) dev_notice(&instance->pdev->dev, "waiting for controller reset to finish\n"); msleep(1000); } if (adp_state != MEGASAS_HBA_OPERATIONAL) { dev_info(&instance->pdev->dev, "%s HBA failed to become operational, adp_state %d\n", __func__, adp_state); return 1; } return 0; } /** * megasas_detach_one - PCI hot"un"plug entry point * @pdev: PCI device structure */ static void megasas_detach_one(struct pci_dev *pdev) { int i; struct Scsi_Host *host; struct megasas_instance *instance; struct fusion_context *fusion; size_t pd_seq_map_sz; instance = pci_get_drvdata(pdev); if (!instance) return; host = instance->host; fusion = instance->ctrl_context; /* Shutdown SR-IOV heartbeat timer */ if (instance->requestorId && !instance->skip_heartbeat_timer_del) del_timer_sync(&instance->sriov_heartbeat_timer); /* Stop the FW fault detection watchdog */ if (instance->adapter_type != MFI_SERIES) megasas_fusion_stop_watchdog(instance); if (instance->fw_crash_state != UNAVAILABLE) megasas_free_host_crash_buffer(instance); scsi_remove_host(instance->host); instance->unload = 1; if (megasas_wait_for_adapter_operational(instance)) goto skip_firing_dcmds; megasas_flush_cache(instance); megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); skip_firing_dcmds: /* cancel the delayed work if this work still in queue*/ if (instance->ev != NULL) { struct megasas_aen_event *ev = instance->ev; cancel_delayed_work_sync(&ev->hotplug_work); instance->ev = NULL; } /* cancel all wait events */ wake_up_all(&instance->int_cmd_wait_q); tasklet_kill(&instance->isr_tasklet); /* * Take the instance off the instance array. Note that we will not * decrement the max_index. 
We let this array be sparse array */ for (i = 0; i < megasas_mgmt_info.max_index; i++) { if (megasas_mgmt_info.instance[i] == instance) { megasas_mgmt_info.count--; megasas_mgmt_info.instance[i] = NULL; break; } } instance->instancet->disable_intr(instance); megasas_destroy_irqs(instance); if (instance->msix_vectors) pci_free_irq_vectors(instance->pdev); if (instance->adapter_type >= VENTURA_SERIES) { for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) kfree(fusion->stream_detect_by_ld[i]); kfree(fusion->stream_detect_by_ld); fusion->stream_detect_by_ld = NULL; } if (instance->adapter_type != MFI_SERIES) { megasas_release_fusion(instance); pd_seq_map_sz = struct_size_t(struct MR_PD_CFG_SEQ_NUM_SYNC, seq, MAX_PHYSICAL_DEVICES); for (i = 0; i < 2 ; i++) { if (fusion->ld_map[i]) dma_free_coherent(&instance->pdev->dev, fusion->max_map_sz, fusion->ld_map[i], fusion->ld_map_phys[i]); if (fusion->ld_drv_map[i]) { if (is_vmalloc_addr(fusion->ld_drv_map[i])) vfree(fusion->ld_drv_map[i]); else free_pages((ulong)fusion->ld_drv_map[i], fusion->drv_map_pages); } if (fusion->pd_seq_sync[i]) dma_free_coherent(&instance->pdev->dev, pd_seq_map_sz, fusion->pd_seq_sync[i], fusion->pd_seq_phys[i]); } } else { megasas_release_mfi(instance); } if (instance->vf_affiliation) dma_free_coherent(&pdev->dev, (MAX_LOGICAL_DRIVES + 1) * sizeof(struct MR_LD_VF_AFFILIATION), instance->vf_affiliation, instance->vf_affiliation_h); if (instance->vf_affiliation_111) dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_VF_AFFILIATION_111), instance->vf_affiliation_111, instance->vf_affiliation_111_h); if (instance->hb_host_mem) dma_free_coherent(&pdev->dev, sizeof(struct MR_CTRL_HB_HOST_MEM), instance->hb_host_mem, instance->hb_host_mem_h); megasas_free_ctrl_dma_buffers(instance); megasas_free_ctrl_mem(instance); megasas_destroy_debugfs(instance); scsi_host_put(host); pci_disable_device(pdev); } /** * megasas_shutdown - Shutdown entry point * @pdev: PCI device structure */ static void megasas_shutdown(struct pci_dev *pdev) { struct megasas_instance *instance = pci_get_drvdata(pdev); if (!instance) return; instance->unload = 1; if (megasas_wait_for_adapter_operational(instance)) goto skip_firing_dcmds; megasas_flush_cache(instance); megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); skip_firing_dcmds: instance->instancet->disable_intr(instance); megasas_destroy_irqs(instance); if (instance->msix_vectors) pci_free_irq_vectors(instance->pdev); } /* * megasas_mgmt_open - char node "open" entry point * @inode: char node inode * @filep: char node file */ static int megasas_mgmt_open(struct inode *inode, struct file *filep) { /* * Allow only those users with admin rights */ if (!capable(CAP_SYS_ADMIN)) return -EACCES; return 0; } /* * megasas_mgmt_fasync - Async notifier registration from applications * @fd: char node file descriptor number * @filep: char node file * @mode: notifier on/off * * This function adds the calling process to a driver global queue. When an * event occurs, SIGIO will be sent to all processes in this queue. 
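 *
 * A minimal userspace sketch (illustrative only; the device node path
 * is whatever udev created for the "megaraid_sas_ioctl" char major and
 * is an assumption here, not something this driver mandates):
 *
 *	int fd = open("/dev/megaraid_sas_ioctl_node", O_RDWR);
 *	fcntl(fd, F_SETOWN, getpid());
 *	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | FASYNC);
 *
 * The F_SETFL/FASYNC transition is what makes the VFS invoke this
 * handler; MEGASAS_IOC_GET_AEN rejects callers that skipped it (see
 * the private_data check in megasas_mgmt_ioctl_aen()).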
*/ static int megasas_mgmt_fasync(int fd, struct file *filep, int mode) { int rc; mutex_lock(&megasas_async_queue_mutex); rc = fasync_helper(fd, filep, mode, &megasas_async_queue); mutex_unlock(&megasas_async_queue_mutex); if (rc >= 0) { /* For sanity check when we get ioctl */ filep->private_data = filep; return 0; } printk(KERN_DEBUG "megasas: fasync_helper failed [%d]\n", rc); return rc; } /* * megasas_mgmt_poll - char node "poll" entry point * @filep: char node file * @wait: Events to poll for */ static __poll_t megasas_mgmt_poll(struct file *file, poll_table *wait) { __poll_t mask; unsigned long flags; poll_wait(file, &megasas_poll_wait, wait); spin_lock_irqsave(&poll_aen_lock, flags); if (megasas_poll_wait_aen) mask = (EPOLLIN | EPOLLRDNORM); else mask = 0; megasas_poll_wait_aen = 0; spin_unlock_irqrestore(&poll_aen_lock, flags); return mask; } /* * megasas_set_crash_dump_params_ioctl: * Send CRASH_DUMP_MODE DCMD to all controllers * @cmd: MFI command frame */ static int megasas_set_crash_dump_params_ioctl(struct megasas_cmd *cmd) { struct megasas_instance *local_instance; int i, error = 0; int crash_support; crash_support = cmd->frame->dcmd.mbox.w[0]; for (i = 0; i < megasas_mgmt_info.max_index; i++) { local_instance = megasas_mgmt_info.instance[i]; if (local_instance && local_instance->crash_dump_drv_support) { if ((atomic_read(&local_instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) && !megasas_set_crash_dump_params(local_instance, crash_support)) { local_instance->crash_dump_app_support = crash_support; dev_info(&local_instance->pdev->dev, "Application firmware crash " "dump mode set success\n"); error = 0; } else { dev_info(&local_instance->pdev->dev, "Application firmware crash " "dump mode set failed\n"); error = -1; } } } return error; } /** * megasas_mgmt_fw_ioctl - Issues management ioctls to FW * @instance: Adapter soft state * @user_ioc: User's ioctl packet * @ioc: ioctl packet */ static int megasas_mgmt_fw_ioctl(struct megasas_instance *instance, struct megasas_iocpacket __user * user_ioc, struct megasas_iocpacket *ioc) { struct megasas_sge64 *kern_sge64 = NULL; struct megasas_sge32 *kern_sge32 = NULL; struct megasas_cmd *cmd; void *kbuff_arr[MAX_IOCTL_SGE]; dma_addr_t buf_handle = 0; int error = 0, i; void *sense = NULL; dma_addr_t sense_handle; void *sense_ptr; u32 opcode = 0; int ret = DCMD_SUCCESS; memset(kbuff_arr, 0, sizeof(kbuff_arr)); if (ioc->sge_count > MAX_IOCTL_SGE) { dev_printk(KERN_DEBUG, &instance->pdev->dev, "SGE count [%d] > max limit [%d]\n", ioc->sge_count, MAX_IOCTL_SGE); return -EINVAL; } if ((ioc->frame.hdr.cmd >= MFI_CMD_OP_COUNT) || ((ioc->frame.hdr.cmd == MFI_CMD_NVME) && !instance->support_nvme_passthru) || ((ioc->frame.hdr.cmd == MFI_CMD_TOOLBOX) && !instance->support_pci_lane_margining)) { dev_err(&instance->pdev->dev, "Received invalid ioctl command 0x%x\n", ioc->frame.hdr.cmd); return -ENOTSUPP; } cmd = megasas_get_cmd(instance); if (!cmd) { dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a cmd packet\n"); return -ENOMEM; } /* * User's IOCTL packet has 2 frames (maximum). Copy those two * frames into our cmd's frames. cmd->frame's context will get * overwritten when we copy from user's frames. 
So set that value * alone separately */ memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE); cmd->frame->hdr.context = cpu_to_le32(cmd->index); cmd->frame->hdr.pad_0 = 0; cmd->frame->hdr.flags &= (~MFI_FRAME_IEEE); if (instance->consistent_mask_64bit) cmd->frame->hdr.flags |= cpu_to_le16((MFI_FRAME_SGL64 | MFI_FRAME_SENSE64)); else cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_SGL64 | MFI_FRAME_SENSE64)); if (cmd->frame->hdr.cmd == MFI_CMD_DCMD) opcode = le32_to_cpu(cmd->frame->dcmd.opcode); if (opcode == MR_DCMD_CTRL_SHUTDOWN) { mutex_lock(&instance->reset_mutex); if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) { megasas_return_cmd(instance, cmd); mutex_unlock(&instance->reset_mutex); return -1; } mutex_unlock(&instance->reset_mutex); } if (opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) { error = megasas_set_crash_dump_params_ioctl(cmd); megasas_return_cmd(instance, cmd); return error; } /* * The management interface between applications and the fw uses * MFI frames. E.g, RAID configuration changes, LD property changes * etc are accomplishes through different kinds of MFI frames. The * driver needs to care only about substituting user buffers with * kernel buffers in SGLs. The location of SGL is embedded in the * struct iocpacket itself. */ if (instance->consistent_mask_64bit) kern_sge64 = (struct megasas_sge64 *) ((unsigned long)cmd->frame + ioc->sgl_off); else kern_sge32 = (struct megasas_sge32 *) ((unsigned long)cmd->frame + ioc->sgl_off); /* * For each user buffer, create a mirror buffer and copy in */ for (i = 0; i < ioc->sge_count; i++) { if (!ioc->sgl[i].iov_len) continue; kbuff_arr[i] = dma_alloc_coherent(&instance->pdev->dev, ioc->sgl[i].iov_len, &buf_handle, GFP_KERNEL); if (!kbuff_arr[i]) { dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc " "kernel SGL buffer for IOCTL\n"); error = -ENOMEM; goto out; } /* * We don't change the dma_coherent_mask, so * dma_alloc_coherent only returns 32bit addresses */ if (instance->consistent_mask_64bit) { kern_sge64[i].phys_addr = cpu_to_le64(buf_handle); kern_sge64[i].length = cpu_to_le32(ioc->sgl[i].iov_len); } else { kern_sge32[i].phys_addr = cpu_to_le32(buf_handle); kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len); } /* * We created a kernel buffer corresponding to the * user buffer. 
Now copy in from the user buffer */ if (copy_from_user(kbuff_arr[i], ioc->sgl[i].iov_base, (u32) (ioc->sgl[i].iov_len))) { error = -EFAULT; goto out; } } if (ioc->sense_len) { /* make sure the pointer is part of the frame */ if (ioc->sense_off > (sizeof(union megasas_frame) - sizeof(__le64))) { error = -EINVAL; goto out; } sense = dma_alloc_coherent(&instance->pdev->dev, ioc->sense_len, &sense_handle, GFP_KERNEL); if (!sense) { error = -ENOMEM; goto out; } /* always store 64 bits regardless of addressing */ sense_ptr = (void *)cmd->frame + ioc->sense_off; put_unaligned_le64(sense_handle, sense_ptr); } /* * Set the sync_cmd flag so that the ISR knows not to complete this * cmd to the SCSI mid-layer */ cmd->sync_cmd = 1; ret = megasas_issue_blocked_cmd(instance, cmd, 0); switch (ret) { case DCMD_INIT: case DCMD_BUSY: cmd->sync_cmd = 0; dev_err(&instance->pdev->dev, "return -EBUSY from %s %d cmd 0x%x opcode 0x%x cmd->cmd_status_drv 0x%x\n", __func__, __LINE__, cmd->frame->hdr.cmd, opcode, cmd->cmd_status_drv); error = -EBUSY; goto out; } cmd->sync_cmd = 0; if (instance->unload == 1) { dev_info(&instance->pdev->dev, "Driver unload is in progress " "don't submit data to application\n"); goto out; } /* * copy out the kernel buffers to user buffers */ for (i = 0; i < ioc->sge_count; i++) { if (copy_to_user(ioc->sgl[i].iov_base, kbuff_arr[i], ioc->sgl[i].iov_len)) { error = -EFAULT; goto out; } } /* * copy out the sense */ if (ioc->sense_len) { void __user *uptr; /* * sense_ptr points to the location that has the user * sense buffer address */ sense_ptr = (void *)ioc->frame.raw + ioc->sense_off; if (in_compat_syscall()) uptr = compat_ptr(get_unaligned((compat_uptr_t *) sense_ptr)); else uptr = get_unaligned((void __user **)sense_ptr); if (copy_to_user(uptr, sense, ioc->sense_len)) { dev_err(&instance->pdev->dev, "Failed to copy out to user " "sense data\n"); error = -EFAULT; goto out; } } /* * copy the status codes returned by the fw */ if (copy_to_user(&user_ioc->frame.hdr.cmd_status, &cmd->frame->hdr.cmd_status, sizeof(u8))) { dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error copying out cmd_status\n"); error = -EFAULT; } out: if (sense) { dma_free_coherent(&instance->pdev->dev, ioc->sense_len, sense, sense_handle); } for (i = 0; i < ioc->sge_count; i++) { if (kbuff_arr[i]) { if (instance->consistent_mask_64bit) dma_free_coherent(&instance->pdev->dev, le32_to_cpu(kern_sge64[i].length), kbuff_arr[i], le64_to_cpu(kern_sge64[i].phys_addr)); else dma_free_coherent(&instance->pdev->dev, le32_to_cpu(kern_sge32[i].length), kbuff_arr[i], le32_to_cpu(kern_sge32[i].phys_addr)); kbuff_arr[i] = NULL; } } megasas_return_cmd(instance, cmd); return error; } static struct megasas_iocpacket * megasas_compat_iocpacket_get_user(void __user *arg) { struct megasas_iocpacket *ioc; struct compat_megasas_iocpacket __user *cioc = arg; size_t size; int err = -EFAULT; int i; ioc = kzalloc(sizeof(*ioc), GFP_KERNEL); if (!ioc) return ERR_PTR(-ENOMEM); size = offsetof(struct megasas_iocpacket, frame) + sizeof(ioc->frame); if (copy_from_user(ioc, arg, size)) goto out; for (i = 0; i < MAX_IOCTL_SGE; i++) { compat_uptr_t iov_base; if (get_user(iov_base, &cioc->sgl[i].iov_base) || get_user(ioc->sgl[i].iov_len, &cioc->sgl[i].iov_len)) goto out; ioc->sgl[i].iov_base = compat_ptr(iov_base); } return ioc; out: kfree(ioc); return ERR_PTR(err); } static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg) { struct megasas_iocpacket __user *user_ioc = (struct megasas_iocpacket __user *)arg; struct megasas_iocpacket *ioc; 
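	/*
	 * For 32-bit callers only the iovec layout inside the packet
	 * differs, so megasas_compat_iocpacket_get_user() rebuilds a
	 * native struct megasas_iocpacket up front and the remainder of
	 * this handler is shared between native and compat paths.
	 */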
struct megasas_instance *instance; int error; if (in_compat_syscall()) ioc = megasas_compat_iocpacket_get_user(user_ioc); else ioc = memdup_user(user_ioc, sizeof(struct megasas_iocpacket)); if (IS_ERR(ioc)) return PTR_ERR(ioc); instance = megasas_lookup_instance(ioc->host_no); if (!instance) { error = -ENODEV; goto out_kfree_ioc; } /* Block ioctls in VF mode */ if (instance->requestorId && !allow_vf_ioctls) { error = -ENODEV; goto out_kfree_ioc; } if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { dev_err(&instance->pdev->dev, "Controller in crit error\n"); error = -ENODEV; goto out_kfree_ioc; } if (instance->unload == 1) { error = -ENODEV; goto out_kfree_ioc; } if (down_interruptible(&instance->ioctl_sem)) { error = -ERESTARTSYS; goto out_kfree_ioc; } if (megasas_wait_for_adapter_operational(instance)) { error = -ENODEV; goto out_up; } error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc); out_up: up(&instance->ioctl_sem); out_kfree_ioc: kfree(ioc); return error; } static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg) { struct megasas_instance *instance; struct megasas_aen aen; int error; if (file->private_data != file) { printk(KERN_DEBUG "megasas: fasync_helper was not " "called first\n"); return -EINVAL; } if (copy_from_user(&aen, (void __user *)arg, sizeof(aen))) return -EFAULT; instance = megasas_lookup_instance(aen.host_no); if (!instance) return -ENODEV; if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { return -ENODEV; } if (instance->unload == 1) { return -ENODEV; } if (megasas_wait_for_adapter_operational(instance)) return -ENODEV; mutex_lock(&instance->reset_mutex); error = megasas_register_aen(instance, aen.seq_num, aen.class_locale_word); mutex_unlock(&instance->reset_mutex); return error; } /** * megasas_mgmt_ioctl - char node ioctl entry point * @file: char device file pointer * @cmd: ioctl command * @arg: ioctl command arguments address */ static long megasas_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { switch (cmd) { case MEGASAS_IOC_FIRMWARE: return megasas_mgmt_ioctl_fw(file, arg); case MEGASAS_IOC_GET_AEN: return megasas_mgmt_ioctl_aen(file, arg); } return -ENOTTY; } #ifdef CONFIG_COMPAT static long megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { switch (cmd) { case MEGASAS_IOC_FIRMWARE32: return megasas_mgmt_ioctl_fw(file, arg); case MEGASAS_IOC_GET_AEN: return megasas_mgmt_ioctl_aen(file, arg); } return -ENOTTY; } #endif /* * File operations structure for management interface */ static const struct file_operations megasas_mgmt_fops = { .owner = THIS_MODULE, .open = megasas_mgmt_open, .fasync = megasas_mgmt_fasync, .unlocked_ioctl = megasas_mgmt_ioctl, .poll = megasas_mgmt_poll, #ifdef CONFIG_COMPAT .compat_ioctl = megasas_mgmt_compat_ioctl, #endif .llseek = noop_llseek, }; static SIMPLE_DEV_PM_OPS(megasas_pm_ops, megasas_suspend, megasas_resume); /* * PCI hotplug support registration structure */ static struct pci_driver megasas_pci_driver = { .name = "megaraid_sas", .id_table = megasas_pci_table, .probe = megasas_probe_one, .remove = megasas_detach_one, .driver.pm = &megasas_pm_ops, .shutdown = megasas_shutdown, }; /* * Sysfs driver attributes */ static ssize_t version_show(struct device_driver *dd, char *buf) { return snprintf(buf, strlen(MEGASAS_VERSION) + 2, "%s\n", MEGASAS_VERSION); } static DRIVER_ATTR_RO(version); static ssize_t release_date_show(struct device_driver *dd, char *buf) { return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n", 
MEGASAS_RELDATE); } static DRIVER_ATTR_RO(release_date); static ssize_t support_poll_for_event_show(struct device_driver *dd, char *buf) { return sprintf(buf, "%u\n", support_poll_for_event); } static DRIVER_ATTR_RO(support_poll_for_event); static ssize_t support_device_change_show(struct device_driver *dd, char *buf) { return sprintf(buf, "%u\n", support_device_change); } static DRIVER_ATTR_RO(support_device_change); static ssize_t dbg_lvl_show(struct device_driver *dd, char *buf) { return sprintf(buf, "%u\n", megasas_dbg_lvl); } static ssize_t dbg_lvl_store(struct device_driver *dd, const char *buf, size_t count) { int retval = count; if (sscanf(buf, "%u", &megasas_dbg_lvl) < 1) { printk(KERN_ERR "megasas: could not set dbg_lvl\n"); retval = -EINVAL; } return retval; } static DRIVER_ATTR_RW(dbg_lvl); static ssize_t support_nvme_encapsulation_show(struct device_driver *dd, char *buf) { return sprintf(buf, "%u\n", support_nvme_encapsulation); } static DRIVER_ATTR_RO(support_nvme_encapsulation); static ssize_t support_pci_lane_margining_show(struct device_driver *dd, char *buf) { return sprintf(buf, "%u\n", support_pci_lane_margining); } static DRIVER_ATTR_RO(support_pci_lane_margining); static inline void megasas_remove_scsi_device(struct scsi_device *sdev) { sdev_printk(KERN_INFO, sdev, "SCSI device is removed\n"); scsi_remove_device(sdev); scsi_device_put(sdev); } /** * megasas_update_device_list - Update the PD and LD device list from FW * after an AEN event notification * @instance: Adapter soft state * @event_type: Indicates type of event (PD or LD event) * * @return: Success or failure * * Issue DCMDs to Firmware to update the internal device list in driver. * Based on the FW support, driver sends the HOST_DEVICE_LIST or combination * of PD_LIST/LD_LIST_QUERY DCMDs to get the device list. 
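 *
 * In short (a restatement of the code below, not an extra path):
 * when instance->enable_fw_dev_list is set, a single
 * megasas_host_device_list_query() covers both PDs and LDs; otherwise
 * megasas_get_pd_list() and/or megasas_ld_list_query() are issued
 * depending on the event_type bits.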
*/ static int megasas_update_device_list(struct megasas_instance *instance, int event_type) { int dcmd_ret; if (instance->enable_fw_dev_list) { return megasas_host_device_list_query(instance, false); } else { if (event_type & SCAN_PD_CHANNEL) { dcmd_ret = megasas_get_pd_list(instance); if (dcmd_ret != DCMD_SUCCESS) return dcmd_ret; } if (event_type & SCAN_VD_CHANNEL) { if (!instance->requestorId || megasas_get_ld_vf_affiliation(instance, 0)) { return megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST); } } } return DCMD_SUCCESS; } /** * megasas_add_remove_devices - Add/remove devices to SCSI mid-layer * after an AEN event notification * @instance: Adapter soft state * @scan_type: Indicates type of devices (PD/LD) to add * @return void */ static void megasas_add_remove_devices(struct megasas_instance *instance, int scan_type) { int i, j; u16 pd_index = 0; u16 ld_index = 0; u16 channel = 0, id = 0; struct Scsi_Host *host; struct scsi_device *sdev1; struct MR_HOST_DEVICE_LIST *targetid_list = NULL; struct MR_HOST_DEVICE_LIST_ENTRY *targetid_entry = NULL; host = instance->host; if (instance->enable_fw_dev_list) { targetid_list = instance->host_device_list_buf; for (i = 0; i < targetid_list->count; i++) { targetid_entry = &targetid_list->host_device_list[i]; if (targetid_entry->flags.u.bits.is_sys_pd) { channel = le16_to_cpu(targetid_entry->target_id) / MEGASAS_MAX_DEV_PER_CHANNEL; id = le16_to_cpu(targetid_entry->target_id) % MEGASAS_MAX_DEV_PER_CHANNEL; } else { channel = MEGASAS_MAX_PD_CHANNELS + (le16_to_cpu(targetid_entry->target_id) / MEGASAS_MAX_DEV_PER_CHANNEL); id = le16_to_cpu(targetid_entry->target_id) % MEGASAS_MAX_DEV_PER_CHANNEL; } sdev1 = scsi_device_lookup(host, channel, id, 0); if (!sdev1) { scsi_add_device(host, channel, id, 0); } else { scsi_device_put(sdev1); } } } if (scan_type & SCAN_PD_CHANNEL) { for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) { for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { pd_index = i * MEGASAS_MAX_DEV_PER_CHANNEL + j; sdev1 = scsi_device_lookup(host, i, j, 0); if (instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) { if (!sdev1) scsi_add_device(host, i, j, 0); else scsi_device_put(sdev1); } else { if (sdev1) megasas_remove_scsi_device(sdev1); } } } } if (scan_type & SCAN_VD_CHANNEL) { for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0); if (instance->ld_ids[ld_index] != 0xff) { if (!sdev1) scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0); else scsi_device_put(sdev1); } else { if (sdev1) megasas_remove_scsi_device(sdev1); } } } } } static void megasas_aen_polling(struct work_struct *work) { struct megasas_aen_event *ev = container_of(work, struct megasas_aen_event, hotplug_work.work); struct megasas_instance *instance = ev->instance; union megasas_evt_class_locale class_locale; int event_type = 0; u32 seq_num; u16 ld_target_id; int error; u8 dcmd_ret = DCMD_SUCCESS; struct scsi_device *sdev1; if (!instance) { printk(KERN_ERR "invalid instance!\n"); kfree(ev); return; } /* Don't run the event workqueue thread if OCR is running */ mutex_lock(&instance->reset_mutex); instance->ev = NULL; if (instance->evt_detail) { megasas_decode_evt(instance); switch (le32_to_cpu(instance->evt_detail->code)) { case MR_EVT_PD_INSERTED: case MR_EVT_PD_REMOVED: event_type = SCAN_PD_CHANNEL; break; case MR_EVT_LD_OFFLINE: case MR_EVT_LD_DELETED: ld_target_id = 
instance->evt_detail->args.ld.target_id; sdev1 = scsi_device_lookup(instance->host, MEGASAS_MAX_PD_CHANNELS + (ld_target_id / MEGASAS_MAX_DEV_PER_CHANNEL), (ld_target_id % MEGASAS_MAX_DEV_PER_CHANNEL), 0); if (sdev1) megasas_remove_scsi_device(sdev1); event_type = SCAN_VD_CHANNEL; break; case MR_EVT_LD_CREATED: event_type = SCAN_VD_CHANNEL; break; case MR_EVT_CFG_CLEARED: case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED: case MR_EVT_FOREIGN_CFG_IMPORTED: case MR_EVT_LD_STATE_CHANGE: event_type = SCAN_PD_CHANNEL | SCAN_VD_CHANNEL; dev_info(&instance->pdev->dev, "scanning for scsi%d...\n", instance->host->host_no); break; case MR_EVT_CTRL_PROP_CHANGED: dcmd_ret = megasas_get_ctrl_info(instance); if (dcmd_ret == DCMD_SUCCESS && instance->snapdump_wait_time) { megasas_get_snapdump_properties(instance); dev_info(&instance->pdev->dev, "Snap dump wait time\t: %d\n", instance->snapdump_wait_time); } break; default: event_type = 0; break; } } else { dev_err(&instance->pdev->dev, "invalid evt_detail!\n"); mutex_unlock(&instance->reset_mutex); kfree(ev); return; } if (event_type) dcmd_ret = megasas_update_device_list(instance, event_type); mutex_unlock(&instance->reset_mutex); if (event_type && dcmd_ret == DCMD_SUCCESS) megasas_add_remove_devices(instance, event_type); if (dcmd_ret == DCMD_SUCCESS) seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1; else seq_num = instance->last_seq_num; /* Register AEN with FW for latest sequence number plus 1 */ class_locale.members.reserved = 0; class_locale.members.locale = MR_EVT_LOCALE_ALL; class_locale.members.class = MR_EVT_CLASS_DEBUG; if (instance->aen_cmd != NULL) { kfree(ev); return; } mutex_lock(&instance->reset_mutex); error = megasas_register_aen(instance, seq_num, class_locale.word); if (error) dev_err(&instance->pdev->dev, "register aen failed error %x\n", error); mutex_unlock(&instance->reset_mutex); kfree(ev); } /** * megasas_init - Driver load entry point */ static int __init megasas_init(void) { int rval; /* * Booted in kdump kernel, minimize memory footprints by * disabling few features */ if (reset_devices) { msix_vectors = 1; rdpq_enable = 0; dual_qdepth_disable = 1; poll_queues = 0; } /* * Announce driver version and other information */ pr_info("megasas: %s\n", MEGASAS_VERSION); megasas_dbg_lvl = 0; support_poll_for_event = 2; support_device_change = 1; support_nvme_encapsulation = true; support_pci_lane_margining = true; memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info)); /* * Register character device node */ rval = register_chrdev(0, "megaraid_sas_ioctl", &megasas_mgmt_fops); if (rval < 0) { printk(KERN_DEBUG "megasas: failed to open device node\n"); return rval; } megasas_mgmt_majorno = rval; megasas_init_debugfs(); /* * Register ourselves as PCI hotplug module */ rval = pci_register_driver(&megasas_pci_driver); if (rval) { printk(KERN_DEBUG "megasas: PCI hotplug registration failed \n"); goto err_pcidrv; } if ((event_log_level < MFI_EVT_CLASS_DEBUG) || (event_log_level > MFI_EVT_CLASS_DEAD)) { pr_warn("megaraid_sas: provided event log level is out of range, setting it to default 2(CLASS_CRITICAL), permissible range is: -2 to 4\n"); event_log_level = MFI_EVT_CLASS_CRITICAL; } rval = driver_create_file(&megasas_pci_driver.driver, &driver_attr_version); if (rval) goto err_dcf_attr_ver; rval = driver_create_file(&megasas_pci_driver.driver, &driver_attr_release_date); if (rval) goto err_dcf_rel_date; rval = driver_create_file(&megasas_pci_driver.driver, &driver_attr_support_poll_for_event); if (rval) goto err_dcf_support_poll_for_event; 
rval = driver_create_file(&megasas_pci_driver.driver, &driver_attr_dbg_lvl); if (rval) goto err_dcf_dbg_lvl; rval = driver_create_file(&megasas_pci_driver.driver, &driver_attr_support_device_change); if (rval) goto err_dcf_support_device_change; rval = driver_create_file(&megasas_pci_driver.driver, &driver_attr_support_nvme_encapsulation); if (rval) goto err_dcf_support_nvme_encapsulation; rval = driver_create_file(&megasas_pci_driver.driver, &driver_attr_support_pci_lane_margining); if (rval) goto err_dcf_support_pci_lane_margining; return rval; err_dcf_support_pci_lane_margining: driver_remove_file(&megasas_pci_driver.driver, &driver_attr_support_nvme_encapsulation); err_dcf_support_nvme_encapsulation: driver_remove_file(&megasas_pci_driver.driver, &driver_attr_support_device_change); err_dcf_support_device_change: driver_remove_file(&megasas_pci_driver.driver, &driver_attr_dbg_lvl); err_dcf_dbg_lvl: driver_remove_file(&megasas_pci_driver.driver, &driver_attr_support_poll_for_event); err_dcf_support_poll_for_event: driver_remove_file(&megasas_pci_driver.driver, &driver_attr_release_date); err_dcf_rel_date: driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version); err_dcf_attr_ver: pci_unregister_driver(&megasas_pci_driver); err_pcidrv: megasas_exit_debugfs(); unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl"); return rval; } /** * megasas_exit - Driver unload entry point */ static void __exit megasas_exit(void) { driver_remove_file(&megasas_pci_driver.driver, &driver_attr_dbg_lvl); driver_remove_file(&megasas_pci_driver.driver, &driver_attr_support_poll_for_event); driver_remove_file(&megasas_pci_driver.driver, &driver_attr_support_device_change); driver_remove_file(&megasas_pci_driver.driver, &driver_attr_release_date); driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version); driver_remove_file(&megasas_pci_driver.driver, &driver_attr_support_nvme_encapsulation); driver_remove_file(&megasas_pci_driver.driver, &driver_attr_support_pci_lane_margining); pci_unregister_driver(&megasas_pci_driver); megasas_exit_debugfs(); unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl"); } module_init(megasas_init); module_exit(megasas_exit);
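
/*
 * Illustrative only, not part of the driver: how a management
 * application typically re-arms asynchronous event notification
 * through the char node handled above.  The device node path and the
 * locale value are placeholder assumptions; the field names come from
 * the megasas_mgmt_ioctl_aen() handler.
 *
 *	struct megasas_aen aen;
 *	memset(&aen, 0, sizeof(aen));
 *	aen.host_no = 0;			// SCSI host of the HBA
 *	aen.seq_num = last_seq + 1;		// next event wanted
 *	aen.class_locale_word = locale;		// class/locale filter
 *
 *	int fd = open("/dev/megaraid_sas_ioctl_node", O_RDWR);
 *	// arm SIGIO first, as described at megasas_mgmt_fasync()
 *	fcntl(fd, F_SETOWN, getpid());
 *	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | FASYNC);
 *	ioctl(fd, MEGASAS_IOC_GET_AEN, &aen);
 */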
linux-master
drivers/scsi/megaraid/megaraid_sas_base.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Linux MegaRAID driver for SAS based RAID controllers * * Copyright (c) 2009-2013 LSI Corporation * Copyright (c) 2013-2016 Avago Technologies * Copyright (c) 2016-2018 Broadcom Inc. * * FILE: megaraid_sas_fp.c * * Authors: Broadcom Inc. * Sumant Patro * Varad Talamacki * Manoj Jose * Kashyap Desai <[email protected]> * Sumit Saxena <[email protected]> * * Send feedback to: [email protected] */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/list.h> #include <linux/moduleparam.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/uio.h> #include <linux/uaccess.h> #include <linux/fs.h> #include <linux/compat.h> #include <linux/blkdev.h> #include <linux/poll.h> #include <linux/irq_poll.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include "megaraid_sas_fusion.h" #include "megaraid_sas.h" #include <asm/div64.h> #define LB_PENDING_CMDS_DEFAULT 4 static unsigned int lb_pending_cmds = LB_PENDING_CMDS_DEFAULT; module_param(lb_pending_cmds, int, 0444); MODULE_PARM_DESC(lb_pending_cmds, "Change raid-1 load balancing outstanding " "threshold. Valid Values are 1-128. Default: 4"); #define ABS_DIFF(a, b) (((a) > (b)) ? ((a) - (b)) : ((b) - (a))) #define MR_LD_STATE_OPTIMAL 3 #define SPAN_ROW_SIZE(map, ld, index_) (MR_LdSpanPtrGet(ld, index_, map)->spanRowSize) #define SPAN_ROW_DATA_SIZE(map_, ld, index_) (MR_LdSpanPtrGet(ld, index_, map)->spanRowDataSize) #define SPAN_INVALID 0xff /* Prototypes */ static void mr_update_span_set(struct MR_DRV_RAID_MAP_ALL *map, PLD_SPAN_INFO ldSpanInfo); static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld, u64 stripRow, u16 stripRef, struct IO_REQUEST_INFO *io_info, struct RAID_CONTEXT *pRAID_Context, struct MR_DRV_RAID_MAP_ALL *map); static u64 get_row_from_strip(struct megasas_instance *instance, u32 ld, u64 strip, struct MR_DRV_RAID_MAP_ALL *map); u32 mega_mod64(u64 dividend, u32 divisor) { u64 d; u32 remainder; if (!divisor) printk(KERN_ERR "megasas : DIVISOR is zero, in div fn\n"); d = dividend; remainder = do_div(d, divisor); return remainder; } /** * mega_div64_32 - Do a 64-bit division * @dividend: Dividend * @divisor: Divisor * * @return quotient **/ static u64 mega_div64_32(uint64_t dividend, uint32_t divisor) { u64 d = dividend; if (!divisor) printk(KERN_ERR "megasas : DIVISOR is zero in mod fn\n"); do_div(d, divisor); return d; } struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_DRV_RAID_MAP_ALL *map) { return &map->raidMap.ldSpanMap[ld].ldRaid; } static struct MR_SPAN_BLOCK_INFO *MR_LdSpanInfoGet(u32 ld, struct MR_DRV_RAID_MAP_ALL *map) { return &map->raidMap.ldSpanMap[ld].spanBlock[0]; } static u8 MR_LdDataArmGet(u32 ld, u32 armIdx, struct MR_DRV_RAID_MAP_ALL *map) { return map->raidMap.ldSpanMap[ld].dataArmMap[armIdx]; } u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_DRV_RAID_MAP_ALL *map) { return le16_to_cpu(map->raidMap.arMapInfo[ar].pd[arm]); } u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_DRV_RAID_MAP_ALL *map) { return le16_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef); } __le16 MR_PdDevHandleGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map) { return map->raidMap.devHndlInfo[pd].curDevHdl; } static u8 MR_PdInterfaceTypeGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map) { return map->raidMap.devHndlInfo[pd].interfaceType; } u16 MR_GetLDTgtId(u32 ld, struct MR_DRV_RAID_MAP_ALL *map) { return 
le16_to_cpu(map->raidMap.ldSpanMap[ld].ldRaid.targetId); } u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_DRV_RAID_MAP_ALL *map) { return map->raidMap.ldTgtIdToLd[ldTgtId]; } static struct MR_LD_SPAN *MR_LdSpanPtrGet(u32 ld, u32 span, struct MR_DRV_RAID_MAP_ALL *map) { return &map->raidMap.ldSpanMap[ld].spanBlock[span].span; } /* * This function will Populate Driver Map using firmware raid map */ static int MR_PopulateDrvRaidMap(struct megasas_instance *instance, u64 map_id) { struct fusion_context *fusion = instance->ctrl_context; struct MR_FW_RAID_MAP_ALL *fw_map_old = NULL; struct MR_FW_RAID_MAP *pFwRaidMap = NULL; int i, j; u16 ld_count; struct MR_FW_RAID_MAP_DYNAMIC *fw_map_dyn; struct MR_FW_RAID_MAP_EXT *fw_map_ext; struct MR_RAID_MAP_DESC_TABLE *desc_table; struct MR_DRV_RAID_MAP_ALL *drv_map = fusion->ld_drv_map[(map_id & 1)]; struct MR_DRV_RAID_MAP *pDrvRaidMap = &drv_map->raidMap; void *raid_map_data = NULL; memset(drv_map, 0, fusion->drv_map_sz); memset(pDrvRaidMap->ldTgtIdToLd, 0xff, (sizeof(u16) * MAX_LOGICAL_DRIVES_DYN)); if (instance->max_raid_mapsize) { fw_map_dyn = fusion->ld_map[(map_id & 1)]; desc_table = (struct MR_RAID_MAP_DESC_TABLE *)((void *)fw_map_dyn + le32_to_cpu(fw_map_dyn->desc_table_offset)); if (desc_table != fw_map_dyn->raid_map_desc_table) dev_dbg(&instance->pdev->dev, "offsets of desc table are not matching desc %p original %p\n", desc_table, fw_map_dyn->raid_map_desc_table); ld_count = (u16)le16_to_cpu(fw_map_dyn->ld_count); pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count); pDrvRaidMap->fpPdIoTimeoutSec = fw_map_dyn->fp_pd_io_timeout_sec; pDrvRaidMap->totalSize = cpu_to_le32(sizeof(struct MR_DRV_RAID_MAP_ALL)); /* point to actual data starting point*/ raid_map_data = (void *)fw_map_dyn + le32_to_cpu(fw_map_dyn->desc_table_offset) + le32_to_cpu(fw_map_dyn->desc_table_size); for (i = 0; i < le32_to_cpu(fw_map_dyn->desc_table_num_elements); ++i) { switch (le32_to_cpu(desc_table->raid_map_desc_type)) { case RAID_MAP_DESC_TYPE_DEVHDL_INFO: fw_map_dyn->dev_hndl_info = (struct MR_DEV_HANDLE_INFO *)(raid_map_data + le32_to_cpu(desc_table->raid_map_desc_offset)); memcpy(pDrvRaidMap->devHndlInfo, fw_map_dyn->dev_hndl_info, sizeof(struct MR_DEV_HANDLE_INFO) * le32_to_cpu(desc_table->raid_map_desc_elements)); break; case RAID_MAP_DESC_TYPE_TGTID_INFO: fw_map_dyn->ld_tgt_id_to_ld = (u16 *)(raid_map_data + le32_to_cpu(desc_table->raid_map_desc_offset)); for (j = 0; j < le32_to_cpu(desc_table->raid_map_desc_elements); j++) { pDrvRaidMap->ldTgtIdToLd[j] = le16_to_cpu(fw_map_dyn->ld_tgt_id_to_ld[j]); } break; case RAID_MAP_DESC_TYPE_ARRAY_INFO: fw_map_dyn->ar_map_info = (struct MR_ARRAY_INFO *) (raid_map_data + le32_to_cpu(desc_table->raid_map_desc_offset)); memcpy(pDrvRaidMap->arMapInfo, fw_map_dyn->ar_map_info, sizeof(struct MR_ARRAY_INFO) * le32_to_cpu(desc_table->raid_map_desc_elements)); break; case RAID_MAP_DESC_TYPE_SPAN_INFO: fw_map_dyn->ld_span_map = (struct MR_LD_SPAN_MAP *) (raid_map_data + le32_to_cpu(desc_table->raid_map_desc_offset)); memcpy(pDrvRaidMap->ldSpanMap, fw_map_dyn->ld_span_map, sizeof(struct MR_LD_SPAN_MAP) * le32_to_cpu(desc_table->raid_map_desc_elements)); break; default: dev_dbg(&instance->pdev->dev, "wrong number of desctableElements %d\n", fw_map_dyn->desc_table_num_elements); } ++desc_table; } } else if (instance->supportmax256vd) { fw_map_ext = (struct MR_FW_RAID_MAP_EXT *)fusion->ld_map[(map_id & 1)]; ld_count = (u16)le16_to_cpu(fw_map_ext->ldCount); if (ld_count > MAX_LOGICAL_DRIVES_EXT) { dev_dbg(&instance->pdev->dev, "megaraid_sas: 
LD count exposed in RAID map in not valid\n"); return 1; } pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count); pDrvRaidMap->fpPdIoTimeoutSec = fw_map_ext->fpPdIoTimeoutSec; for (i = 0; i < (MAX_LOGICAL_DRIVES_EXT); i++) pDrvRaidMap->ldTgtIdToLd[i] = (u16)fw_map_ext->ldTgtIdToLd[i]; memcpy(pDrvRaidMap->ldSpanMap, fw_map_ext->ldSpanMap, sizeof(struct MR_LD_SPAN_MAP) * ld_count); memcpy(pDrvRaidMap->arMapInfo, fw_map_ext->arMapInfo, sizeof(struct MR_ARRAY_INFO) * MAX_API_ARRAYS_EXT); memcpy(pDrvRaidMap->devHndlInfo, fw_map_ext->devHndlInfo, sizeof(struct MR_DEV_HANDLE_INFO) * MAX_RAIDMAP_PHYSICAL_DEVICES); /* New Raid map will not set totalSize, so keep expected value * for legacy code in ValidateMapInfo */ pDrvRaidMap->totalSize = cpu_to_le32(sizeof(struct MR_FW_RAID_MAP_EXT)); } else { fw_map_old = (struct MR_FW_RAID_MAP_ALL *) fusion->ld_map[(map_id & 1)]; pFwRaidMap = &fw_map_old->raidMap; ld_count = (u16)le32_to_cpu(pFwRaidMap->ldCount); if (ld_count > MAX_LOGICAL_DRIVES) { dev_dbg(&instance->pdev->dev, "LD count exposed in RAID map in not valid\n"); return 1; } pDrvRaidMap->totalSize = pFwRaidMap->totalSize; pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count); pDrvRaidMap->fpPdIoTimeoutSec = pFwRaidMap->fpPdIoTimeoutSec; for (i = 0; i < MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS; i++) pDrvRaidMap->ldTgtIdToLd[i] = (u8)pFwRaidMap->ldTgtIdToLd[i]; for (i = 0; i < ld_count; i++) { pDrvRaidMap->ldSpanMap[i] = pFwRaidMap->ldSpanMap[i]; } memcpy(pDrvRaidMap->arMapInfo, pFwRaidMap->arMapInfo, sizeof(struct MR_ARRAY_INFO) * MAX_RAIDMAP_ARRAYS); memcpy(pDrvRaidMap->devHndlInfo, pFwRaidMap->devHndlInfo, sizeof(struct MR_DEV_HANDLE_INFO) * MAX_RAIDMAP_PHYSICAL_DEVICES); } return 0; } /* * This function will validate Map info data provided by FW */ u8 MR_ValidateMapInfo(struct megasas_instance *instance, u64 map_id) { struct fusion_context *fusion; struct MR_DRV_RAID_MAP_ALL *drv_map; struct MR_DRV_RAID_MAP *pDrvRaidMap; struct LD_LOAD_BALANCE_INFO *lbInfo; PLD_SPAN_INFO ldSpanInfo; struct MR_LD_RAID *raid; u16 num_lds, i; u16 ld; u32 expected_size; if (MR_PopulateDrvRaidMap(instance, map_id)) return 0; fusion = instance->ctrl_context; drv_map = fusion->ld_drv_map[(map_id & 1)]; pDrvRaidMap = &drv_map->raidMap; lbInfo = fusion->load_balance_info; ldSpanInfo = fusion->log_to_span; if (instance->max_raid_mapsize) expected_size = sizeof(struct MR_DRV_RAID_MAP_ALL); else if (instance->supportmax256vd) expected_size = sizeof(struct MR_FW_RAID_MAP_EXT); else expected_size = struct_size_t(struct MR_FW_RAID_MAP, ldSpanMap, le16_to_cpu(pDrvRaidMap->ldCount)); if (le32_to_cpu(pDrvRaidMap->totalSize) != expected_size) { dev_dbg(&instance->pdev->dev, "megasas: map info structure size 0x%x", le32_to_cpu(pDrvRaidMap->totalSize)); dev_dbg(&instance->pdev->dev, "is not matching expected size 0x%x\n", (unsigned int)expected_size); dev_err(&instance->pdev->dev, "megasas: span map %x, pDrvRaidMap->totalSize : %x\n", (unsigned int)sizeof(struct MR_LD_SPAN_MAP), le32_to_cpu(pDrvRaidMap->totalSize)); return 0; } if (instance->UnevenSpanSupport) mr_update_span_set(drv_map, ldSpanInfo); if (lbInfo) mr_update_load_balance_params(drv_map, lbInfo); num_lds = le16_to_cpu(drv_map->raidMap.ldCount); memcpy(instance->ld_ids_prev, instance->ld_ids_from_raidmap, sizeof(instance->ld_ids_from_raidmap)); memset(instance->ld_ids_from_raidmap, 0xff, MEGASAS_MAX_LD_IDS); /*Convert Raid capability values to CPU arch */ for (i = 0; (num_lds > 0) && (i < MAX_LOGICAL_DRIVES_EXT); i++) { ld = MR_TargetIdToLdGet(i, drv_map); /* For non 
existing VDs, iterate to next VD*/ if (ld >= MEGASAS_MAX_SUPPORTED_LD_IDS) continue; raid = MR_LdRaidGet(ld, drv_map); le32_to_cpus((u32 *)&raid->capability); instance->ld_ids_from_raidmap[i] = i; num_lds--; } return 1; } static u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk, struct MR_DRV_RAID_MAP_ALL *map) { struct MR_SPAN_BLOCK_INFO *pSpanBlock = MR_LdSpanInfoGet(ld, map); struct MR_QUAD_ELEMENT *quad; struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map); u32 span, j; for (span = 0; span < raid->spanDepth; span++, pSpanBlock++) { for (j = 0; j < le32_to_cpu(pSpanBlock->block_span_info.noElements); j++) { quad = &pSpanBlock->block_span_info.quad[j]; if (le32_to_cpu(quad->diff) == 0) return SPAN_INVALID; if (le64_to_cpu(quad->logStart) <= row && row <= le64_to_cpu(quad->logEnd) && (mega_mod64(row - le64_to_cpu(quad->logStart), le32_to_cpu(quad->diff))) == 0) { if (span_blk != NULL) { u64 blk; blk = mega_div64_32((row-le64_to_cpu(quad->logStart)), le32_to_cpu(quad->diff)); blk = (blk + le64_to_cpu(quad->offsetInSpan)) << raid->stripeShift; *span_blk = blk; } return span; } } } return SPAN_INVALID; } /* ****************************************************************************** * * This routine calculates the Span block for given row using spanset. * * Inputs : * instance - HBA instance * ld - Logical drive number * row - Row number * map - LD map * * Outputs : * * span - Span number * block - Absolute Block number in the physical disk * div_error - Devide error code. */ static u32 mr_spanset_get_span_block(struct megasas_instance *instance, u32 ld, u64 row, u64 *span_blk, struct MR_DRV_RAID_MAP_ALL *map) { struct fusion_context *fusion = instance->ctrl_context; struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map); LD_SPAN_SET *span_set; struct MR_QUAD_ELEMENT *quad; u32 span, info; PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span; for (info = 0; info < MAX_QUAD_DEPTH; info++) { span_set = &(ldSpanInfo[ld].span_set[info]); if (span_set->span_row_data_width == 0) break; if (row > span_set->data_row_end) continue; for (span = 0; span < raid->spanDepth; span++) if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span]. block_span_info.noElements) >= info+1) { quad = &map->raidMap.ldSpanMap[ld]. spanBlock[span]. block_span_info.quad[info]; if (le32_to_cpu(quad->diff) == 0) return SPAN_INVALID; if (le64_to_cpu(quad->logStart) <= row && row <= le64_to_cpu(quad->logEnd) && (mega_mod64(row - le64_to_cpu(quad->logStart), le32_to_cpu(quad->diff))) == 0) { if (span_blk != NULL) { u64 blk; blk = mega_div64_32 ((row - le64_to_cpu(quad->logStart)), le32_to_cpu(quad->diff)); blk = (blk + le64_to_cpu(quad->offsetInSpan)) << raid->stripeShift; *span_blk = blk; } return span; } } } return SPAN_INVALID; } /* ****************************************************************************** * * This routine calculates the row for given strip using spanset. 
* * Inputs : * instance - HBA instance * ld - Logical drive number * Strip - Strip * map - LD map * * Outputs : * * row - row associated with strip */ static u64 get_row_from_strip(struct megasas_instance *instance, u32 ld, u64 strip, struct MR_DRV_RAID_MAP_ALL *map) { struct fusion_context *fusion = instance->ctrl_context; struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map); LD_SPAN_SET *span_set; PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span; u32 info, strip_offset, span, span_offset; u64 span_set_Strip, span_set_Row, retval; for (info = 0; info < MAX_QUAD_DEPTH; info++) { span_set = &(ldSpanInfo[ld].span_set[info]); if (span_set->span_row_data_width == 0) break; if (strip > span_set->data_strip_end) continue; span_set_Strip = strip - span_set->data_strip_start; strip_offset = mega_mod64(span_set_Strip, span_set->span_row_data_width); span_set_Row = mega_div64_32(span_set_Strip, span_set->span_row_data_width) * span_set->diff; for (span = 0, span_offset = 0; span < raid->spanDepth; span++) if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span]. block_span_info.noElements) >= info+1) { if (strip_offset >= span_set->strip_offset[span]) span_offset++; else break; } retval = (span_set->data_row_start + span_set_Row + (span_offset - 1)); return retval; } return -1LLU; } /* ****************************************************************************** * * This routine calculates the Start Strip for given row using spanset. * * Inputs : * instance - HBA instance * ld - Logical drive number * row - Row number * map - LD map * * Outputs : * * Strip - Start strip associated with row */ static u64 get_strip_from_row(struct megasas_instance *instance, u32 ld, u64 row, struct MR_DRV_RAID_MAP_ALL *map) { struct fusion_context *fusion = instance->ctrl_context; struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map); LD_SPAN_SET *span_set; struct MR_QUAD_ELEMENT *quad; PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span; u32 span, info; u64 strip; for (info = 0; info < MAX_QUAD_DEPTH; info++) { span_set = &(ldSpanInfo[ld].span_set[info]); if (span_set->span_row_data_width == 0) break; if (row > span_set->data_row_end) continue; for (span = 0; span < raid->spanDepth; span++) if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span]. block_span_info.noElements) >= info+1) { quad = &map->raidMap.ldSpanMap[ld]. spanBlock[span].block_span_info.quad[info]; if (le64_to_cpu(quad->logStart) <= row && row <= le64_to_cpu(quad->logEnd) && mega_mod64((row - le64_to_cpu(quad->logStart)), le32_to_cpu(quad->diff)) == 0) { strip = mega_div64_32 (((row - span_set->data_row_start) - le64_to_cpu(quad->logStart)), le32_to_cpu(quad->diff)); strip *= span_set->span_row_data_width; strip += span_set->data_strip_start; strip += span_set->strip_offset[span]; return strip; } } } dev_err(&instance->pdev->dev, "get_strip_from_row" "returns invalid strip for ld=%x, row=%lx\n", ld, (long unsigned int)row); return -1; } /* ****************************************************************************** * * This routine calculates the Physical Arm for given strip using spanset. 
* * Inputs : * instance - HBA instance * ld - Logical drive number * strip - Strip * map - LD map * * Outputs : * * Phys Arm - Phys Arm associated with strip */ static u32 get_arm_from_strip(struct megasas_instance *instance, u32 ld, u64 strip, struct MR_DRV_RAID_MAP_ALL *map) { struct fusion_context *fusion = instance->ctrl_context; struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map); LD_SPAN_SET *span_set; PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span; u32 info, strip_offset, span, span_offset, retval; for (info = 0 ; info < MAX_QUAD_DEPTH; info++) { span_set = &(ldSpanInfo[ld].span_set[info]); if (span_set->span_row_data_width == 0) break; if (strip > span_set->data_strip_end) continue; strip_offset = (uint)mega_mod64 ((strip - span_set->data_strip_start), span_set->span_row_data_width); for (span = 0, span_offset = 0; span < raid->spanDepth; span++) if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span]. block_span_info.noElements) >= info+1) { if (strip_offset >= span_set->strip_offset[span]) span_offset = span_set->strip_offset[span]; else break; } retval = (strip_offset - span_offset); return retval; } dev_err(&instance->pdev->dev, "get_arm_from_strip" "returns invalid arm for ld=%x strip=%lx\n", ld, (long unsigned int)strip); return -1; } /* This Function will return Phys arm */ static u8 get_arm(struct megasas_instance *instance, u32 ld, u8 span, u64 stripe, struct MR_DRV_RAID_MAP_ALL *map) { struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map); /* Need to check correct default value */ u32 arm = 0; switch (raid->level) { case 0: case 5: case 6: arm = mega_mod64(stripe, SPAN_ROW_SIZE(map, ld, span)); break; case 1: /* start with logical arm */ arm = get_arm_from_strip(instance, ld, stripe, map); if (arm != -1U) arm *= 2; break; } return arm; } /* ****************************************************************************** * * This routine calculates the arm, span and block for the specified stripe and * reference in stripe using spanset * * Inputs : * * ld - Logical drive number * stripRow - Stripe number * stripRef - Reference in stripe * * Outputs : * * span - Span number * block - Absolute Block number in the physical disk */ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld, u64 stripRow, u16 stripRef, struct IO_REQUEST_INFO *io_info, struct RAID_CONTEXT *pRAID_Context, struct MR_DRV_RAID_MAP_ALL *map) { struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map); u32 pd, arRef, r1_alt_pd; u8 physArm, span; u64 row; u8 retval = true; u64 *pdBlock = &io_info->pdBlock; __le16 *pDevHandle = &io_info->devHandle; u8 *pPdInterface = &io_info->pd_interface; u32 logArm, rowMod, armQ, arm; *pDevHandle = cpu_to_le16(MR_DEVHANDLE_INVALID); /*Get row and span from io_info for Uneven Span IO.*/ row = io_info->start_row; span = io_info->start_span; if (raid->level == 6) { logArm = get_arm_from_strip(instance, ld, stripRow, map); if (logArm == -1U) return false; rowMod = mega_mod64(row, SPAN_ROW_SIZE(map, ld, span)); armQ = SPAN_ROW_SIZE(map, ld, span) - 1 - rowMod; arm = armQ + 1 + logArm; if (arm >= SPAN_ROW_SIZE(map, ld, span)) arm -= SPAN_ROW_SIZE(map, ld, span); physArm = (u8)arm; } else /* Calculate the arm */ physArm = get_arm(instance, ld, span, stripRow, map); if (physArm == 0xFF) return false; arRef = MR_LdSpanArrayGet(ld, span, map); pd = MR_ArPdGet(arRef, physArm, map); if (pd != MR_PD_INVALID) { *pDevHandle = MR_PdDevHandleGet(pd, map); *pPdInterface = MR_PdInterfaceTypeGet(pd, map); /* get second pd also for raid 1/10 fast path writes*/ if ((instance->adapter_type 
>= VENTURA_SERIES) && (raid->level == 1) && !io_info->isRead) { r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map); if (r1_alt_pd != MR_PD_INVALID) io_info->r1_alt_dev_handle = MR_PdDevHandleGet(r1_alt_pd, map); } } else { if ((raid->level >= 5) && ((instance->adapter_type == THUNDERBOLT_SERIES) || ((instance->adapter_type == INVADER_SERIES) && (raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))) pRAID_Context->reg_lock_flags = REGION_TYPE_EXCLUSIVE; else if (raid->level == 1) { physArm = physArm + 1; pd = MR_ArPdGet(arRef, physArm, map); if (pd != MR_PD_INVALID) { *pDevHandle = MR_PdDevHandleGet(pd, map); *pPdInterface = MR_PdInterfaceTypeGet(pd, map); } } } *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk); if (instance->adapter_type >= VENTURA_SERIES) { ((struct RAID_CONTEXT_G35 *)pRAID_Context)->span_arm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm; io_info->span_arm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm; } else { pRAID_Context->span_arm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm; io_info->span_arm = pRAID_Context->span_arm; } io_info->pd_after_lb = pd; return retval; } /* ****************************************************************************** * * This routine calculates the arm, span and block for the specified stripe and * reference in stripe. * * Inputs : * * ld - Logical drive number * stripRow - Stripe number * stripRef - Reference in stripe * * Outputs : * * span - Span number * block - Absolute Block number in the physical disk */ static u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow, u16 stripRef, struct IO_REQUEST_INFO *io_info, struct RAID_CONTEXT *pRAID_Context, struct MR_DRV_RAID_MAP_ALL *map) { struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map); u32 pd, arRef, r1_alt_pd; u8 physArm, span; u64 row; u8 retval = true; u64 *pdBlock = &io_info->pdBlock; __le16 *pDevHandle = &io_info->devHandle; u8 *pPdInterface = &io_info->pd_interface; *pDevHandle = cpu_to_le16(MR_DEVHANDLE_INVALID); row = mega_div64_32(stripRow, raid->rowDataSize); if (raid->level == 6) { /* logical arm within row */ u32 logArm = mega_mod64(stripRow, raid->rowDataSize); u32 rowMod, armQ, arm; if (raid->rowSize == 0) return false; /* get logical row mod */ rowMod = mega_mod64(row, raid->rowSize); armQ = raid->rowSize-1-rowMod; /* index of Q drive */ arm = armQ+1+logArm; /* data always logically follows Q */ if (arm >= raid->rowSize) /* handle wrap condition */ arm -= raid->rowSize; physArm = (u8)arm; } else { if (raid->modFactor == 0) return false; physArm = MR_LdDataArmGet(ld, mega_mod64(stripRow, raid->modFactor), map); } if (raid->spanDepth == 1) { span = 0; *pdBlock = row << raid->stripeShift; } else { span = (u8)MR_GetSpanBlock(ld, row, pdBlock, map); if (span == SPAN_INVALID) return false; } /* Get the array on which this span is present */ arRef = MR_LdSpanArrayGet(ld, span, map); pd = MR_ArPdGet(arRef, physArm, map); /* Get the pd */ if (pd != MR_PD_INVALID) { /* Get dev handle from Pd. 
*/ *pDevHandle = MR_PdDevHandleGet(pd, map); *pPdInterface = MR_PdInterfaceTypeGet(pd, map); /* get second pd also for raid 1/10 fast path writes*/ if ((instance->adapter_type >= VENTURA_SERIES) && (raid->level == 1) && !io_info->isRead) { r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map); if (r1_alt_pd != MR_PD_INVALID) io_info->r1_alt_dev_handle = MR_PdDevHandleGet(r1_alt_pd, map); } } else { if ((raid->level >= 5) && ((instance->adapter_type == THUNDERBOLT_SERIES) || ((instance->adapter_type == INVADER_SERIES) && (raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))) pRAID_Context->reg_lock_flags = REGION_TYPE_EXCLUSIVE; else if (raid->level == 1) { /* Get alternate Pd. */ physArm = physArm + 1; pd = MR_ArPdGet(arRef, physArm, map); if (pd != MR_PD_INVALID) { /* Get dev handle from Pd */ *pDevHandle = MR_PdDevHandleGet(pd, map); *pPdInterface = MR_PdInterfaceTypeGet(pd, map); } } } *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk); if (instance->adapter_type >= VENTURA_SERIES) { ((struct RAID_CONTEXT_G35 *)pRAID_Context)->span_arm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm; io_info->span_arm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm; } else { pRAID_Context->span_arm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm; io_info->span_arm = pRAID_Context->span_arm; } io_info->pd_after_lb = pd; return retval; } /* * mr_get_phy_params_r56_rmw - Calculate parameters for R56 CTIO write operation * @instance: Adapter soft state * @ld: LD index * @stripNo: Strip Number * @io_info: IO info structure pointer * pRAID_Context: RAID context pointer * map: RAID map pointer * * This routine calculates the logical arm, data Arm, row number and parity arm * for R56 CTIO write operation. */ static void mr_get_phy_params_r56_rmw(struct megasas_instance *instance, u32 ld, u64 stripNo, struct IO_REQUEST_INFO *io_info, struct RAID_CONTEXT_G35 *pRAID_Context, struct MR_DRV_RAID_MAP_ALL *map) { struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map); u8 span, dataArms, arms, dataArm, logArm; s8 rightmostParityArm, PParityArm; u64 rowNum; u64 *pdBlock = &io_info->pdBlock; dataArms = raid->rowDataSize; arms = raid->rowSize; rowNum = mega_div64_32(stripNo, dataArms); /* parity disk arm, first arm is 0 */ rightmostParityArm = (arms - 1) - mega_mod64(rowNum, arms); /* logical arm within row */ logArm = mega_mod64(stripNo, dataArms); /* physical arm for data */ dataArm = mega_mod64((rightmostParityArm + 1 + logArm), arms); if (raid->spanDepth == 1) { span = 0; } else { span = (u8)MR_GetSpanBlock(ld, rowNum, pdBlock, map); if (span == SPAN_INVALID) return; } if (raid->level == 6) { /* P Parity arm, note this can go negative adjust if negative */ PParityArm = (arms - 2) - mega_mod64(rowNum, arms); if (PParityArm < 0) PParityArm += arms; /* rightmostParityArm is P-Parity for RAID 5 and Q-Parity for RAID */ pRAID_Context->flow_specific.r56_arm_map = rightmostParityArm; pRAID_Context->flow_specific.r56_arm_map |= (u16)(PParityArm << RAID_CTX_R56_P_ARM_SHIFT); } else { pRAID_Context->flow_specific.r56_arm_map |= (u16)(rightmostParityArm << RAID_CTX_R56_P_ARM_SHIFT); } pRAID_Context->reg_lock_row_lba = cpu_to_le64(rowNum); pRAID_Context->flow_specific.r56_arm_map |= (u16)(logArm << RAID_CTX_R56_LOG_ARM_SHIFT); cpu_to_le16s(&pRAID_Context->flow_specific.r56_arm_map); pRAID_Context->span_arm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | dataArm; pRAID_Context->raid_flags = (MR_RAID_FLAGS_IO_SUB_TYPE_R56_DIV_OFFLOAD << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT); return; } /* 
****************************************************************************** * * MR_BuildRaidContext function * * This function will initiate command processing. The start/end row and strip * information is calculated then the lock is acquired. * This function will return 0 if region lock was acquired OR return num strips */ u8 MR_BuildRaidContext(struct megasas_instance *instance, struct IO_REQUEST_INFO *io_info, struct RAID_CONTEXT *pRAID_Context, struct MR_DRV_RAID_MAP_ALL *map, u8 **raidLUN) { struct fusion_context *fusion; struct MR_LD_RAID *raid; u32 stripSize, stripe_mask; u64 endLba, endStrip, endRow, start_row, start_strip; u64 regStart; u32 regSize; u8 num_strips, numRows; u16 ref_in_start_stripe, ref_in_end_stripe; u64 ldStartBlock; u32 numBlocks, ldTgtId; u8 isRead; u8 retval = 0; u8 startlba_span = SPAN_INVALID; u64 *pdBlock = &io_info->pdBlock; u16 ld; ldStartBlock = io_info->ldStartBlock; numBlocks = io_info->numBlocks; ldTgtId = io_info->ldTgtId; isRead = io_info->isRead; io_info->IoforUnevenSpan = 0; io_info->start_span = SPAN_INVALID; fusion = instance->ctrl_context; ld = MR_TargetIdToLdGet(ldTgtId, map); raid = MR_LdRaidGet(ld, map); /*check read ahead bit*/ io_info->ra_capable = raid->capability.ra_capable; /* * if rowDataSize @RAID map and spanRowDataSize @SPAN INFO are zero * return FALSE */ if (raid->rowDataSize == 0) { if (MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize == 0) return false; else if (instance->UnevenSpanSupport) { io_info->IoforUnevenSpan = 1; } else { dev_info(&instance->pdev->dev, "raid->rowDataSize is 0, but has SPAN[0]" "rowDataSize = 0x%0x," "but there is _NO_ UnevenSpanSupport\n", MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize); return false; } } stripSize = 1 << raid->stripeShift; stripe_mask = stripSize-1; io_info->data_arms = raid->rowDataSize; /* * calculate starting row and stripe, and number of strips and rows */ start_strip = ldStartBlock >> raid->stripeShift; ref_in_start_stripe = (u16)(ldStartBlock & stripe_mask); endLba = ldStartBlock + numBlocks - 1; ref_in_end_stripe = (u16)(endLba & stripe_mask); endStrip = endLba >> raid->stripeShift; num_strips = (u8)(endStrip - start_strip + 1); /* End strip */ if (io_info->IoforUnevenSpan) { start_row = get_row_from_strip(instance, ld, start_strip, map); endRow = get_row_from_strip(instance, ld, endStrip, map); if (start_row == -1ULL || endRow == -1ULL) { dev_info(&instance->pdev->dev, "return from %s %d." "Send IO w/o region lock.\n", __func__, __LINE__); return false; } if (raid->spanDepth == 1) { startlba_span = 0; *pdBlock = start_row << raid->stripeShift; } else startlba_span = (u8)mr_spanset_get_span_block(instance, ld, start_row, pdBlock, map); if (startlba_span == SPAN_INVALID) { dev_info(&instance->pdev->dev, "return from %s %d" "for row 0x%llx,start strip %llx" "endSrip %llx\n", __func__, __LINE__, (unsigned long long)start_row, (unsigned long long)start_strip, (unsigned long long)endStrip); return false; } io_info->start_span = startlba_span; io_info->start_row = start_row; } else { start_row = mega_div64_32(start_strip, raid->rowDataSize); endRow = mega_div64_32(endStrip, raid->rowDataSize); } numRows = (u8)(endRow - start_row + 1); /* * calculate region info. 
*/ /* assume region is at the start of the first row */ regStart = start_row << raid->stripeShift; /* assume this IO needs the full row - we'll adjust if not true */ regSize = stripSize; io_info->do_fp_rlbypass = raid->capability.fpBypassRegionLock; /* Check if we can send this I/O via FastPath */ if (raid->capability.fpCapable) { if (isRead) io_info->fpOkForIo = (raid->capability.fpReadCapable && ((num_strips == 1) || raid->capability. fpReadAcrossStripe)); else io_info->fpOkForIo = (raid->capability.fpWriteCapable && ((num_strips == 1) || raid->capability. fpWriteAcrossStripe)); } else io_info->fpOkForIo = false; if (numRows == 1) { /* single-strip IOs can always lock only the data needed */ if (num_strips == 1) { regStart += ref_in_start_stripe; regSize = numBlocks; } /* multi-strip IOs always need to full stripe locked */ } else if (io_info->IoforUnevenSpan == 0) { /* * For Even span region lock optimization. * If the start strip is the last in the start row */ if (start_strip == (start_row + 1) * raid->rowDataSize - 1) { regStart += ref_in_start_stripe; /* initialize count to sectors from startref to end of strip */ regSize = stripSize - ref_in_start_stripe; } /* add complete rows in the middle of the transfer */ if (numRows > 2) regSize += (numRows-2) << raid->stripeShift; /* if IO ends within first strip of last row*/ if (endStrip == endRow*raid->rowDataSize) regSize += ref_in_end_stripe+1; else regSize += stripSize; } else { /* * For Uneven span region lock optimization. * If the start strip is the last in the start row */ if (start_strip == (get_strip_from_row(instance, ld, start_row, map) + SPAN_ROW_DATA_SIZE(map, ld, startlba_span) - 1)) { regStart += ref_in_start_stripe; /* initialize count to sectors from * startRef to end of strip */ regSize = stripSize - ref_in_start_stripe; } /* Add complete rows in the middle of the transfer*/ if (numRows > 2) /* Add complete rows in the middle of the transfer*/ regSize += (numRows-2) << raid->stripeShift; /* if IO ends within first strip of last row */ if (endStrip == get_strip_from_row(instance, ld, endRow, map)) regSize += ref_in_end_stripe + 1; else regSize += stripSize; } pRAID_Context->timeout_value = cpu_to_le16(raid->fpIoTimeoutForLd ? raid->fpIoTimeoutForLd : map->raidMap.fpPdIoTimeoutSec); if (instance->adapter_type == INVADER_SERIES) pRAID_Context->reg_lock_flags = (isRead) ? raid->regTypeReqOnRead : raid->regTypeReqOnWrite; else if (instance->adapter_type == THUNDERBOLT_SERIES) pRAID_Context->reg_lock_flags = (isRead) ? REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite; pRAID_Context->virtual_disk_tgt_id = raid->targetId; pRAID_Context->reg_lock_row_lba = cpu_to_le64(regStart); pRAID_Context->reg_lock_length = cpu_to_le32(regSize); pRAID_Context->config_seq_num = raid->seqNum; /* save pointer to raid->LUN array */ *raidLUN = raid->LUN; /* Aero R5/6 Division Offload for WRITE */ if (fusion->r56_div_offload && (raid->level >= 5) && !isRead) { mr_get_phy_params_r56_rmw(instance, ld, start_strip, io_info, (struct RAID_CONTEXT_G35 *)pRAID_Context, map); return true; } /*Get Phy Params only if FP capable, or else leave it to MR firmware to do the calculation.*/ if (io_info->fpOkForIo) { retval = io_info->IoforUnevenSpan ? 
mr_spanset_get_phy_params(instance, ld, start_strip, ref_in_start_stripe, io_info, pRAID_Context, map) : MR_GetPhyParams(instance, ld, start_strip, ref_in_start_stripe, io_info, pRAID_Context, map); /* If IO on an invalid Pd, then FP is not possible.*/ if (io_info->devHandle == MR_DEVHANDLE_INVALID) io_info->fpOkForIo = false; return retval; } else if (isRead) { uint stripIdx; for (stripIdx = 0; stripIdx < num_strips; stripIdx++) { retval = io_info->IoforUnevenSpan ? mr_spanset_get_phy_params(instance, ld, start_strip + stripIdx, ref_in_start_stripe, io_info, pRAID_Context, map) : MR_GetPhyParams(instance, ld, start_strip + stripIdx, ref_in_start_stripe, io_info, pRAID_Context, map); if (!retval) return true; } } return true; } /* ****************************************************************************** * * This routine pepare spanset info from Valid Raid map and store it into * local copy of ldSpanInfo per instance data structure. * * Inputs : * map - LD map * ldSpanInfo - ldSpanInfo per HBA instance * */ void mr_update_span_set(struct MR_DRV_RAID_MAP_ALL *map, PLD_SPAN_INFO ldSpanInfo) { u8 span, count; u32 element, span_row_width; u64 span_row; struct MR_LD_RAID *raid; LD_SPAN_SET *span_set, *span_set_prev; struct MR_QUAD_ELEMENT *quad; int ldCount; u16 ld; for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) { ld = MR_TargetIdToLdGet(ldCount, map); if (ld >= (MAX_LOGICAL_DRIVES_EXT - 1)) continue; raid = MR_LdRaidGet(ld, map); for (element = 0; element < MAX_QUAD_DEPTH; element++) { for (span = 0; span < raid->spanDepth; span++) { if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span]. block_span_info.noElements) < element + 1) continue; span_set = &(ldSpanInfo[ld].span_set[element]); quad = &map->raidMap.ldSpanMap[ld]. spanBlock[span].block_span_info. quad[element]; span_set->diff = le32_to_cpu(quad->diff); for (count = 0, span_row_width = 0; count < raid->spanDepth; count++) { if (le32_to_cpu(map->raidMap.ldSpanMap[ld]. spanBlock[count]. block_span_info. noElements) >= element + 1) { span_set->strip_offset[count] = span_row_width; span_row_width += MR_LdSpanPtrGet (ld, count, map)->spanRowDataSize; } } span_set->span_row_data_width = span_row_width; span_row = mega_div64_32(((le64_to_cpu(quad->logEnd) - le64_to_cpu(quad->logStart)) + le32_to_cpu(quad->diff)), le32_to_cpu(quad->diff)); if (element == 0) { span_set->log_start_lba = 0; span_set->log_end_lba = ((span_row << raid->stripeShift) * span_row_width) - 1; span_set->span_row_start = 0; span_set->span_row_end = span_row - 1; span_set->data_strip_start = 0; span_set->data_strip_end = (span_row * span_row_width) - 1; span_set->data_row_start = 0; span_set->data_row_end = (span_row * le32_to_cpu(quad->diff)) - 1; } else { span_set_prev = &(ldSpanInfo[ld]. 
span_set[element - 1]); span_set->log_start_lba = span_set_prev->log_end_lba + 1; span_set->log_end_lba = span_set->log_start_lba + ((span_row << raid->stripeShift) * span_row_width) - 1; span_set->span_row_start = span_set_prev->span_row_end + 1; span_set->span_row_end = span_set->span_row_start + span_row - 1; span_set->data_strip_start = span_set_prev->data_strip_end + 1; span_set->data_strip_end = span_set->data_strip_start + (span_row * span_row_width) - 1; span_set->data_row_start = span_set_prev->data_row_end + 1; span_set->data_row_end = span_set->data_row_start + (span_row * le32_to_cpu(quad->diff)) - 1; } break; } if (span == raid->spanDepth) break; } } } void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *drv_map, struct LD_LOAD_BALANCE_INFO *lbInfo) { int ldCount; u16 ld; struct MR_LD_RAID *raid; if (lb_pending_cmds > 128 || lb_pending_cmds < 1) lb_pending_cmds = LB_PENDING_CMDS_DEFAULT; for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) { ld = MR_TargetIdToLdGet(ldCount, drv_map); if (ld >= MAX_LOGICAL_DRIVES_EXT - 1) { lbInfo[ldCount].loadBalanceFlag = 0; continue; } raid = MR_LdRaidGet(ld, drv_map); if ((raid->level != 1) || (raid->ldState != MR_LD_STATE_OPTIMAL)) { lbInfo[ldCount].loadBalanceFlag = 0; continue; } lbInfo[ldCount].loadBalanceFlag = 1; } } static u8 megasas_get_best_arm_pd(struct megasas_instance *instance, struct LD_LOAD_BALANCE_INFO *lbInfo, struct IO_REQUEST_INFO *io_info, struct MR_DRV_RAID_MAP_ALL *drv_map) { struct MR_LD_RAID *raid; u16 pd1_dev_handle; u16 pend0, pend1, ld; u64 diff0, diff1; u8 bestArm, pd0, pd1, span, arm; u32 arRef, span_row_size; u64 block = io_info->ldStartBlock; u32 count = io_info->numBlocks; span = ((io_info->span_arm & RAID_CTX_SPANARM_SPAN_MASK) >> RAID_CTX_SPANARM_SPAN_SHIFT); arm = (io_info->span_arm & RAID_CTX_SPANARM_ARM_MASK); ld = MR_TargetIdToLdGet(io_info->ldTgtId, drv_map); raid = MR_LdRaidGet(ld, drv_map); span_row_size = instance->UnevenSpanSupport ? SPAN_ROW_SIZE(drv_map, ld, span) : raid->rowSize; arRef = MR_LdSpanArrayGet(ld, span, drv_map); pd0 = MR_ArPdGet(arRef, arm, drv_map); pd1 = MR_ArPdGet(arRef, (arm + 1) >= span_row_size ? (arm + 1 - span_row_size) : arm + 1, drv_map); /* Get PD1 Dev Handle */ pd1_dev_handle = MR_PdDevHandleGet(pd1, drv_map); if (pd1_dev_handle == MR_DEVHANDLE_INVALID) { bestArm = arm; } else { /* get the pending cmds for the data and mirror arms */ pend0 = atomic_read(&lbInfo->scsi_pending_cmds[pd0]); pend1 = atomic_read(&lbInfo->scsi_pending_cmds[pd1]); /* Determine the disk whose head is nearer to the req. block */ diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[pd0]); diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[pd1]); bestArm = (diff0 <= diff1 ? arm : arm ^ 1); /* Make balance count from 16 to 4 to * keep driver in sync with Firmware */ if ((bestArm == arm && pend0 > pend1 + lb_pending_cmds) || (bestArm != arm && pend1 > pend0 + lb_pending_cmds)) bestArm ^= 1; /* Update the last accessed block on the correct pd */ io_info->span_arm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | bestArm; io_info->pd_after_lb = (bestArm == arm) ? 
pd0 : pd1; } lbInfo->last_accessed_block[io_info->pd_after_lb] = block + count - 1; return io_info->pd_after_lb; } __le16 get_updated_dev_handle(struct megasas_instance *instance, struct LD_LOAD_BALANCE_INFO *lbInfo, struct IO_REQUEST_INFO *io_info, struct MR_DRV_RAID_MAP_ALL *drv_map) { u8 arm_pd; __le16 devHandle; /* get best new arm (PD ID) */ arm_pd = megasas_get_best_arm_pd(instance, lbInfo, io_info, drv_map); devHandle = MR_PdDevHandleGet(arm_pd, drv_map); io_info->pd_interface = MR_PdInterfaceTypeGet(arm_pd, drv_map); atomic_inc(&lbInfo->scsi_pending_cmds[arm_pd]); return devHandle; }
linux-master
drivers/scsi/megaraid/megaraid_sas_fp.c
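The RAID-6 branch of MR_GetPhyParams() above rotates parity by row: it takes the logical arm within the row, finds the Q drive for that row, and then maps the logical arm onto a physical arm, wrapping around the span width. Below is a minimal, illustrative userspace sketch of that arithmetic only, not driver code: plain C division and modulo stand in for mega_div64_32()/mega_mod64(), r6_phys_arm() is a made-up helper name, and the 4-data-arm/6-arm geometry in main() is an invented example, not anything read from a real RAID map.

/*
 * Illustrative sketch of the RAID-6 rotating-parity arm arithmetic used in
 * the MR_GetPhyParams() RAID-6 branch. Plain / and % replace the driver's
 * mega_div64_32()/mega_mod64() helpers; the geometry below is made up.
 */
#include <stdint.h>
#include <stdio.h>

static uint8_t r6_phys_arm(uint64_t strip, uint32_t row_data_size,
			   uint32_t row_size)
{
	uint64_t row    = strip / row_data_size;  /* row holding this strip */
	uint32_t logArm = strip % row_data_size;  /* logical arm within row */
	uint32_t rowMod = row % row_size;
	uint32_t armQ   = row_size - 1 - rowMod;  /* index of the Q drive */
	uint32_t arm    = armQ + 1 + logArm;      /* data logically follows Q */

	if (arm >= row_size)                      /* wrap around the span */
		arm -= row_size;
	return (uint8_t)arm;
}

int main(void)
{
	/* example 6-drive RAID-6 span: 4 data arms per row, 6 arms total */
	const uint32_t data_arms = 4, arms = 6;
	uint64_t strip;

	for (strip = 0; strip < 12; strip++)
		printf("strip %2llu -> physical arm %u\n",
		       (unsigned long long)strip,
		       r6_phys_arm(strip, data_arms, arms));
	return 0;
}

Printing three rows' worth of strips shows the data arms rotating by one position per row, which is why the driver can locate the physical disk with pure arithmetic instead of a per-strip lookup table.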
// SPDX-License-Identifier: GPL-2.0-or-later /* * * Linux MegaRAID device driver * * Copyright (c) 2003-2004 LSI Logic Corporation. * * FILE : megaraid_mbox.c * Version : v2.20.5.1 (Nov 16 2006) * * Authors: * Atul Mukker <[email protected]> * Sreenivas Bagalkote <[email protected]> * Manoj Jose <[email protected]> * Seokmann Ju * * List of supported controllers * * OEM Product Name VID DID SSVID SSID * --- ------------ --- --- ---- ---- * Dell PERC3/QC 101E 1960 1028 0471 * Dell PERC3/DC 101E 1960 1028 0493 * Dell PERC3/SC 101E 1960 1028 0475 * Dell PERC3/Di 1028 1960 1028 0123 * Dell PERC4/SC 1000 1960 1028 0520 * Dell PERC4/DC 1000 1960 1028 0518 * Dell PERC4/QC 1000 0407 1028 0531 * Dell PERC4/Di 1028 000F 1028 014A * Dell PERC 4e/Si 1028 0013 1028 016c * Dell PERC 4e/Di 1028 0013 1028 016d * Dell PERC 4e/Di 1028 0013 1028 016e * Dell PERC 4e/Di 1028 0013 1028 016f * Dell PERC 4e/Di 1028 0013 1028 0170 * Dell PERC 4e/DC 1000 0408 1028 0002 * Dell PERC 4e/SC 1000 0408 1028 0001 * * LSI MegaRAID SCSI 320-0 1000 1960 1000 A520 * LSI MegaRAID SCSI 320-1 1000 1960 1000 0520 * LSI MegaRAID SCSI 320-2 1000 1960 1000 0518 * LSI MegaRAID SCSI 320-0X 1000 0407 1000 0530 * LSI MegaRAID SCSI 320-2X 1000 0407 1000 0532 * LSI MegaRAID SCSI 320-4X 1000 0407 1000 0531 * LSI MegaRAID SCSI 320-1E 1000 0408 1000 0001 * LSI MegaRAID SCSI 320-2E 1000 0408 1000 0002 * LSI MegaRAID SATA 150-4 1000 1960 1000 4523 * LSI MegaRAID SATA 150-6 1000 1960 1000 0523 * LSI MegaRAID SATA 300-4X 1000 0409 1000 3004 * LSI MegaRAID SATA 300-8X 1000 0409 1000 3008 * * INTEL RAID Controller SRCU42X 1000 0407 8086 0532 * INTEL RAID Controller SRCS16 1000 1960 8086 0523 * INTEL RAID Controller SRCU42E 1000 0408 8086 0002 * INTEL RAID Controller SRCZCRX 1000 0407 8086 0530 * INTEL RAID Controller SRCS28X 1000 0409 8086 3008 * INTEL RAID Controller SROMBU42E 1000 0408 8086 3431 * INTEL RAID Controller SROMBU42E 1000 0408 8086 3499 * INTEL RAID Controller SRCU51L 1000 1960 8086 0520 * * FSC MegaRAID PCI Express ROMB 1000 0408 1734 1065 * * ACER MegaRAID ROMB-2E 1000 0408 1025 004D * * NEC MegaRAID PCI Express ROMB 1000 0408 1033 8287 * * For history of changes, see Documentation/scsi/ChangeLog.megaraid */ #include <linux/slab.h> #include <linux/module.h> #include "megaraid_mbox.h" static int megaraid_init(void); static void megaraid_exit(void); static int megaraid_probe_one(struct pci_dev*, const struct pci_device_id *); static void megaraid_detach_one(struct pci_dev *); static void megaraid_mbox_shutdown(struct pci_dev *); static int megaraid_io_attach(adapter_t *); static void megaraid_io_detach(adapter_t *); static int megaraid_init_mbox(adapter_t *); static void megaraid_fini_mbox(adapter_t *); static int megaraid_alloc_cmd_packets(adapter_t *); static void megaraid_free_cmd_packets(adapter_t *); static int megaraid_mbox_setup_dma_pools(adapter_t *); static void megaraid_mbox_teardown_dma_pools(adapter_t *); static int megaraid_sysfs_alloc_resources(adapter_t *); static void megaraid_sysfs_free_resources(adapter_t *); static int megaraid_abort_handler(struct scsi_cmnd *); static int megaraid_reset_handler(struct scsi_cmnd *); static int mbox_post_sync_cmd(adapter_t *, uint8_t []); static int mbox_post_sync_cmd_fast(adapter_t *, uint8_t []); static int megaraid_busywait_mbox(mraid_device_t *); static int megaraid_mbox_product_info(adapter_t *); static int megaraid_mbox_extended_cdb(adapter_t *); static int megaraid_mbox_support_ha(adapter_t *, uint16_t *); static int megaraid_mbox_support_random_del(adapter_t *); static int 
megaraid_mbox_get_max_sg(adapter_t *); static void megaraid_mbox_enum_raid_scsi(adapter_t *); static void megaraid_mbox_flush_cache(adapter_t *); static int megaraid_mbox_fire_sync_cmd(adapter_t *); static void megaraid_mbox_display_scb(adapter_t *, scb_t *); static void megaraid_mbox_setup_device_map(adapter_t *); static int megaraid_queue_command(struct Scsi_Host *, struct scsi_cmnd *); static scb_t *megaraid_mbox_build_cmd(adapter_t *, struct scsi_cmnd *, int *); static void megaraid_mbox_runpendq(adapter_t *, scb_t *); static void megaraid_mbox_prepare_pthru(adapter_t *, scb_t *, struct scsi_cmnd *); static void megaraid_mbox_prepare_epthru(adapter_t *, scb_t *, struct scsi_cmnd *); static irqreturn_t megaraid_isr(int, void *); static void megaraid_mbox_dpc(unsigned long); static ssize_t megaraid_mbox_app_hndl_show(struct device *, struct device_attribute *attr, char *); static ssize_t megaraid_mbox_ld_show(struct device *, struct device_attribute *attr, char *); static int megaraid_cmm_register(adapter_t *); static int megaraid_cmm_unregister(adapter_t *); static int megaraid_mbox_mm_handler(unsigned long, uioc_t *, uint32_t); static int megaraid_mbox_mm_command(adapter_t *, uioc_t *); static void megaraid_mbox_mm_done(adapter_t *, scb_t *); static int gather_hbainfo(adapter_t *, mraid_hba_info_t *); static int wait_till_fw_empty(adapter_t *); MODULE_AUTHOR("[email protected]"); MODULE_DESCRIPTION("LSI Logic MegaRAID Mailbox Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(MEGARAID_VERSION); /* * ### modules parameters for driver ### */ /* * Set to enable driver to expose unconfigured disk to kernel */ static int megaraid_expose_unconf_disks = 0; module_param_named(unconf_disks, megaraid_expose_unconf_disks, int, 0); MODULE_PARM_DESC(unconf_disks, "Set to expose unconfigured disks to kernel (default=0)"); /* * driver wait time if the adapter's mailbox is busy */ static unsigned int max_mbox_busy_wait = MBOX_BUSY_WAIT; module_param_named(busy_wait, max_mbox_busy_wait, int, 0); MODULE_PARM_DESC(busy_wait, "Max wait for mailbox in microseconds if busy (default=10)"); /* * number of sectors per IO command */ static unsigned int megaraid_max_sectors = MBOX_MAX_SECTORS; module_param_named(max_sectors, megaraid_max_sectors, int, 0); MODULE_PARM_DESC(max_sectors, "Maximum number of sectors per IO command (default=128)"); /* * number of commands per logical unit */ static unsigned int megaraid_cmd_per_lun = MBOX_DEF_CMD_PER_LUN; module_param_named(cmd_per_lun, megaraid_cmd_per_lun, int, 0); MODULE_PARM_DESC(cmd_per_lun, "Maximum number of commands per logical unit (default=64)"); /* * Fast driver load option, skip scanning for physical devices during load. * This would result in non-disk devices being skipped during driver load * time. These can be later added though, using /proc/scsi/scsi */ static unsigned int megaraid_fast_load; module_param_named(fast_load, megaraid_fast_load, int, 0); MODULE_PARM_DESC(fast_load, "Faster loading of the driver, skips physical devices! (default=0)"); /* * mraid_debug level - threshold for amount of information to be displayed by * the driver. This level can be changed through modules parameters, ioctl or * sysfs/proc interface. By default, print the announcement messages only. */ int mraid_debug_level = CL_ANN; module_param_named(debug_level, mraid_debug_level, int, 0); MODULE_PARM_DESC(debug_level, "Debug level for driver (default=0)"); /* * PCI table for all supported controllers. 
*/ static struct pci_device_id pci_id_table_g[] = { { PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_PERC4_DI_DISCOVERY, PCI_VENDOR_ID_DELL, PCI_SUBSYS_ID_PERC4_DI_DISCOVERY, }, { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_PERC4_SC, PCI_VENDOR_ID_DELL, PCI_SUBSYS_ID_PERC4_SC, }, { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_PERC4_DC, PCI_VENDOR_ID_DELL, PCI_SUBSYS_ID_PERC4_DC, }, { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_VERDE, PCI_ANY_ID, PCI_ANY_ID, }, { PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_PERC4_DI_EVERGLADES, PCI_VENDOR_ID_DELL, PCI_SUBSYS_ID_PERC4_DI_EVERGLADES, }, { PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_PERC4E_SI_BIGBEND, PCI_VENDOR_ID_DELL, PCI_SUBSYS_ID_PERC4E_SI_BIGBEND, }, { PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_PERC4E_DI_KOBUK, PCI_VENDOR_ID_DELL, PCI_SUBSYS_ID_PERC4E_DI_KOBUK, }, { PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_PERC4E_DI_CORVETTE, PCI_VENDOR_ID_DELL, PCI_SUBSYS_ID_PERC4E_DI_CORVETTE, }, { PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_PERC4E_DI_EXPEDITION, PCI_VENDOR_ID_DELL, PCI_SUBSYS_ID_PERC4E_DI_EXPEDITION, }, { PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_PERC4E_DI_GUADALUPE, PCI_VENDOR_ID_DELL, PCI_SUBSYS_ID_PERC4E_DI_GUADALUPE, }, { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_DOBSON, PCI_ANY_ID, PCI_ANY_ID, }, { PCI_VENDOR_ID_AMI, PCI_DEVICE_ID_AMI_MEGARAID3, PCI_ANY_ID, PCI_ANY_ID, }, { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_AMI_MEGARAID3, PCI_ANY_ID, PCI_ANY_ID, }, { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LINDSAY, PCI_ANY_ID, PCI_ANY_ID, }, {0} /* Terminating entry */ }; MODULE_DEVICE_TABLE(pci, pci_id_table_g); static struct pci_driver megaraid_pci_driver = { .name = "megaraid", .id_table = pci_id_table_g, .probe = megaraid_probe_one, .remove = megaraid_detach_one, .shutdown = megaraid_mbox_shutdown, }; // definitions for the device attributes for exporting logical drive number // for a scsi address (Host, Channel, Id, Lun) static DEVICE_ATTR_ADMIN_RO(megaraid_mbox_app_hndl); // Host template initializer for megaraid mbox sysfs device attributes static struct attribute *megaraid_shost_attrs[] = { &dev_attr_megaraid_mbox_app_hndl.attr, NULL, }; ATTRIBUTE_GROUPS(megaraid_shost); static DEVICE_ATTR_ADMIN_RO(megaraid_mbox_ld); // Host template initializer for megaraid mbox sysfs device attributes static struct attribute *megaraid_sdev_attrs[] = { &dev_attr_megaraid_mbox_ld.attr, NULL, }; ATTRIBUTE_GROUPS(megaraid_sdev); /* * Scsi host template for megaraid unified driver */ static const struct scsi_host_template megaraid_template_g = { .module = THIS_MODULE, .name = "LSI Logic MegaRAID driver", .proc_name = "megaraid", .queuecommand = megaraid_queue_command, .eh_abort_handler = megaraid_abort_handler, .eh_host_reset_handler = megaraid_reset_handler, .change_queue_depth = scsi_change_queue_depth, .no_write_same = 1, .sdev_groups = megaraid_sdev_groups, .shost_groups = megaraid_shost_groups, }; /** * megaraid_init - module load hook * * We register ourselves as hotplug enabled module and let PCI subsystem * discover our adapters. 
*/ static int __init megaraid_init(void) { int rval; // Announce the driver version con_log(CL_ANN, (KERN_INFO "megaraid: %s %s\n", MEGARAID_VERSION, MEGARAID_EXT_VERSION)); // check validity of module parameters if (megaraid_cmd_per_lun > MBOX_MAX_SCSI_CMDS) { con_log(CL_ANN, (KERN_WARNING "megaraid mailbox: max commands per lun reset to %d\n", MBOX_MAX_SCSI_CMDS)); megaraid_cmd_per_lun = MBOX_MAX_SCSI_CMDS; } // register as a PCI hot-plug driver module rval = pci_register_driver(&megaraid_pci_driver); if (rval < 0) { con_log(CL_ANN, (KERN_WARNING "megaraid: could not register hotplug support.\n")); } return rval; } /** * megaraid_exit - driver unload entry point * * We simply unwrap the megaraid_init routine here. */ static void __exit megaraid_exit(void) { con_log(CL_DLEVEL1, (KERN_NOTICE "megaraid: unloading framework\n")); // unregister as PCI hotplug driver pci_unregister_driver(&megaraid_pci_driver); return; } /** * megaraid_probe_one - PCI hotplug entry point * @pdev : handle to this controller's PCI configuration space * @id : pci device id of the class of controllers * * This routine should be called whenever a new adapter is detected by the * PCI hotplug susbsystem. */ static int megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) { adapter_t *adapter; // detected a new controller con_log(CL_ANN, (KERN_INFO "megaraid: probe new device %#4.04x:%#4.04x:%#4.04x:%#4.04x: ", pdev->vendor, pdev->device, pdev->subsystem_vendor, pdev->subsystem_device)); con_log(CL_ANN, ("bus %d:slot %d:func %d\n", pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn))); if (pci_enable_device(pdev)) { con_log(CL_ANN, (KERN_WARNING "megaraid: pci_enable_device failed\n")); return -ENODEV; } // Enable bus-mastering on this controller pci_set_master(pdev); // Allocate the per driver initialization structure adapter = kzalloc(sizeof(adapter_t), GFP_KERNEL); if (adapter == NULL) { con_log(CL_ANN, (KERN_WARNING "megaraid: out of memory, %s %d.\n", __func__, __LINE__)); goto out_probe_one; } // set up PCI related soft state and other pre-known parameters adapter->unique_id = pci_dev_id(pdev); adapter->irq = pdev->irq; adapter->pdev = pdev; atomic_set(&adapter->being_detached, 0); // Setup the default DMA mask. This would be changed later on // depending on hardware capabilities if (dma_set_mask(&adapter->pdev->dev, DMA_BIT_MASK(32))) { con_log(CL_ANN, (KERN_WARNING "megaraid: dma_set_mask failed:%d\n", __LINE__)); goto out_free_adapter; } // Initialize the synchronization lock for kernel and LLD spin_lock_init(&adapter->lock); // Initialize the command queues: the list of free SCBs and the list // of pending SCBs. 
INIT_LIST_HEAD(&adapter->kscb_pool); spin_lock_init(SCSI_FREE_LIST_LOCK(adapter)); INIT_LIST_HEAD(&adapter->pend_list); spin_lock_init(PENDING_LIST_LOCK(adapter)); INIT_LIST_HEAD(&adapter->completed_list); spin_lock_init(COMPLETED_LIST_LOCK(adapter)); // Start the mailbox based controller if (megaraid_init_mbox(adapter) != 0) { con_log(CL_ANN, (KERN_WARNING "megaraid: mailbox adapter did not initialize\n")); goto out_free_adapter; } // Register with LSI Common Management Module if (megaraid_cmm_register(adapter) != 0) { con_log(CL_ANN, (KERN_WARNING "megaraid: could not register with management module\n")); goto out_fini_mbox; } // setup adapter handle in PCI soft state pci_set_drvdata(pdev, adapter); // attach with scsi mid-layer if (megaraid_io_attach(adapter) != 0) { con_log(CL_ANN, (KERN_WARNING "megaraid: io attach failed\n")); goto out_cmm_unreg; } return 0; out_cmm_unreg: megaraid_cmm_unregister(adapter); out_fini_mbox: megaraid_fini_mbox(adapter); out_free_adapter: kfree(adapter); out_probe_one: pci_disable_device(pdev); return -ENODEV; } /** * megaraid_detach_one - release framework resources and call LLD release routine * @pdev : handle for our PCI configuration space * * This routine is called during driver unload. We free all the allocated * resources and call the corresponding LLD so that it can also release all * its resources. * * This routine is also called from the PCI hotplug system. */ static void megaraid_detach_one(struct pci_dev *pdev) { adapter_t *adapter; struct Scsi_Host *host; // Start a rollback on this adapter adapter = pci_get_drvdata(pdev); if (!adapter) { con_log(CL_ANN, (KERN_CRIT "megaraid: Invalid detach on %#4.04x:%#4.04x:%#4.04x:%#4.04x\n", pdev->vendor, pdev->device, pdev->subsystem_vendor, pdev->subsystem_device)); return; } else { con_log(CL_ANN, (KERN_NOTICE "megaraid: detaching device %#4.04x:%#4.04x:%#4.04x:%#4.04x\n", pdev->vendor, pdev->device, pdev->subsystem_vendor, pdev->subsystem_device)); } host = adapter->host; // do not allow any more requests from the management module for this // adapter. // FIXME: How do we account for the request which might still be // pending with us? atomic_set(&adapter->being_detached, 1); // detach from the IO sub-system megaraid_io_detach(adapter); // Unregister from common management module // // FIXME: this must return success or failure for conditions if there // is a command pending with LLD or not. megaraid_cmm_unregister(adapter); // finalize the mailbox based controller and release all resources megaraid_fini_mbox(adapter); kfree(adapter); scsi_host_put(host); pci_disable_device(pdev); return; } /** * megaraid_mbox_shutdown - PCI shutdown for megaraid HBA * @pdev : generic driver model device * * Shutdown notification, perform flush cache. */ static void megaraid_mbox_shutdown(struct pci_dev *pdev) { adapter_t *adapter = pci_get_drvdata(pdev); static int counter; if (!adapter) { con_log(CL_ANN, (KERN_WARNING "megaraid: null device in shutdown\n")); return; } // flush caches now con_log(CL_ANN, (KERN_INFO "megaraid: flushing adapter %d...", counter++)); megaraid_mbox_flush_cache(adapter); con_log(CL_ANN, ("done\n")); } /** * megaraid_io_attach - attach a device with the IO subsystem * @adapter : controller's soft state * * Attach this device with the IO subsystem. 
*/ static int megaraid_io_attach(adapter_t *adapter) { struct Scsi_Host *host; // Initialize SCSI Host structure host = scsi_host_alloc(&megaraid_template_g, 8); if (!host) { con_log(CL_ANN, (KERN_WARNING "megaraid mbox: scsi_register failed\n")); return -1; } SCSIHOST2ADAP(host) = (caddr_t)adapter; adapter->host = host; host->irq = adapter->irq; host->unique_id = adapter->unique_id; host->can_queue = adapter->max_cmds; host->this_id = adapter->init_id; host->sg_tablesize = adapter->sglen; host->max_sectors = adapter->max_sectors; host->cmd_per_lun = adapter->cmd_per_lun; host->max_channel = adapter->max_channel; host->max_id = adapter->max_target; host->max_lun = adapter->max_lun; // notify mid-layer about the new controller if (scsi_add_host(host, &adapter->pdev->dev)) { con_log(CL_ANN, (KERN_WARNING "megaraid mbox: scsi_add_host failed\n")); scsi_host_put(host); return -1; } scsi_scan_host(host); return 0; } /** * megaraid_io_detach - detach a device from the IO subsystem * @adapter : controller's soft state * * Detach this device from the IO subsystem. */ static void megaraid_io_detach(adapter_t *adapter) { struct Scsi_Host *host; con_log(CL_DLEVEL1, (KERN_INFO "megaraid: io detach\n")); host = adapter->host; scsi_remove_host(host); return; } /* * START: Mailbox Low Level Driver * * This is section specific to the single mailbox based controllers */ /** * megaraid_init_mbox - initialize controller * @adapter : our soft state * * - Allocate 16-byte aligned mailbox memory for firmware handshake * - Allocate controller's memory resources * - Find out all initialization data * - Allocate memory required for all the commands * - Use internal library of FW routines, build up complete soft state */ static int megaraid_init_mbox(adapter_t *adapter) { struct pci_dev *pdev; mraid_device_t *raid_dev; int i; uint32_t magic64; adapter->ito = MBOX_TIMEOUT; pdev = adapter->pdev; /* * Allocate and initialize the init data structure for mailbox * controllers */ raid_dev = kzalloc(sizeof(mraid_device_t), GFP_KERNEL); if (raid_dev == NULL) return -1; /* * Attach the adapter soft state to raid device soft state */ adapter->raid_device = (caddr_t)raid_dev; raid_dev->fast_load = megaraid_fast_load; // our baseport raid_dev->baseport = pci_resource_start(pdev, 0); if (pci_request_regions(pdev, "MegaRAID: LSI Logic Corporation") != 0) { con_log(CL_ANN, (KERN_WARNING "megaraid: mem region busy\n")); goto out_free_raid_dev; } raid_dev->baseaddr = ioremap(raid_dev->baseport, 128); if (!raid_dev->baseaddr) { con_log(CL_ANN, (KERN_WARNING "megaraid: could not map hba memory\n") ); goto out_release_regions; } /* initialize the mutual exclusion lock for the mailbox */ spin_lock_init(&raid_dev->mailbox_lock); /* allocate memory required for commands */ if (megaraid_alloc_cmd_packets(adapter) != 0) goto out_iounmap; /* * Issue SYNC cmd to flush the pending cmds in the adapter * and initialize its internal state */ if (megaraid_mbox_fire_sync_cmd(adapter)) con_log(CL_ANN, ("megaraid: sync cmd failed\n")); /* * Setup the rest of the soft state using the library of * FW routines */ /* request IRQ and register the interrupt service routine */ if (request_irq(adapter->irq, megaraid_isr, IRQF_SHARED, "megaraid", adapter)) { con_log(CL_ANN, (KERN_WARNING "megaraid: Couldn't register IRQ %d!\n", adapter->irq)); goto out_alloc_cmds; } // Product info if (megaraid_mbox_product_info(adapter) != 0) goto out_free_irq; // Do we support extended CDBs adapter->max_cdb_sz = 10; if (megaraid_mbox_extended_cdb(adapter) == 0) { 
adapter->max_cdb_sz = 16; } /* * Do we support cluster environment, if we do, what is the initiator * id. * NOTE: In a non-cluster aware firmware environment, the LLD should * return 7 as initiator id. */ adapter->ha = 0; adapter->init_id = -1; if (megaraid_mbox_support_ha(adapter, &adapter->init_id) == 0) { adapter->ha = 1; } /* * Prepare the device ids array to have the mapping between the kernel * device address and megaraid device address. * We export the physical devices on their actual addresses. The * logical drives are exported on a virtual SCSI channel */ megaraid_mbox_setup_device_map(adapter); // If the firmware supports random deletion, update the device id map if (megaraid_mbox_support_random_del(adapter)) { // Change the logical drives numbers in device_ids array one // slot in device_ids is reserved for target id, that's why // "<=" below for (i = 0; i <= MAX_LOGICAL_DRIVES_40LD; i++) { adapter->device_ids[adapter->max_channel][i] += 0x80; } adapter->device_ids[adapter->max_channel][adapter->init_id] = 0xFF; raid_dev->random_del_supported = 1; } /* * find out the maximum number of scatter-gather elements supported by * this firmware */ adapter->sglen = megaraid_mbox_get_max_sg(adapter); // enumerate RAID and SCSI channels so that all devices on SCSI // channels can later be exported, including disk devices megaraid_mbox_enum_raid_scsi(adapter); /* * Other parameters required by upper layer * * maximum number of sectors per IO command */ adapter->max_sectors = megaraid_max_sectors; /* * number of queued commands per LUN. */ adapter->cmd_per_lun = megaraid_cmd_per_lun; /* * Allocate resources required to issue FW calls, when sysfs is * accessed */ if (megaraid_sysfs_alloc_resources(adapter) != 0) goto out_free_irq; // Set the DMA mask to 64-bit. 
All supported controllers as capable of // DMA in this range pci_read_config_dword(adapter->pdev, PCI_CONF_AMISIG64, &magic64); if (((magic64 == HBA_SIGNATURE_64_BIT) && ((adapter->pdev->subsystem_device != PCI_SUBSYS_ID_MEGARAID_SATA_150_6) && (adapter->pdev->subsystem_device != PCI_SUBSYS_ID_MEGARAID_SATA_150_4))) || (adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC && adapter->pdev->device == PCI_DEVICE_ID_VERDE) || (adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC && adapter->pdev->device == PCI_DEVICE_ID_DOBSON) || (adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC && adapter->pdev->device == PCI_DEVICE_ID_LINDSAY) || (adapter->pdev->vendor == PCI_VENDOR_ID_DELL && adapter->pdev->device == PCI_DEVICE_ID_PERC4_DI_EVERGLADES) || (adapter->pdev->vendor == PCI_VENDOR_ID_DELL && adapter->pdev->device == PCI_DEVICE_ID_PERC4E_DI_KOBUK)) { if (dma_set_mask(&adapter->pdev->dev, DMA_BIT_MASK(64))) { con_log(CL_ANN, (KERN_WARNING "megaraid: DMA mask for 64-bit failed\n")); if (dma_set_mask(&adapter->pdev->dev, DMA_BIT_MASK(32))) { con_log(CL_ANN, (KERN_WARNING "megaraid: 32-bit DMA mask failed\n")); goto out_free_sysfs_res; } } } // setup tasklet for DPC tasklet_init(&adapter->dpc_h, megaraid_mbox_dpc, (unsigned long)adapter); con_log(CL_DLEVEL1, (KERN_INFO "megaraid mbox hba successfully initialized\n")); return 0; out_free_sysfs_res: megaraid_sysfs_free_resources(adapter); out_free_irq: free_irq(adapter->irq, adapter); out_alloc_cmds: megaraid_free_cmd_packets(adapter); out_iounmap: iounmap(raid_dev->baseaddr); out_release_regions: pci_release_regions(pdev); out_free_raid_dev: kfree(raid_dev); return -1; } /** * megaraid_fini_mbox - undo controller initialization * @adapter : our soft state */ static void megaraid_fini_mbox(adapter_t *adapter) { mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); // flush all caches megaraid_mbox_flush_cache(adapter); tasklet_kill(&adapter->dpc_h); megaraid_sysfs_free_resources(adapter); megaraid_free_cmd_packets(adapter); free_irq(adapter->irq, adapter); iounmap(raid_dev->baseaddr); pci_release_regions(adapter->pdev); kfree(raid_dev); return; } /** * megaraid_alloc_cmd_packets - allocate shared mailbox * @adapter : soft state of the raid controller * * Allocate and align the shared mailbox. This mailbox is used to issue * all the commands. For IO based controllers, the mailbox is also registered * with the FW. Allocate memory for all commands as well. * This is our big allocator. */ static int megaraid_alloc_cmd_packets(adapter_t *adapter) { mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); struct pci_dev *pdev; unsigned long align; scb_t *scb; mbox_ccb_t *ccb; struct mraid_pci_blk *epthru_pci_blk; struct mraid_pci_blk *sg_pci_blk; struct mraid_pci_blk *mbox_pci_blk; int i; pdev = adapter->pdev; /* * Setup the mailbox * Allocate the common 16-byte aligned memory for the handshake * mailbox. 
*/ raid_dev->una_mbox64 = dma_alloc_coherent(&adapter->pdev->dev, sizeof(mbox64_t), &raid_dev->una_mbox64_dma, GFP_KERNEL); if (!raid_dev->una_mbox64) { con_log(CL_ANN, (KERN_WARNING "megaraid: out of memory, %s %d\n", __func__, __LINE__)); return -1; } /* * Align the mailbox at 16-byte boundary */ raid_dev->mbox = &raid_dev->una_mbox64->mbox32; raid_dev->mbox = (mbox_t *)((((unsigned long)raid_dev->mbox) + 15) & (~0UL ^ 0xFUL)); raid_dev->mbox64 = (mbox64_t *)(((unsigned long)raid_dev->mbox) - 8); align = ((void *)raid_dev->mbox - ((void *)&raid_dev->una_mbox64->mbox32)); raid_dev->mbox_dma = (unsigned long)raid_dev->una_mbox64_dma + 8 + align; // Allocate memory for commands issued internally adapter->ibuf = dma_alloc_coherent(&pdev->dev, MBOX_IBUF_SIZE, &adapter->ibuf_dma_h, GFP_KERNEL); if (!adapter->ibuf) { con_log(CL_ANN, (KERN_WARNING "megaraid: out of memory, %s %d\n", __func__, __LINE__)); goto out_free_common_mbox; } // Allocate memory for our SCSI Command Blocks and their associated // memory /* * Allocate memory for the base list of scb. Later allocate memory for * CCBs and embedded components of each CCB and point the pointers in * scb to the allocated components * NOTE: The code to allocate SCB will be duplicated in all the LLD * since the calling routine does not yet know the number of available * commands. */ adapter->kscb_list = kcalloc(MBOX_MAX_SCSI_CMDS, sizeof(scb_t), GFP_KERNEL); if (adapter->kscb_list == NULL) { con_log(CL_ANN, (KERN_WARNING "megaraid: out of memory, %s %d\n", __func__, __LINE__)); goto out_free_ibuf; } // memory allocation for our command packets if (megaraid_mbox_setup_dma_pools(adapter) != 0) { con_log(CL_ANN, (KERN_WARNING "megaraid: out of memory, %s %d\n", __func__, __LINE__)); goto out_free_scb_list; } // Adjust the scb pointers and link in the free pool epthru_pci_blk = raid_dev->epthru_pool; sg_pci_blk = raid_dev->sg_pool; mbox_pci_blk = raid_dev->mbox_pool; for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) { scb = adapter->kscb_list + i; ccb = raid_dev->ccb_list + i; ccb->mbox = (mbox_t *)(mbox_pci_blk[i].vaddr + 16); ccb->raw_mbox = (uint8_t *)ccb->mbox; ccb->mbox64 = (mbox64_t *)(mbox_pci_blk[i].vaddr + 8); ccb->mbox_dma_h = (unsigned long)mbox_pci_blk[i].dma_addr + 16; // make sure the mailbox is aligned properly if (ccb->mbox_dma_h & 0x0F) { con_log(CL_ANN, (KERN_CRIT "megaraid mbox: not aligned on 16-bytes\n")); goto out_teardown_dma_pools; } ccb->epthru = (mraid_epassthru_t *) epthru_pci_blk[i].vaddr; ccb->epthru_dma_h = epthru_pci_blk[i].dma_addr; ccb->pthru = (mraid_passthru_t *)ccb->epthru; ccb->pthru_dma_h = ccb->epthru_dma_h; ccb->sgl64 = (mbox_sgl64 *)sg_pci_blk[i].vaddr; ccb->sgl_dma_h = sg_pci_blk[i].dma_addr; ccb->sgl32 = (mbox_sgl32 *)ccb->sgl64; scb->ccb = (caddr_t)ccb; scb->gp = 0; scb->sno = i; // command index scb->scp = NULL; scb->state = SCB_FREE; scb->dma_direction = DMA_NONE; scb->dma_type = MRAID_DMA_NONE; scb->dev_channel = -1; scb->dev_target = -1; // put scb in the free pool list_add_tail(&scb->list, &adapter->kscb_pool); } return 0; out_teardown_dma_pools: megaraid_mbox_teardown_dma_pools(adapter); out_free_scb_list: kfree(adapter->kscb_list); out_free_ibuf: dma_free_coherent(&pdev->dev, MBOX_IBUF_SIZE, (void *)adapter->ibuf, adapter->ibuf_dma_h); out_free_common_mbox: dma_free_coherent(&adapter->pdev->dev, sizeof(mbox64_t), (caddr_t)raid_dev->una_mbox64, raid_dev->una_mbox64_dma); return -1; } /** * megaraid_free_cmd_packets - free memory * @adapter : soft state of the raid controller * * Release memory resources 
allocated for commands. */ static void megaraid_free_cmd_packets(adapter_t *adapter) { mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); megaraid_mbox_teardown_dma_pools(adapter); kfree(adapter->kscb_list); dma_free_coherent(&adapter->pdev->dev, MBOX_IBUF_SIZE, (void *)adapter->ibuf, adapter->ibuf_dma_h); dma_free_coherent(&adapter->pdev->dev, sizeof(mbox64_t), (caddr_t)raid_dev->una_mbox64, raid_dev->una_mbox64_dma); return; } /** * megaraid_mbox_setup_dma_pools - setup dma pool for command packets * @adapter : HBA soft state * * Setup the dma pools for mailbox, passthru and extended passthru structures, * and scatter-gather lists. */ static int megaraid_mbox_setup_dma_pools(adapter_t *adapter) { mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); struct mraid_pci_blk *epthru_pci_blk; struct mraid_pci_blk *sg_pci_blk; struct mraid_pci_blk *mbox_pci_blk; int i; // Allocate memory for 16-bytes aligned mailboxes raid_dev->mbox_pool_handle = dma_pool_create("megaraid mbox pool", &adapter->pdev->dev, sizeof(mbox64_t) + 16, 16, 0); if (raid_dev->mbox_pool_handle == NULL) { goto fail_setup_dma_pool; } mbox_pci_blk = raid_dev->mbox_pool; for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) { mbox_pci_blk[i].vaddr = dma_pool_alloc( raid_dev->mbox_pool_handle, GFP_KERNEL, &mbox_pci_blk[i].dma_addr); if (!mbox_pci_blk[i].vaddr) { goto fail_setup_dma_pool; } } /* * Allocate memory for each embedded passthru strucuture pointer * Request for a 128 bytes aligned structure for each passthru command * structure * Since passthru and extended passthru commands are exclusive, they * share common memory pool. Passthru structures piggyback on memory * allocated to extended passthru since passthru is smaller of the two */ raid_dev->epthru_pool_handle = dma_pool_create("megaraid mbox pthru", &adapter->pdev->dev, sizeof(mraid_epassthru_t), 128, 0); if (raid_dev->epthru_pool_handle == NULL) { goto fail_setup_dma_pool; } epthru_pci_blk = raid_dev->epthru_pool; for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) { epthru_pci_blk[i].vaddr = dma_pool_alloc( raid_dev->epthru_pool_handle, GFP_KERNEL, &epthru_pci_blk[i].dma_addr); if (!epthru_pci_blk[i].vaddr) { goto fail_setup_dma_pool; } } // Allocate memory for each scatter-gather list. Request for 512 bytes // alignment for each sg list raid_dev->sg_pool_handle = dma_pool_create("megaraid mbox sg", &adapter->pdev->dev, sizeof(mbox_sgl64) * MBOX_MAX_SG_SIZE, 512, 0); if (raid_dev->sg_pool_handle == NULL) { goto fail_setup_dma_pool; } sg_pci_blk = raid_dev->sg_pool; for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) { sg_pci_blk[i].vaddr = dma_pool_alloc( raid_dev->sg_pool_handle, GFP_KERNEL, &sg_pci_blk[i].dma_addr); if (!sg_pci_blk[i].vaddr) { goto fail_setup_dma_pool; } } return 0; fail_setup_dma_pool: megaraid_mbox_teardown_dma_pools(adapter); return -1; } /** * megaraid_mbox_teardown_dma_pools - teardown dma pools for command packets * @adapter : HBA soft state * * Teardown the dma pool for mailbox, passthru and extended passthru * structures, and scatter-gather lists. 
*/ static void megaraid_mbox_teardown_dma_pools(adapter_t *adapter) { mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); struct mraid_pci_blk *epthru_pci_blk; struct mraid_pci_blk *sg_pci_blk; struct mraid_pci_blk *mbox_pci_blk; int i; sg_pci_blk = raid_dev->sg_pool; for (i = 0; i < MBOX_MAX_SCSI_CMDS && sg_pci_blk[i].vaddr; i++) { dma_pool_free(raid_dev->sg_pool_handle, sg_pci_blk[i].vaddr, sg_pci_blk[i].dma_addr); } dma_pool_destroy(raid_dev->sg_pool_handle); epthru_pci_blk = raid_dev->epthru_pool; for (i = 0; i < MBOX_MAX_SCSI_CMDS && epthru_pci_blk[i].vaddr; i++) { dma_pool_free(raid_dev->epthru_pool_handle, epthru_pci_blk[i].vaddr, epthru_pci_blk[i].dma_addr); } dma_pool_destroy(raid_dev->epthru_pool_handle); mbox_pci_blk = raid_dev->mbox_pool; for (i = 0; i < MBOX_MAX_SCSI_CMDS && mbox_pci_blk[i].vaddr; i++) { dma_pool_free(raid_dev->mbox_pool_handle, mbox_pci_blk[i].vaddr, mbox_pci_blk[i].dma_addr); } dma_pool_destroy(raid_dev->mbox_pool_handle); return; } /** * megaraid_alloc_scb - detach and return a scb from the free list * @adapter : controller's soft state * @scp : pointer to the scsi command to be executed * * Return the scb from the head of the free list. %NULL if there are none * available. */ static scb_t * megaraid_alloc_scb(adapter_t *adapter, struct scsi_cmnd *scp) { struct list_head *head = &adapter->kscb_pool; scb_t *scb = NULL; unsigned long flags; // detach scb from free pool spin_lock_irqsave(SCSI_FREE_LIST_LOCK(adapter), flags); if (list_empty(head)) { spin_unlock_irqrestore(SCSI_FREE_LIST_LOCK(adapter), flags); return NULL; } scb = list_entry(head->next, scb_t, list); list_del_init(&scb->list); spin_unlock_irqrestore(SCSI_FREE_LIST_LOCK(adapter), flags); scb->state = SCB_ACTIVE; scb->scp = scp; scb->dma_type = MRAID_DMA_NONE; return scb; } /** * megaraid_dealloc_scb - return the scb to the free pool * @adapter : controller's soft state * @scb : scb to be freed * * Return the scb back to the free list of scbs. The caller must 'flush' the * SCB before calling us. E.g., performing pci_unamp and/or pci_sync etc. * NOTE NOTE: Make sure the scb is not on any list before calling this * routine. */ static inline void megaraid_dealloc_scb(adapter_t *adapter, scb_t *scb) { unsigned long flags; // put scb in the free pool scb->state = SCB_FREE; scb->scp = NULL; spin_lock_irqsave(SCSI_FREE_LIST_LOCK(adapter), flags); list_add(&scb->list, &adapter->kscb_pool); spin_unlock_irqrestore(SCSI_FREE_LIST_LOCK(adapter), flags); return; } /** * megaraid_mbox_mksgl - make the scatter-gather list * @adapter : controller's soft state * @scb : scsi control block * * Prepare the scatter-gather list. */ static int megaraid_mbox_mksgl(adapter_t *adapter, scb_t *scb) { struct scatterlist *sgl; mbox_ccb_t *ccb; struct scsi_cmnd *scp; int sgcnt; int i; scp = scb->scp; ccb = (mbox_ccb_t *)scb->ccb; sgcnt = scsi_dma_map(scp); BUG_ON(sgcnt < 0 || sgcnt > adapter->sglen); // no mapping required if no data to be transferred if (!sgcnt) return 0; scb->dma_type = MRAID_DMA_WSG; scsi_for_each_sg(scp, sgl, sgcnt, i) { ccb->sgl64[i].address = sg_dma_address(sgl); ccb->sgl64[i].length = sg_dma_len(sgl); } // Return count of SG nodes return sgcnt; } /** * mbox_post_cmd - issue a mailbox command * @adapter : controller's soft state * @scb : command to be issued * * Post the command to the controller if mailbox is available. 
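 *
 * Returns 0 on success and -1 if the mailbox stayed busy, in which case the
 * caller is expected to re-queue the SCB and retry later. Caller-side sketch,
 * mirroring megaraid_mbox_runpendq() (locking elided):
 *
 *	scb->state = SCB_ISSUED;
 *	if (mbox_post_cmd(adapter, scb) != 0) {
 *		scb->state = SCB_PENDQ;		// mailbox busy, retry later
 *		list_add(&scb->list, &adapter->pend_list);
 *	}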
*/ static int mbox_post_cmd(adapter_t *adapter, scb_t *scb) { mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); mbox64_t *mbox64; mbox_t *mbox; mbox_ccb_t *ccb; unsigned long flags; unsigned int i = 0; ccb = (mbox_ccb_t *)scb->ccb; mbox = raid_dev->mbox; mbox64 = raid_dev->mbox64; /* * Check for busy mailbox. If it is, return failure - the caller * should retry later. */ spin_lock_irqsave(MAILBOX_LOCK(raid_dev), flags); if (unlikely(mbox->busy)) { do { udelay(1); i++; rmb(); } while(mbox->busy && (i < max_mbox_busy_wait)); if (mbox->busy) { spin_unlock_irqrestore(MAILBOX_LOCK(raid_dev), flags); return -1; } } // Copy this command's mailbox data into "adapter's" mailbox memcpy((caddr_t)mbox64, (caddr_t)ccb->mbox64, 22); mbox->cmdid = scb->sno; adapter->outstanding_cmds++; mbox->busy = 1; // Set busy mbox->poll = 0; mbox->ack = 0; wmb(); WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1); spin_unlock_irqrestore(MAILBOX_LOCK(raid_dev), flags); return 0; } /** * megaraid_queue_command_lck - generic queue entry point for all LLDs * @scp : pointer to the scsi command to be executed * * Queue entry point for mailbox based controllers. */ static int megaraid_queue_command_lck(struct scsi_cmnd *scp) { void (*done)(struct scsi_cmnd *) = scsi_done; adapter_t *adapter; scb_t *scb; int if_busy; adapter = SCP2ADAPTER(scp); scp->result = 0; /* * Allocate and build a SCB request * if_busy flag will be set if megaraid_mbox_build_cmd() command could * not allocate scb. We will return non-zero status in that case. * NOTE: scb can be null even though certain commands completed * successfully, e.g., MODE_SENSE and TEST_UNIT_READY, it would * return 0 in that case, and we would do the callback right away. */ if_busy = 0; scb = megaraid_mbox_build_cmd(adapter, scp, &if_busy); if (!scb) { // command already completed done(scp); return 0; } megaraid_mbox_runpendq(adapter, scb); return if_busy; } static DEF_SCSI_QCMD(megaraid_queue_command) /** * megaraid_mbox_build_cmd - transform the mid-layer scsi commands * @adapter : controller's soft state * @scp : mid-layer scsi command pointer * @busy : set if request could not be completed because of lack of * resources * * Transform the mid-layer scsi command to megaraid firmware lingua. * Convert the command issued by mid-layer to format understood by megaraid * firmware. We also complete certain commands without sending them to firmware. 
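 *
 * Returns an SCB ready for issue, or NULL when the command was either
 * completed inline or could not get resources (in which case *busy is set).
 * Caller-side sketch, mirroring megaraid_queue_command_lck():
 *
 *	if_busy = 0;
 *	scb = megaraid_mbox_build_cmd(adapter, scp, &if_busy);
 *	if (!scb) {			// completed inline, scp->result is set
 *		scsi_done(scp);
 *		return 0;
 *	}
 *	megaraid_mbox_runpendq(adapter, scb);
 *	return if_busy;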
*/ static scb_t * megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy) { mraid_device_t *rdev = ADAP2RAIDDEV(adapter); int channel; int target; int islogical; mbox_ccb_t *ccb; mraid_passthru_t *pthru; mbox64_t *mbox64; mbox_t *mbox; scb_t *scb; char skip[] = "skipping"; char scan[] = "scanning"; char *ss; /* * Get the appropriate device map for the device this command is * intended for */ MRAID_GET_DEVICE_MAP(adapter, scp, channel, target, islogical); /* * Logical drive commands */ if (islogical) { switch (scp->cmnd[0]) { case TEST_UNIT_READY: /* * Do we support clustering and is the support enabled * If no, return success always */ if (!adapter->ha) { scp->result = (DID_OK << 16); return NULL; } if (!(scb = megaraid_alloc_scb(adapter, scp))) { scp->result = (DID_ERROR << 16); *busy = 1; return NULL; } scb->dma_direction = scp->sc_data_direction; scb->dev_channel = 0xFF; scb->dev_target = target; ccb = (mbox_ccb_t *)scb->ccb; /* * The command id will be provided by the command * issuance routine */ ccb->raw_mbox[0] = CLUSTER_CMD; ccb->raw_mbox[2] = RESERVATION_STATUS; ccb->raw_mbox[3] = target; return scb; case MODE_SENSE: { struct scatterlist *sgl; caddr_t vaddr; sgl = scsi_sglist(scp); if (sg_page(sgl)) { vaddr = (caddr_t) sg_virt(&sgl[0]); memset(vaddr, 0, scp->cmnd[4]); } else { con_log(CL_ANN, (KERN_WARNING "megaraid mailbox: invalid sg:%d\n", __LINE__)); } } scp->result = (DID_OK << 16); return NULL; case INQUIRY: /* * Display the channel scan for logical drives * Do not display scan for a channel if already done. */ if (!(rdev->last_disp & (1L << SCP2CHANNEL(scp)))) { con_log(CL_ANN, (KERN_INFO "scsi[%d]: scanning scsi channel %d", adapter->host->host_no, SCP2CHANNEL(scp))); con_log(CL_ANN, ( " [virtual] for logical drives\n")); rdev->last_disp |= (1L << SCP2CHANNEL(scp)); } if (scp->cmnd[1] & MEGA_SCSI_INQ_EVPD) { scsi_build_sense(scp, 0, ILLEGAL_REQUEST, MEGA_INVALID_FIELD_IN_CDB, 0); return NULL; } fallthrough; case READ_CAPACITY: /* * Do not allow LUN > 0 for logical drives and * requests for more than 40 logical drives */ if (SCP2LUN(scp)) { scp->result = (DID_BAD_TARGET << 16); return NULL; } if ((target % 0x80) >= MAX_LOGICAL_DRIVES_40LD) { scp->result = (DID_BAD_TARGET << 16); return NULL; } /* Allocate a SCB and initialize passthru */ if (!(scb = megaraid_alloc_scb(adapter, scp))) { scp->result = (DID_ERROR << 16); *busy = 1; return NULL; } ccb = (mbox_ccb_t *)scb->ccb; scb->dev_channel = 0xFF; scb->dev_target = target; pthru = ccb->pthru; mbox = ccb->mbox; mbox64 = ccb->mbox64; pthru->timeout = 0; pthru->ars = 1; pthru->reqsenselen = 14; pthru->islogical = 1; pthru->logdrv = target; pthru->cdblen = scp->cmd_len; memcpy(pthru->cdb, scp->cmnd, scp->cmd_len); mbox->cmd = MBOXCMD_PASSTHRU64; scb->dma_direction = scp->sc_data_direction; pthru->dataxferlen = scsi_bufflen(scp); pthru->dataxferaddr = ccb->sgl_dma_h; pthru->numsge = megaraid_mbox_mksgl(adapter, scb); mbox->xferaddr = 0xFFFFFFFF; mbox64->xferaddr_lo = (uint32_t )ccb->pthru_dma_h; mbox64->xferaddr_hi = 0; return scb; case READ_6: case WRITE_6: case READ_10: case WRITE_10: case READ_12: case WRITE_12: /* * Allocate a SCB and initialize mailbox */ if (!(scb = megaraid_alloc_scb(adapter, scp))) { scp->result = (DID_ERROR << 16); *busy = 1; return NULL; } ccb = (mbox_ccb_t *)scb->ccb; scb->dev_channel = 0xFF; scb->dev_target = target; mbox = ccb->mbox; mbox64 = ccb->mbox64; mbox->logdrv = target; /* * A little HACK: 2nd bit is zero for all scsi read * commands and is set for all scsi write 
commands */ mbox->cmd = (scp->cmnd[0] & 0x02) ? MBOXCMD_LWRITE64: MBOXCMD_LREAD64 ; /* * 6-byte READ(0x08) or WRITE(0x0A) cdb */ if (scp->cmd_len == 6) { mbox->numsectors = (uint32_t)scp->cmnd[4]; mbox->lba = ((uint32_t)scp->cmnd[1] << 16) | ((uint32_t)scp->cmnd[2] << 8) | (uint32_t)scp->cmnd[3]; mbox->lba &= 0x1FFFFF; } /* * 10-byte READ(0x28) or WRITE(0x2A) cdb */ else if (scp->cmd_len == 10) { mbox->numsectors = (uint32_t)scp->cmnd[8] | ((uint32_t)scp->cmnd[7] << 8); mbox->lba = ((uint32_t)scp->cmnd[2] << 24) | ((uint32_t)scp->cmnd[3] << 16) | ((uint32_t)scp->cmnd[4] << 8) | (uint32_t)scp->cmnd[5]; } /* * 12-byte READ(0xA8) or WRITE(0xAA) cdb */ else if (scp->cmd_len == 12) { mbox->lba = ((uint32_t)scp->cmnd[2] << 24) | ((uint32_t)scp->cmnd[3] << 16) | ((uint32_t)scp->cmnd[4] << 8) | (uint32_t)scp->cmnd[5]; mbox->numsectors = ((uint32_t)scp->cmnd[6] << 24) | ((uint32_t)scp->cmnd[7] << 16) | ((uint32_t)scp->cmnd[8] << 8) | (uint32_t)scp->cmnd[9]; } else { con_log(CL_ANN, (KERN_WARNING "megaraid: unsupported CDB length\n")); megaraid_dealloc_scb(adapter, scb); scp->result = (DID_ERROR << 16); return NULL; } scb->dma_direction = scp->sc_data_direction; // Calculate Scatter-Gather info mbox64->xferaddr_lo = (uint32_t )ccb->sgl_dma_h; mbox->numsge = megaraid_mbox_mksgl(adapter, scb); mbox->xferaddr = 0xFFFFFFFF; mbox64->xferaddr_hi = 0; return scb; case RESERVE: case RELEASE: /* * Do we support clustering and is the support enabled */ if (!adapter->ha) { scp->result = (DID_BAD_TARGET << 16); return NULL; } /* * Allocate a SCB and initialize mailbox */ if (!(scb = megaraid_alloc_scb(adapter, scp))) { scp->result = (DID_ERROR << 16); *busy = 1; return NULL; } ccb = (mbox_ccb_t *)scb->ccb; scb->dev_channel = 0xFF; scb->dev_target = target; ccb->raw_mbox[0] = CLUSTER_CMD; ccb->raw_mbox[2] = (scp->cmnd[0] == RESERVE) ? RESERVE_LD : RELEASE_LD; ccb->raw_mbox[3] = target; scb->dma_direction = scp->sc_data_direction; return scb; default: scp->result = (DID_BAD_TARGET << 16); return NULL; } } else { // Passthru device commands // Do not allow access to target id > 15 or LUN > 7 if (target > 15 || SCP2LUN(scp) > 7) { scp->result = (DID_BAD_TARGET << 16); return NULL; } // if fast load option was set and scan for last device is // over, reset the fast_load flag so that during a possible // next scan, devices can be made available if (rdev->fast_load && (target == 15) && (SCP2CHANNEL(scp) == adapter->max_channel -1)) { con_log(CL_ANN, (KERN_INFO "megaraid[%d]: physical device scan re-enabled\n", adapter->host->host_no)); rdev->fast_load = 0; } /* * Display the channel scan for physical devices */ if (!(rdev->last_disp & (1L << SCP2CHANNEL(scp)))) { ss = rdev->fast_load ? 
skip : scan; con_log(CL_ANN, (KERN_INFO "scsi[%d]: %s scsi channel %d [Phy %d]", adapter->host->host_no, ss, SCP2CHANNEL(scp), channel)); con_log(CL_ANN, ( " for non-raid devices\n")); rdev->last_disp |= (1L << SCP2CHANNEL(scp)); } // disable channel sweep if fast load option given if (rdev->fast_load) { scp->result = (DID_BAD_TARGET << 16); return NULL; } // Allocate a SCB and initialize passthru if (!(scb = megaraid_alloc_scb(adapter, scp))) { scp->result = (DID_ERROR << 16); *busy = 1; return NULL; } ccb = (mbox_ccb_t *)scb->ccb; scb->dev_channel = channel; scb->dev_target = target; scb->dma_direction = scp->sc_data_direction; mbox = ccb->mbox; mbox64 = ccb->mbox64; // Does this firmware support extended CDBs if (adapter->max_cdb_sz == 16) { mbox->cmd = MBOXCMD_EXTPTHRU; megaraid_mbox_prepare_epthru(adapter, scb, scp); mbox64->xferaddr_lo = (uint32_t)ccb->epthru_dma_h; mbox64->xferaddr_hi = 0; mbox->xferaddr = 0xFFFFFFFF; } else { mbox->cmd = MBOXCMD_PASSTHRU64; megaraid_mbox_prepare_pthru(adapter, scb, scp); mbox64->xferaddr_lo = (uint32_t)ccb->pthru_dma_h; mbox64->xferaddr_hi = 0; mbox->xferaddr = 0xFFFFFFFF; } return scb; } // NOT REACHED } /** * megaraid_mbox_runpendq - execute commands queued in the pending queue * @adapter : controller's soft state * @scb_q : SCB to be queued in the pending list * * Scan the pending list for commands which are not yet issued and try to * post to the controller. The SCB can be a null pointer, which would indicate * no SCB to be queue, just try to execute the ones in the pending list. * * NOTE: We do not actually traverse the pending list. The SCBs are plucked * out from the head of the pending list. If it is successfully issued, the * next SCB is at the head now. */ static void megaraid_mbox_runpendq(adapter_t *adapter, scb_t *scb_q) { scb_t *scb; unsigned long flags; spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags); if (scb_q) { scb_q->state = SCB_PENDQ; list_add_tail(&scb_q->list, &adapter->pend_list); } // if the adapter in not in quiescent mode, post the commands to FW if (adapter->quiescent) { spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags); return; } while (!list_empty(&adapter->pend_list)) { assert_spin_locked(PENDING_LIST_LOCK(adapter)); scb = list_entry(adapter->pend_list.next, scb_t, list); // remove the scb from the pending list and try to // issue. If we are unable to issue it, put back in // the pending list and return list_del_init(&scb->list); spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags); // if mailbox was busy, return SCB back to pending // list. Make sure to add at the head, since that's // where it would have been removed from scb->state = SCB_ISSUED; if (mbox_post_cmd(adapter, scb) != 0) { spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags); scb->state = SCB_PENDQ; list_add(&scb->list, &adapter->pend_list); spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags); return; } spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags); } spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags); return; } /** * megaraid_mbox_prepare_pthru - prepare a command for physical devices * @adapter : pointer to controller's soft state * @scb : scsi control block * @scp : scsi command from the mid-layer * * Prepare a command for the scsi physical devices. 
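 *
 * This routine only fills the passthru structure and its scatter-gather
 * list; the mailbox itself is set up by the caller. Sketch of the pairing
 * used in megaraid_mbox_build_cmd():
 *
 *	mbox->cmd = MBOXCMD_PASSTHRU64;
 *	megaraid_mbox_prepare_pthru(adapter, scb, scp);
 *	mbox64->xferaddr_lo = (uint32_t)ccb->pthru_dma_h;
 *	mbox64->xferaddr_hi = 0;
 *	mbox->xferaddr = 0xFFFFFFFF;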
*/ static void megaraid_mbox_prepare_pthru(adapter_t *adapter, scb_t *scb, struct scsi_cmnd *scp) { mbox_ccb_t *ccb; mraid_passthru_t *pthru; uint8_t channel; uint8_t target; ccb = (mbox_ccb_t *)scb->ccb; pthru = ccb->pthru; channel = scb->dev_channel; target = scb->dev_target; // 0=6sec, 1=60sec, 2=10min, 3=3hrs, 4=NO timeout pthru->timeout = 4; pthru->ars = 1; pthru->islogical = 0; pthru->channel = 0; pthru->target = (channel << 4) | target; pthru->logdrv = SCP2LUN(scp); pthru->reqsenselen = 14; pthru->cdblen = scp->cmd_len; memcpy(pthru->cdb, scp->cmnd, scp->cmd_len); if (scsi_bufflen(scp)) { pthru->dataxferlen = scsi_bufflen(scp); pthru->dataxferaddr = ccb->sgl_dma_h; pthru->numsge = megaraid_mbox_mksgl(adapter, scb); } else { pthru->dataxferaddr = 0; pthru->dataxferlen = 0; pthru->numsge = 0; } return; } /** * megaraid_mbox_prepare_epthru - prepare a command for physical devices * @adapter : pointer to controller's soft state * @scb : scsi control block * @scp : scsi command from the mid-layer * * Prepare a command for the scsi physical devices. This routine prepares * commands for devices which can take extended CDBs (>10 bytes). */ static void megaraid_mbox_prepare_epthru(adapter_t *adapter, scb_t *scb, struct scsi_cmnd *scp) { mbox_ccb_t *ccb; mraid_epassthru_t *epthru; uint8_t channel; uint8_t target; ccb = (mbox_ccb_t *)scb->ccb; epthru = ccb->epthru; channel = scb->dev_channel; target = scb->dev_target; // 0=6sec, 1=60sec, 2=10min, 3=3hrs, 4=NO timeout epthru->timeout = 4; epthru->ars = 1; epthru->islogical = 0; epthru->channel = 0; epthru->target = (channel << 4) | target; epthru->logdrv = SCP2LUN(scp); epthru->reqsenselen = 14; epthru->cdblen = scp->cmd_len; memcpy(epthru->cdb, scp->cmnd, scp->cmd_len); if (scsi_bufflen(scp)) { epthru->dataxferlen = scsi_bufflen(scp); epthru->dataxferaddr = ccb->sgl_dma_h; epthru->numsge = megaraid_mbox_mksgl(adapter, scb); } else { epthru->dataxferaddr = 0; epthru->dataxferlen = 0; epthru->numsge = 0; } return; } /** * megaraid_ack_sequence - interrupt ack sequence for memory mapped HBAs * @adapter : controller's soft state * * Interrupt acknowledgement sequence for memory mapped HBAs. Find out the * completed command and put them on the completed list for later processing. * * Returns: 1 if the interrupt is valid, 0 otherwise */ static int megaraid_ack_sequence(adapter_t *adapter) { mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); mbox_t *mbox; scb_t *scb; uint8_t nstatus; uint8_t completed[MBOX_MAX_FIRMWARE_STATUS]; struct list_head clist; int handled; uint32_t dword; unsigned long flags; int i, j; mbox = raid_dev->mbox; // move the SCBs from the firmware completed array to our local list INIT_LIST_HEAD(&clist); // loop till F/W has more commands for us to complete handled = 0; spin_lock_irqsave(MAILBOX_LOCK(raid_dev), flags); do { /* * Check if a valid interrupt is pending. If found, force the * interrupt line low. 
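		 * The outbound doorbell reads the magic value 0x10001234 only
		 * when the firmware has completions pending for us; anything
		 * else means the interrupt is not ours. Writing the same
		 * value back clears the doorbell before the completed command
		 * indices are harvested from the mailbox below.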
*/ dword = RDOUTDOOR(raid_dev); if (dword != 0x10001234) break; handled = 1; WROUTDOOR(raid_dev, 0x10001234); nstatus = 0; // wait for valid numstatus to post for (i = 0; i < 0xFFFFF; i++) { if (mbox->numstatus != 0xFF) { nstatus = mbox->numstatus; break; } rmb(); } mbox->numstatus = 0xFF; adapter->outstanding_cmds -= nstatus; for (i = 0; i < nstatus; i++) { // wait for valid command index to post for (j = 0; j < 0xFFFFF; j++) { if (mbox->completed[i] != 0xFF) break; rmb(); } completed[i] = mbox->completed[i]; mbox->completed[i] = 0xFF; if (completed[i] == 0xFF) { con_log(CL_ANN, (KERN_CRIT "megaraid: command posting timed out\n")); BUG(); continue; } // Get SCB associated with this command id if (completed[i] >= MBOX_MAX_SCSI_CMDS) { // a cmm command scb = adapter->uscb_list + (completed[i] - MBOX_MAX_SCSI_CMDS); } else { // an os command scb = adapter->kscb_list + completed[i]; } scb->status = mbox->status; list_add_tail(&scb->list, &clist); } // Acknowledge interrupt WRINDOOR(raid_dev, 0x02); } while(1); spin_unlock_irqrestore(MAILBOX_LOCK(raid_dev), flags); // put the completed commands in the completed list. DPC would // complete these commands later spin_lock_irqsave(COMPLETED_LIST_LOCK(adapter), flags); list_splice(&clist, &adapter->completed_list); spin_unlock_irqrestore(COMPLETED_LIST_LOCK(adapter), flags); // schedule the DPC if there is some work for it if (handled) tasklet_schedule(&adapter->dpc_h); return handled; } /** * megaraid_isr - isr for memory based mailbox based controllers * @irq : irq * @devp : pointer to our soft state * * Interrupt service routine for memory-mapped mailbox controllers. */ static irqreturn_t megaraid_isr(int irq, void *devp) { adapter_t *adapter = devp; int handled; handled = megaraid_ack_sequence(adapter); /* Loop through any pending requests */ if (!adapter->quiescent) { megaraid_mbox_runpendq(adapter, NULL); } return IRQ_RETVAL(handled); } /** * megaraid_mbox_dpc - the tasklet to complete the commands from completed list * @devp : pointer to HBA soft state * * Pick up the commands from the completed list and send back to the owners. * This is a reentrant function and does not assume any locks are held while * it is being called. */ static void megaraid_mbox_dpc(unsigned long devp) { adapter_t *adapter = (adapter_t *)devp; mraid_device_t *raid_dev; struct list_head clist; struct scatterlist *sgl; scb_t *scb; scb_t *tmp; struct scsi_cmnd *scp; mraid_passthru_t *pthru; mraid_epassthru_t *epthru; mbox_ccb_t *ccb; int islogical; int pdev_index; int pdev_state; mbox_t *mbox; unsigned long flags; uint8_t c; int status; uioc_t *kioc; if (!adapter) return; raid_dev = ADAP2RAIDDEV(adapter); // move the SCBs from the completed list to our local list INIT_LIST_HEAD(&clist); spin_lock_irqsave(COMPLETED_LIST_LOCK(adapter), flags); list_splice_init(&adapter->completed_list, &clist); spin_unlock_irqrestore(COMPLETED_LIST_LOCK(adapter), flags); list_for_each_entry_safe(scb, tmp, &clist, list) { status = scb->status; scp = scb->scp; ccb = (mbox_ccb_t *)scb->ccb; pthru = ccb->pthru; epthru = ccb->epthru; mbox = ccb->mbox; // Make sure f/w has completed a valid command if (scb->state != SCB_ISSUED) { con_log(CL_ANN, (KERN_CRIT "megaraid critical err: invalid command %d:%d:%p\n", scb->sno, scb->state, scp)); BUG(); continue; // Must never happen! 
} // check for the management command and complete it right away if (scb->sno >= MBOX_MAX_SCSI_CMDS) { scb->state = SCB_FREE; scb->status = status; // remove from local clist list_del_init(&scb->list); kioc = (uioc_t *)scb->gp; kioc->status = 0; megaraid_mbox_mm_done(adapter, scb); continue; } // Was an abort issued for this command earlier if (scb->state & SCB_ABORT) { con_log(CL_ANN, (KERN_NOTICE "megaraid: aborted cmd [%x] completed\n", scb->sno)); } /* * If the inquiry came of a disk drive which is not part of * any RAID array, expose it to the kernel. For this to be * enabled, user must set the "megaraid_expose_unconf_disks" * flag to 1 by specifying it on module parameter list. * This would enable data migration off drives from other * configurations. */ islogical = MRAID_IS_LOGICAL(adapter, scp); if (scp->cmnd[0] == INQUIRY && status == 0 && islogical == 0 && IS_RAID_CH(raid_dev, scb->dev_channel)) { sgl = scsi_sglist(scp); if (sg_page(sgl)) { c = *(unsigned char *) sg_virt(&sgl[0]); } else { con_log(CL_ANN, (KERN_WARNING "megaraid mailbox: invalid sg:%d\n", __LINE__)); c = 0; } if ((c & 0x1F ) == TYPE_DISK) { pdev_index = (scb->dev_channel * 16) + scb->dev_target; pdev_state = raid_dev->pdrv_state[pdev_index] & 0x0F; if (pdev_state == PDRV_ONLINE || pdev_state == PDRV_FAILED || pdev_state == PDRV_RBLD || pdev_state == PDRV_HOTSPARE || megaraid_expose_unconf_disks == 0) { status = 0xF0; } } } // Convert MegaRAID status to Linux error code switch (status) { case 0x00: scp->result = (DID_OK << 16); break; case 0x02: /* set sense_buffer and result fields */ if (mbox->cmd == MBOXCMD_PASSTHRU || mbox->cmd == MBOXCMD_PASSTHRU64) { memcpy(scp->sense_buffer, pthru->reqsensearea, 14); scp->result = SAM_STAT_CHECK_CONDITION; } else { if (mbox->cmd == MBOXCMD_EXTPTHRU) { memcpy(scp->sense_buffer, epthru->reqsensearea, 14); scp->result = SAM_STAT_CHECK_CONDITION; } else scsi_build_sense(scp, 0, ABORTED_COMMAND, 0, 0); } break; case 0x08: scp->result = DID_BUS_BUSY << 16 | status; break; default: /* * If TEST_UNIT_READY fails, we know RESERVATION_STATUS * failed */ if (scp->cmnd[0] == TEST_UNIT_READY) { scp->result = DID_ERROR << 16 | SAM_STAT_RESERVATION_CONFLICT; } else /* * Error code returned is 1 if Reserve or Release * failed or the input parameter is invalid */ if (status == 1 && (scp->cmnd[0] == RESERVE || scp->cmnd[0] == RELEASE)) { scp->result = DID_ERROR << 16 | SAM_STAT_RESERVATION_CONFLICT; } else { scp->result = DID_BAD_TARGET << 16 | status; } } // print a debug message for all failed commands if (status) { megaraid_mbox_display_scb(adapter, scb); } scsi_dma_unmap(scp); // remove from local clist list_del_init(&scb->list); // put back in free list megaraid_dealloc_scb(adapter, scb); // send the scsi packet back to kernel scsi_done(scp); } return; } /** * megaraid_abort_handler - abort the scsi command * @scp : command to be aborted * * Abort a previous SCSI request. Only commands on the pending list can be * aborted. All the commands issued to the F/W must complete. 
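 *
 * Lookup order (sketch): the completed list is scanned first, in case the
 * command finished just before the abort arrived, then the pending list
 * (driver-owned, safe to abort), and finally the firmware-owned SCBs, which
 * can only be reported and waited upon. Pending-list leg, locking elided:
 *
 *	list_for_each_entry_safe(scb, tmp, &adapter->pend_list, list) {
 *		if (scb->scp == scp) {		// driver still owns it
 *			list_del_init(&scb->list);
 *			scp->result = (DID_ABORT << 16);
 *			scsi_done(scp);
 *			megaraid_dealloc_scb(adapter, scb);
 *			return SUCCESS;
 *		}
 *	}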
**/ static int megaraid_abort_handler(struct scsi_cmnd *scp) { adapter_t *adapter; mraid_device_t *raid_dev; scb_t *scb; scb_t *tmp; int found; unsigned long flags; int i; adapter = SCP2ADAPTER(scp); raid_dev = ADAP2RAIDDEV(adapter); con_log(CL_ANN, (KERN_WARNING "megaraid: aborting cmd=%x <c=%d t=%d l=%d>\n", scp->cmnd[0], SCP2CHANNEL(scp), SCP2TARGET(scp), SCP2LUN(scp))); // If FW has stopped responding, simply return failure if (raid_dev->hw_error) { con_log(CL_ANN, (KERN_NOTICE "megaraid: hw error, not aborting\n")); return FAILED; } // There might a race here, where the command was completed by the // firmware and now it is on the completed list. Before we could // complete the command to the kernel in dpc, the abort came. // Find out if this is the case to avoid the race. scb = NULL; spin_lock_irqsave(COMPLETED_LIST_LOCK(adapter), flags); list_for_each_entry_safe(scb, tmp, &adapter->completed_list, list) { if (scb->scp == scp) { // Found command list_del_init(&scb->list); // from completed list con_log(CL_ANN, (KERN_WARNING "megaraid: %d[%d:%d], abort from completed list\n", scb->sno, scb->dev_channel, scb->dev_target)); scp->result = (DID_ABORT << 16); scsi_done(scp); megaraid_dealloc_scb(adapter, scb); spin_unlock_irqrestore(COMPLETED_LIST_LOCK(adapter), flags); return SUCCESS; } } spin_unlock_irqrestore(COMPLETED_LIST_LOCK(adapter), flags); // Find out if this command is still on the pending list. If it is and // was never issued, abort and return success. If the command is owned // by the firmware, we must wait for it to complete by the FW. spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags); list_for_each_entry_safe(scb, tmp, &adapter->pend_list, list) { if (scb->scp == scp) { // Found command list_del_init(&scb->list); // from pending list ASSERT(!(scb->state & SCB_ISSUED)); con_log(CL_ANN, (KERN_WARNING "megaraid abort: [%d:%d], driver owner\n", scb->dev_channel, scb->dev_target)); scp->result = (DID_ABORT << 16); scsi_done(scp); megaraid_dealloc_scb(adapter, scb); spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags); return SUCCESS; } } spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags); // Check do we even own this command, in which case this would be // owned by the firmware. The only way to locate the FW scb is to // traverse through the list of all SCB, since driver does not // maintain these SCBs on any list found = 0; spin_lock_irq(&adapter->lock); for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) { scb = adapter->kscb_list + i; if (scb->scp == scp) { found = 1; if (!(scb->state & SCB_ISSUED)) { con_log(CL_ANN, (KERN_WARNING "megaraid abort: %d[%d:%d], invalid state\n", scb->sno, scb->dev_channel, scb->dev_target)); BUG(); } else { con_log(CL_ANN, (KERN_WARNING "megaraid abort: %d[%d:%d], fw owner\n", scb->sno, scb->dev_channel, scb->dev_target)); } } } spin_unlock_irq(&adapter->lock); if (!found) { con_log(CL_ANN, (KERN_WARNING "megaraid abort: do now own\n")); // FIXME: Should there be a callback for this command? return SUCCESS; } // We cannot actually abort a command owned by firmware, return // failure and wait for reset. In host reset handler, we will find out // if the HBA is still live return FAILED; } /** * megaraid_reset_handler - device reset handler for mailbox based driver * @scp : reference command * * Reset handler for the mailbox based controller. First try to find out if * the FW is still live, in which case the outstanding commands counter mut go * down to 0. 
If that happens, also issue the reservation reset command to * relinquish (possible) reservations on the logical drives connected to this * host. **/ static int megaraid_reset_handler(struct scsi_cmnd *scp) { adapter_t *adapter; scb_t *scb; scb_t *tmp; mraid_device_t *raid_dev; unsigned long flags; uint8_t raw_mbox[sizeof(mbox_t)]; int rval; int recovery_window; int i; uioc_t *kioc; adapter = SCP2ADAPTER(scp); raid_dev = ADAP2RAIDDEV(adapter); // return failure if adapter is not responding if (raid_dev->hw_error) { con_log(CL_ANN, (KERN_NOTICE "megaraid: hw error, cannot reset\n")); return FAILED; } // Under exceptional conditions, FW can take up to 3 minutes to // complete command processing. Wait for additional 2 minutes for the // pending commands counter to go down to 0. If it doesn't, let the // controller be marked offline // Also, reset all the commands currently owned by the driver spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags); list_for_each_entry_safe(scb, tmp, &adapter->pend_list, list) { list_del_init(&scb->list); // from pending list if (scb->sno >= MBOX_MAX_SCSI_CMDS) { con_log(CL_ANN, (KERN_WARNING "megaraid: IOCTL packet with %d[%d:%d] being reset\n", scb->sno, scb->dev_channel, scb->dev_target)); scb->status = -1; kioc = (uioc_t *)scb->gp; kioc->status = -EFAULT; megaraid_mbox_mm_done(adapter, scb); } else { if (scb->scp == scp) { // Found command con_log(CL_ANN, (KERN_WARNING "megaraid: %d[%d:%d], reset from pending list\n", scb->sno, scb->dev_channel, scb->dev_target)); } else { con_log(CL_ANN, (KERN_WARNING "megaraid: IO packet with %d[%d:%d] being reset\n", scb->sno, scb->dev_channel, scb->dev_target)); } scb->scp->result = (DID_RESET << 16); scsi_done(scb->scp); megaraid_dealloc_scb(adapter, scb); } } spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags); if (adapter->outstanding_cmds) { con_log(CL_ANN, (KERN_NOTICE "megaraid: %d outstanding commands. 
Max wait %d sec\n", adapter->outstanding_cmds, (MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT))); } recovery_window = MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT; for (i = 0; i < recovery_window; i++) { megaraid_ack_sequence(adapter); // print a message once every 5 seconds only if (!(i % 5)) { con_log(CL_ANN, ( "megaraid mbox: Wait for %d commands to complete:%d\n", adapter->outstanding_cmds, (MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT) - i)); } // bailout if no recovery happened in reset time if (adapter->outstanding_cmds == 0) { break; } msleep(1000); } spin_lock(&adapter->lock); // If still outstanding commands, bail out if (adapter->outstanding_cmds) { con_log(CL_ANN, (KERN_WARNING "megaraid mbox: critical hardware error!\n")); raid_dev->hw_error = 1; rval = FAILED; goto out; } else { con_log(CL_ANN, (KERN_NOTICE "megaraid mbox: reset sequence completed successfully\n")); } // If the controller supports clustering, reset reservations if (!adapter->ha) { rval = SUCCESS; goto out; } // clear reservations if any raw_mbox[0] = CLUSTER_CMD; raw_mbox[2] = RESET_RESERVATIONS; rval = SUCCESS; if (mbox_post_sync_cmd_fast(adapter, raw_mbox) == 0) { con_log(CL_ANN, (KERN_INFO "megaraid: reservation reset\n")); } else { rval = FAILED; con_log(CL_ANN, (KERN_WARNING "megaraid: reservation reset failed\n")); } out: spin_unlock(&adapter->lock); return rval; } /* * START: internal commands library * * This section of the driver has the common routine used by the driver and * also has all the FW routines */ /** * mbox_post_sync_cmd() - blocking command to the mailbox based controllers * @adapter : controller's soft state * @raw_mbox : the mailbox * * Issue a scb in synchronous and non-interrupt mode for mailbox based * controllers. */ static int mbox_post_sync_cmd(adapter_t *adapter, uint8_t raw_mbox[]) { mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); mbox_t *mbox; uint8_t status; int i; mbox = raid_dev->mbox; /* * Wait until mailbox is free */ if (megaraid_busywait_mbox(raid_dev) != 0) goto blocked_mailbox; /* * Copy mailbox data into host structure */ memcpy((caddr_t)mbox, (caddr_t)raw_mbox, 16); mbox->cmdid = 0xFE; mbox->busy = 1; mbox->poll = 0; mbox->ack = 0; mbox->numstatus = 0xFF; mbox->status = 0xFF; wmb(); WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1); // wait for maximum 1 second for status to post. 
If the status is not // available within 1 second, assume FW is initializing and wait // for an extended amount of time if (mbox->numstatus == 0xFF) { // status not yet available udelay(25); for (i = 0; mbox->numstatus == 0xFF && i < 1000; i++) { rmb(); msleep(1); } if (i == 1000) { con_log(CL_ANN, (KERN_NOTICE "megaraid mailbox: wait for FW to boot ")); for (i = 0; (mbox->numstatus == 0xFF) && (i < MBOX_RESET_WAIT); i++) { rmb(); con_log(CL_ANN, ("\b\b\b\b\b[%03d]", MBOX_RESET_WAIT - i)); msleep(1000); } if (i == MBOX_RESET_WAIT) { con_log(CL_ANN, ( "\nmegaraid mailbox: status not available\n")); return -1; } con_log(CL_ANN, ("\b\b\b\b\b[ok] \n")); } } // wait for maximum 1 second for poll semaphore if (mbox->poll != 0x77) { udelay(25); for (i = 0; (mbox->poll != 0x77) && (i < 1000); i++) { rmb(); msleep(1); } if (i == 1000) { con_log(CL_ANN, (KERN_WARNING "megaraid mailbox: could not get poll semaphore\n")); return -1; } } WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x2); wmb(); // wait for maximum 1 second for acknowledgement if (RDINDOOR(raid_dev) & 0x2) { udelay(25); for (i = 0; (RDINDOOR(raid_dev) & 0x2) && (i < 1000); i++) { rmb(); msleep(1); } if (i == 1000) { con_log(CL_ANN, (KERN_WARNING "megaraid mailbox: could not acknowledge\n")); return -1; } } mbox->poll = 0; mbox->ack = 0x77; status = mbox->status; // invalidate the completed command id array. After command // completion, firmware would write the valid id. mbox->numstatus = 0xFF; mbox->status = 0xFF; for (i = 0; i < MBOX_MAX_FIRMWARE_STATUS; i++) { mbox->completed[i] = 0xFF; } return status; blocked_mailbox: con_log(CL_ANN, (KERN_WARNING "megaraid: blocked mailbox\n") ); return -1; } /** * mbox_post_sync_cmd_fast - blocking command to the mailbox based controllers * @adapter : controller's soft state * @raw_mbox : the mailbox * * Issue a scb in synchronous and non-interrupt mode for mailbox based * controllers. This is a faster version of the synchronous command and * therefore can be called in interrupt-context as well. */ static int mbox_post_sync_cmd_fast(adapter_t *adapter, uint8_t raw_mbox[]) { mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); mbox_t *mbox; long i; mbox = raid_dev->mbox; // return immediately if the mailbox is busy if (mbox->busy) return -1; // Copy mailbox data into host structure memcpy((caddr_t)mbox, (caddr_t)raw_mbox, 14); mbox->cmdid = 0xFE; mbox->busy = 1; mbox->poll = 0; mbox->ack = 0; mbox->numstatus = 0xFF; mbox->status = 0xFF; wmb(); WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1); for (i = 0; i < MBOX_SYNC_WAIT_CNT; i++) { if (mbox->numstatus != 0xFF) break; rmb(); udelay(MBOX_SYNC_DELAY_200); } if (i == MBOX_SYNC_WAIT_CNT) { // We may need to re-calibrate the counter con_log(CL_ANN, (KERN_CRIT "megaraid: fast sync command timed out\n")); } WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x2); wmb(); return mbox->status; } /** * megaraid_busywait_mbox() - Wait until the controller's mailbox is available * @raid_dev : RAID device (HBA) soft state * * Wait until the controller's mailbox is available to accept more commands. * Wait for at most 1 second. */ static int megaraid_busywait_mbox(mraid_device_t *raid_dev) { mbox_t *mbox = raid_dev->mbox; int i = 0; if (mbox->busy) { udelay(25); for (i = 0; mbox->busy && i < 1000; i++) msleep(1); } if (i < 1000) return 0; else return -1; } /** * megaraid_mbox_product_info - some static information about the controller * @adapter : our soft state * * Issue commands to the controller to grab some parameters required by our * caller. 
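 *
 * Both firmware calls below use the same synchronous mailbox pattern;
 * sketch of the ENQUIRY3 issue (error path abbreviated, the real code also
 * frees the pinfo DMA buffer before returning):
 *
 *	mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h;
 *	raw_mbox[0] = FC_NEW_CONFIG;
 *	raw_mbox[2] = NC_SUBOP_ENQUIRY3;
 *	raw_mbox[3] = ENQ3_GET_SOLICITED_FULL;
 *	if (mbox_post_sync_cmd(adapter, raw_mbox) != 0)
 *		return -1;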
*/ static int megaraid_mbox_product_info(adapter_t *adapter) { mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); mbox_t *mbox; uint8_t raw_mbox[sizeof(mbox_t)]; mraid_pinfo_t *pinfo; dma_addr_t pinfo_dma_h; mraid_inquiry3_t *mraid_inq3; int i; memset((caddr_t)raw_mbox, 0, sizeof(raw_mbox)); mbox = (mbox_t *)raw_mbox; /* * Issue an ENQUIRY3 command to find out certain adapter parameters, * e.g., max channels, max commands etc. */ pinfo = dma_alloc_coherent(&adapter->pdev->dev, sizeof(mraid_pinfo_t), &pinfo_dma_h, GFP_KERNEL); if (pinfo == NULL) { con_log(CL_ANN, (KERN_WARNING "megaraid: out of memory, %s %d\n", __func__, __LINE__)); return -1; } mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h; memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE); raw_mbox[0] = FC_NEW_CONFIG; raw_mbox[2] = NC_SUBOP_ENQUIRY3; raw_mbox[3] = ENQ3_GET_SOLICITED_FULL; // Issue the command if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) { con_log(CL_ANN, (KERN_WARNING "megaraid: Inquiry3 failed\n")); dma_free_coherent(&adapter->pdev->dev, sizeof(mraid_pinfo_t), pinfo, pinfo_dma_h); return -1; } /* * Collect information about state of each physical drive * attached to the controller. We will expose all the disks * which are not part of RAID */ mraid_inq3 = (mraid_inquiry3_t *)adapter->ibuf; for (i = 0; i < MBOX_MAX_PHYSICAL_DRIVES; i++) { raid_dev->pdrv_state[i] = mraid_inq3->pdrv_state[i]; } /* * Get product info for information like number of channels, * maximum commands supported. */ memset((caddr_t)raw_mbox, 0, sizeof(raw_mbox)); mbox->xferaddr = (uint32_t)pinfo_dma_h; raw_mbox[0] = FC_NEW_CONFIG; raw_mbox[2] = NC_SUBOP_PRODUCT_INFO; if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) { con_log(CL_ANN, (KERN_WARNING "megaraid: product info failed\n")); dma_free_coherent(&adapter->pdev->dev, sizeof(mraid_pinfo_t), pinfo, pinfo_dma_h); return -1; } /* * Setup some parameters for host, as required by our caller */ adapter->max_channel = pinfo->nchannels; /* * we will export all the logical drives on a single channel. * Add 1 since inquires do not come for inititor ID */ adapter->max_target = MAX_LOGICAL_DRIVES_40LD + 1; adapter->max_lun = 8; // up to 8 LUNs for non-disk devices /* * These are the maximum outstanding commands for the scsi-layer */ adapter->max_cmds = MBOX_MAX_SCSI_CMDS; memset(adapter->fw_version, 0, VERSION_SIZE); memset(adapter->bios_version, 0, VERSION_SIZE); memcpy(adapter->fw_version, pinfo->fw_version, 4); adapter->fw_version[4] = 0; memcpy(adapter->bios_version, pinfo->bios_version, 4); adapter->bios_version[4] = 0; con_log(CL_ANN, (KERN_NOTICE "megaraid: fw version:[%s] bios version:[%s]\n", adapter->fw_version, adapter->bios_version)); dma_free_coherent(&adapter->pdev->dev, sizeof(mraid_pinfo_t), pinfo, pinfo_dma_h); return 0; } /** * megaraid_mbox_extended_cdb - check for support for extended CDBs * @adapter : soft state for the controller * * This routine check whether the controller in question supports extended * ( > 10 bytes ) CDBs. 
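 *
 * Sketch of how the probe path is assumed to consume the result (the
 * adapter->max_cdb_sz test in megaraid_mbox_build_cmd() picks between
 * MBOXCMD_EXTPTHRU and MBOXCMD_PASSTHRU64 for physical devices):
 *
 *	if (megaraid_mbox_extended_cdb(adapter) == 0)
 *		adapter->max_cdb_sz = 16;	// extended passthru supported
 *	else
 *		adapter->max_cdb_sz = 10;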
*/ static int megaraid_mbox_extended_cdb(adapter_t *adapter) { mbox_t *mbox; uint8_t raw_mbox[sizeof(mbox_t)]; int rval; mbox = (mbox_t *)raw_mbox; memset((caddr_t)raw_mbox, 0, sizeof(raw_mbox)); mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h; memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE); raw_mbox[0] = MAIN_MISC_OPCODE; raw_mbox[2] = SUPPORT_EXT_CDB; /* * Issue the command */ rval = 0; if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) { rval = -1; } return rval; } /** * megaraid_mbox_support_ha - Do we support clustering * @adapter : soft state for the controller * @init_id : ID of the initiator * * Determine if the firmware supports clustering and the ID of the initiator. */ static int megaraid_mbox_support_ha(adapter_t *adapter, uint16_t *init_id) { mbox_t *mbox; uint8_t raw_mbox[sizeof(mbox_t)]; int rval; mbox = (mbox_t *)raw_mbox; memset((caddr_t)raw_mbox, 0, sizeof(raw_mbox)); mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h; memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE); raw_mbox[0] = GET_TARGET_ID; // Issue the command *init_id = 7; rval = -1; if (mbox_post_sync_cmd(adapter, raw_mbox) == 0) { *init_id = *(uint8_t *)adapter->ibuf; con_log(CL_ANN, (KERN_INFO "megaraid: cluster firmware, initiator ID: %d\n", *init_id)); rval = 0; } return rval; } /** * megaraid_mbox_support_random_del - Do we support random deletion * @adapter : soft state for the controller * * Determine if the firmware supports random deletion. * Return: 1 is operation supported, 0 otherwise */ static int megaraid_mbox_support_random_del(adapter_t *adapter) { uint8_t raw_mbox[sizeof(mbox_t)]; int rval; /* * Newer firmware on Dell CERC expect a different * random deletion handling, so disable it. */ if (adapter->pdev->vendor == PCI_VENDOR_ID_AMI && adapter->pdev->device == PCI_DEVICE_ID_AMI_MEGARAID3 && adapter->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL && adapter->pdev->subsystem_device == PCI_SUBSYS_ID_CERC_ATA100_4CH && (adapter->fw_version[0] > '6' || (adapter->fw_version[0] == '6' && adapter->fw_version[2] > '6') || (adapter->fw_version[0] == '6' && adapter->fw_version[2] == '6' && adapter->fw_version[3] > '1'))) { con_log(CL_DLEVEL1, ("megaraid: disable random deletion\n")); return 0; } memset((caddr_t)raw_mbox, 0, sizeof(mbox_t)); raw_mbox[0] = FC_DEL_LOGDRV; raw_mbox[2] = OP_SUP_DEL_LOGDRV; // Issue the command rval = 0; if (mbox_post_sync_cmd(adapter, raw_mbox) == 0) { con_log(CL_DLEVEL1, ("megaraid: supports random deletion\n")); rval = 1; } return rval; } /** * megaraid_mbox_get_max_sg - maximum sg elements supported by the firmware * @adapter : soft state for the controller * * Find out the maximum number of scatter-gather elements supported by the * firmware. */ static int megaraid_mbox_get_max_sg(adapter_t *adapter) { mbox_t *mbox; uint8_t raw_mbox[sizeof(mbox_t)]; int nsg; mbox = (mbox_t *)raw_mbox; memset((caddr_t)raw_mbox, 0, sizeof(mbox_t)); mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h; memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE); raw_mbox[0] = MAIN_MISC_OPCODE; raw_mbox[2] = GET_MAX_SG_SUPPORT; // Issue the command if (mbox_post_sync_cmd(adapter, raw_mbox) == 0) { nsg = *(uint8_t *)adapter->ibuf; } else { nsg = MBOX_DEFAULT_SG_SIZE; } if (nsg > MBOX_MAX_SG_SIZE) nsg = MBOX_MAX_SG_SIZE; return nsg; } /** * megaraid_mbox_enum_raid_scsi - enumerate the RAID and SCSI channels * @adapter : soft state for the controller * * Enumerate the RAID and SCSI channels for ROMB platforms so that channels * can be exported as regular SCSI channels. 
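 *
 * The resulting bitmap in raid_dev->channel_class is consulted later through
 * IS_RAID_CH(); for example, megaraid_mbox_dpc() uses it when deciding
 * whether an unconfigured disk behind a RAID channel should be hidden from
 * the midlayer (sketch):
 *
 *	if (scp->cmnd[0] == INQUIRY && !islogical &&
 *	    IS_RAID_CH(raid_dev, scb->dev_channel))
 *		;	// inspect pdrv_state[] before exposing the device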
*/ static void megaraid_mbox_enum_raid_scsi(adapter_t *adapter) { mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); mbox_t *mbox; uint8_t raw_mbox[sizeof(mbox_t)]; mbox = (mbox_t *)raw_mbox; memset((caddr_t)raw_mbox, 0, sizeof(mbox_t)); mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h; memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE); raw_mbox[0] = CHNL_CLASS; raw_mbox[2] = GET_CHNL_CLASS; // Issue the command. If the command fails, all channels are RAID // channels raid_dev->channel_class = 0xFF; if (mbox_post_sync_cmd(adapter, raw_mbox) == 0) { raid_dev->channel_class = *(uint8_t *)adapter->ibuf; } return; } /** * megaraid_mbox_flush_cache - flush adapter and disks cache * @adapter : soft state for the controller * * Flush adapter cache followed by disks cache. */ static void megaraid_mbox_flush_cache(adapter_t *adapter) { uint8_t raw_mbox[sizeof(mbox_t)]; memset((caddr_t)raw_mbox, 0, sizeof(mbox_t)); raw_mbox[0] = FLUSH_ADAPTER; if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) { con_log(CL_ANN, ("megaraid: flush adapter failed\n")); } raw_mbox[0] = FLUSH_SYSTEM; if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) { con_log(CL_ANN, ("megaraid: flush disks cache failed\n")); } return; } /** * megaraid_mbox_fire_sync_cmd - fire the sync cmd * @adapter : soft state for the controller * * Clears the pending cmds in FW and reinits its RAID structs. */ static int megaraid_mbox_fire_sync_cmd(adapter_t *adapter) { mbox_t *mbox; uint8_t raw_mbox[sizeof(mbox_t)]; mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); int status = 0; int i; uint32_t dword; memset((caddr_t)raw_mbox, 0, sizeof(mbox_t)); raw_mbox[0] = 0xFF; mbox = raid_dev->mbox; /* Wait until mailbox is free */ if (megaraid_busywait_mbox(raid_dev) != 0) { status = 1; goto blocked_mailbox; } /* Copy mailbox data into host structure */ memcpy((caddr_t)mbox, (caddr_t)raw_mbox, 16); mbox->cmdid = 0xFE; mbox->busy = 1; mbox->poll = 0; mbox->ack = 0; mbox->numstatus = 0; mbox->status = 0; wmb(); WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1); /* Wait for maximum 1 min for status to post. * If the Firmware SUPPORTS the ABOVE COMMAND, * mbox->cmd will be set to 0 * else * the firmware will reject the command with * mbox->numstatus set to 1 */ i = 0; status = 0; while (!mbox->numstatus && mbox->cmd == 0xFF) { rmb(); msleep(1); i++; if (i > 1000 * 60) { status = 1; break; } } if (mbox->numstatus == 1) status = 1; /*cmd not supported*/ /* Check for interrupt line */ dword = RDOUTDOOR(raid_dev); WROUTDOOR(raid_dev, dword); WRINDOOR(raid_dev,2); return status; blocked_mailbox: con_log(CL_ANN, (KERN_WARNING "megaraid: blocked mailbox\n")); return status; } /** * megaraid_mbox_display_scb - display SCB information, mostly debug purposes * @adapter : controller's soft state * @scb : SCB to be displayed * * Diplay information about the given SCB iff the current debug level is * verbose. 
*/ static void megaraid_mbox_display_scb(adapter_t *adapter, scb_t *scb) { mbox_ccb_t *ccb; struct scsi_cmnd *scp; mbox_t *mbox; int level; int i; ccb = (mbox_ccb_t *)scb->ccb; scp = scb->scp; mbox = ccb->mbox; level = CL_DLEVEL3; con_log(level, (KERN_NOTICE "megaraid mailbox: status:%#x cmd:%#x id:%#x ", scb->status, mbox->cmd, scb->sno)); con_log(level, ("sec:%#x lba:%#x addr:%#x ld:%d sg:%d\n", mbox->numsectors, mbox->lba, mbox->xferaddr, mbox->logdrv, mbox->numsge)); if (!scp) return; con_log(level, (KERN_NOTICE "scsi cmnd: ")); for (i = 0; i < scp->cmd_len; i++) { con_log(level, ("%#2.02x ", scp->cmnd[i])); } con_log(level, ("\n")); return; } /** * megaraid_mbox_setup_device_map - manage device ids * @adapter : Driver's soft state * * Manage the device ids to have an appropriate mapping between the kernel * scsi addresses and megaraid scsi and logical drive addresses. We export * scsi devices on their actual addresses, whereas the logical drives are * exported on a virtual scsi channel. */ static void megaraid_mbox_setup_device_map(adapter_t *adapter) { uint8_t c; uint8_t t; /* * First fill the values on the logical drive channel */ for (t = 0; t < LSI_MAX_LOGICAL_DRIVES_64LD; t++) adapter->device_ids[adapter->max_channel][t] = (t < adapter->init_id) ? t : t - 1; adapter->device_ids[adapter->max_channel][adapter->init_id] = 0xFF; /* * Fill the values on the physical devices channels */ for (c = 0; c < adapter->max_channel; c++) for (t = 0; t < LSI_MAX_LOGICAL_DRIVES_64LD; t++) adapter->device_ids[c][t] = (c << 8) | t; } /* * END: internal commands library */ /* * START: Interface for the common management module * * This is the module, which interfaces with the common management module to * provide support for ioctl and sysfs */ /** * megaraid_cmm_register - register with the management module * @adapter : HBA soft state * * Register with the management module, which allows applications to issue * ioctl calls to the drivers. This interface is used by the management module * to setup sysfs support as well. */ static int megaraid_cmm_register(adapter_t *adapter) { mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); mraid_mmadp_t adp; scb_t *scb; mbox_ccb_t *ccb; int rval; int i; // Allocate memory for the base list of scb for management module. adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL); if (adapter->uscb_list == NULL) { con_log(CL_ANN, (KERN_WARNING "megaraid: out of memory, %s %d\n", __func__, __LINE__)); return -1; } // Initialize the synchronization parameters for resources for // commands for management module INIT_LIST_HEAD(&adapter->uscb_pool); spin_lock_init(USER_FREE_LIST_LOCK(adapter)); // link all the packets. Note, CCB for commands, coming from the // commom management module, mailbox physical address are already // setup by it. 
We just need placeholder for that in our local command // control blocks for (i = 0; i < MBOX_MAX_USER_CMDS; i++) { scb = adapter->uscb_list + i; ccb = raid_dev->uccb_list + i; scb->ccb = (caddr_t)ccb; ccb->mbox64 = raid_dev->umbox64 + i; ccb->mbox = &ccb->mbox64->mbox32; ccb->raw_mbox = (uint8_t *)ccb->mbox; scb->gp = 0; // COMMAND ID 0 - (MBOX_MAX_SCSI_CMDS-1) ARE RESERVED FOR // COMMANDS COMING FROM IO SUBSYSTEM (MID-LAYER) scb->sno = i + MBOX_MAX_SCSI_CMDS; scb->scp = NULL; scb->state = SCB_FREE; scb->dma_direction = DMA_NONE; scb->dma_type = MRAID_DMA_NONE; scb->dev_channel = -1; scb->dev_target = -1; // put scb in the free pool list_add_tail(&scb->list, &adapter->uscb_pool); } adp.unique_id = adapter->unique_id; adp.drvr_type = DRVRTYPE_MBOX; adp.drvr_data = (unsigned long)adapter; adp.pdev = adapter->pdev; adp.issue_uioc = megaraid_mbox_mm_handler; adp.timeout = MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT; adp.max_kioc = MBOX_MAX_USER_CMDS; if ((rval = mraid_mm_register_adp(&adp)) != 0) { con_log(CL_ANN, (KERN_WARNING "megaraid mbox: did not register with CMM\n")); kfree(adapter->uscb_list); } return rval; } /** * megaraid_cmm_unregister - un-register with the management module * @adapter : HBA soft state * * Un-register with the management module. * FIXME: mgmt module must return failure for unregister if it has pending * commands in LLD. */ static int megaraid_cmm_unregister(adapter_t *adapter) { kfree(adapter->uscb_list); mraid_mm_unregister_adp(adapter->unique_id); return 0; } /** * megaraid_mbox_mm_handler - interface for CMM to issue commands to LLD * @drvr_data : LLD specific data * @kioc : CMM interface packet * @action : command action * * This routine is invoked whenever the Common Management Module (CMM) has a * command for us. The 'action' parameter specifies if this is a new command * or otherwise. */ static int megaraid_mbox_mm_handler(unsigned long drvr_data, uioc_t *kioc, uint32_t action) { adapter_t *adapter; if (action != IOCTL_ISSUE) { con_log(CL_ANN, (KERN_WARNING "megaraid: unsupported management action:%#2x\n", action)); return (-ENOTSUPP); } adapter = (adapter_t *)drvr_data; // make sure this adapter is not being detached right now. if (atomic_read(&adapter->being_detached)) { con_log(CL_ANN, (KERN_WARNING "megaraid: reject management request, detaching\n")); return (-ENODEV); } switch (kioc->opcode) { case GET_ADAP_INFO: kioc->status = gather_hbainfo(adapter, (mraid_hba_info_t *) (unsigned long)kioc->buf_vaddr); kioc->done(kioc); return kioc->status; case MBOX_CMD: return megaraid_mbox_mm_command(adapter, kioc); default: kioc->status = (-EINVAL); kioc->done(kioc); return (-EINVAL); } return 0; // not reached } /** * megaraid_mbox_mm_command - issues commands routed through CMM * @adapter : HBA soft state * @kioc : management command packet * * Issues commands, which are routed through the management module. 
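 *
 * Sketch of a kioc-based invocation, mirroring megaraid_sysfs_get_ldmap():
 *
 *	uioc->cmdbuf	= (uint64_t)(unsigned long)mbox64;
 *	uioc->buf_vaddr	= (caddr_t)adapter;
 *	uioc->status	= -ENODATA;
 *	uioc->done	= megaraid_sysfs_get_ldmap_done;
 *	if (megaraid_mbox_mm_command(adapter, uioc) == 0)
 *		wait_event(raid_dev->sysfs_wait_q,
 *			   uioc->status != -ENODATA);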
*/ static int megaraid_mbox_mm_command(adapter_t *adapter, uioc_t *kioc) { struct list_head *head = &adapter->uscb_pool; mbox64_t *mbox64; uint8_t *raw_mbox; scb_t *scb; mbox_ccb_t *ccb; unsigned long flags; // detach one scb from free pool spin_lock_irqsave(USER_FREE_LIST_LOCK(adapter), flags); if (list_empty(head)) { // should never happen because of CMM con_log(CL_ANN, (KERN_WARNING "megaraid mbox: bug in cmm handler, lost resources\n")); spin_unlock_irqrestore(USER_FREE_LIST_LOCK(adapter), flags); return (-EINVAL); } scb = list_entry(head->next, scb_t, list); list_del_init(&scb->list); spin_unlock_irqrestore(USER_FREE_LIST_LOCK(adapter), flags); scb->state = SCB_ACTIVE; scb->dma_type = MRAID_DMA_NONE; scb->dma_direction = DMA_NONE; ccb = (mbox_ccb_t *)scb->ccb; mbox64 = (mbox64_t *)(unsigned long)kioc->cmdbuf; raw_mbox = (uint8_t *)&mbox64->mbox32; memcpy(ccb->mbox64, mbox64, sizeof(mbox64_t)); scb->gp = (unsigned long)kioc; /* * If it is a logdrv random delete operation, we have to wait till * there are no outstanding cmds at the fw and then issue it directly */ if (raw_mbox[0] == FC_DEL_LOGDRV && raw_mbox[2] == OP_DEL_LOGDRV) { if (wait_till_fw_empty(adapter)) { con_log(CL_ANN, (KERN_NOTICE "megaraid mbox: LD delete, timed out\n")); kioc->status = -ETIME; scb->status = -1; megaraid_mbox_mm_done(adapter, scb); return (-ETIME); } INIT_LIST_HEAD(&scb->list); scb->state = SCB_ISSUED; if (mbox_post_cmd(adapter, scb) != 0) { con_log(CL_ANN, (KERN_NOTICE "megaraid mbox: LD delete, mailbox busy\n")); kioc->status = -EBUSY; scb->status = -1; megaraid_mbox_mm_done(adapter, scb); return (-EBUSY); } return 0; } // put the command on the pending list and execute megaraid_mbox_runpendq(adapter, scb); return 0; } static int wait_till_fw_empty(adapter_t *adapter) { unsigned long flags = 0; int i; /* * Set the quiescent flag to stop issuing cmds to FW. */ spin_lock_irqsave(&adapter->lock, flags); adapter->quiescent++; spin_unlock_irqrestore(&adapter->lock, flags); /* * Wait till there are no more cmds outstanding at FW. Try for at most * 60 seconds */ for (i = 0; i < 60 && adapter->outstanding_cmds; i++) { con_log(CL_DLEVEL1, (KERN_INFO "megaraid: FW has %d pending commands\n", adapter->outstanding_cmds)); msleep(1000); } return adapter->outstanding_cmds; } /** * megaraid_mbox_mm_done - callback for CMM commands * @adapter : HBA soft state * @scb : completed command * * Callback routine for internal commands originated from the management * module. 
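 *
 * Completion-side sketch, as done in megaraid_mbox_dpc() for command ids at
 * or above MBOX_MAX_SCSI_CMDS:
 *
 *	scb->state	= SCB_FREE;
 *	scb->status	= status;
 *	list_del_init(&scb->list);
 *	kioc		= (uioc_t *)scb->gp;
 *	kioc->status	= 0;
 *	megaraid_mbox_mm_done(adapter, scb);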
 */
static void
megaraid_mbox_mm_done(adapter_t *adapter, scb_t *scb)
{
	uioc_t			*kioc;
	mbox64_t		*mbox64;
	uint8_t			*raw_mbox;
	unsigned long		flags;

	kioc			= (uioc_t *)scb->gp;
	mbox64			= (mbox64_t *)(unsigned long)kioc->cmdbuf;
	mbox64->mbox32.status	= scb->status;
	raw_mbox		= (uint8_t *)&mbox64->mbox32;

	// put scb in the free pool
	scb->state	= SCB_FREE;
	scb->scp	= NULL;

	spin_lock_irqsave(USER_FREE_LIST_LOCK(adapter), flags);

	list_add(&scb->list, &adapter->uscb_pool);

	spin_unlock_irqrestore(USER_FREE_LIST_LOCK(adapter), flags);

	// if a delete logical drive operation succeeded, restart the
	// controller
	if (raw_mbox[0] == FC_DEL_LOGDRV && raw_mbox[2] == OP_DEL_LOGDRV) {
		adapter->quiescent--;

		megaraid_mbox_runpendq(adapter, NULL);
	}

	kioc->done(kioc);

	return;
}

/**
 * gather_hbainfo - HBA characteristics for the applications
 * @adapter	: HBA soft state
 * @hinfo	: pointer to the caller's host info structure
 */
static int
gather_hbainfo(adapter_t *adapter, mraid_hba_info_t *hinfo)
{
	hinfo->pci_vendor_id		= adapter->pdev->vendor;
	hinfo->pci_device_id		= adapter->pdev->device;
	hinfo->subsys_vendor_id		= adapter->pdev->subsystem_vendor;
	hinfo->subsys_device_id		= adapter->pdev->subsystem_device;

	hinfo->pci_bus			= adapter->pdev->bus->number;
	hinfo->pci_dev_fn		= adapter->pdev->devfn;
	hinfo->pci_slot			= PCI_SLOT(adapter->pdev->devfn);
	hinfo->irq			= adapter->host->irq;
	hinfo->baseport			= ADAP2RAIDDEV(adapter)->baseport;

	hinfo->unique_id		= (hinfo->pci_bus << 8) |
						adapter->pdev->devfn;
	hinfo->host_no			= adapter->host->host_no;

	return 0;
}

/*
 * END: Interface for the common management module
 */


/**
 * megaraid_sysfs_alloc_resources - allocate sysfs related resources
 * @adapter	: controller's soft state
 *
 * Allocate packets required to issue FW calls whenever the sysfs attributes
 * are read. These attributes would require up-to-date information from the
 * FW. Also set up resources for mutual exclusion to share these resources and
 * the wait queue.
 *
 * Return 0 on success.
 * Return -ERROR_CODE on failure.
*/ static int megaraid_sysfs_alloc_resources(adapter_t *adapter) { mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); int rval = 0; raid_dev->sysfs_uioc = kmalloc(sizeof(uioc_t), GFP_KERNEL); raid_dev->sysfs_mbox64 = kmalloc(sizeof(mbox64_t), GFP_KERNEL); raid_dev->sysfs_buffer = dma_alloc_coherent(&adapter->pdev->dev, PAGE_SIZE, &raid_dev->sysfs_buffer_dma, GFP_KERNEL); if (!raid_dev->sysfs_uioc || !raid_dev->sysfs_mbox64 || !raid_dev->sysfs_buffer) { con_log(CL_ANN, (KERN_WARNING "megaraid: out of memory, %s %d\n", __func__, __LINE__)); rval = -ENOMEM; megaraid_sysfs_free_resources(adapter); } mutex_init(&raid_dev->sysfs_mtx); init_waitqueue_head(&raid_dev->sysfs_wait_q); return rval; } /** * megaraid_sysfs_free_resources - free sysfs related resources * @adapter : controller's soft state * * Free packets allocated for sysfs FW commands */ static void megaraid_sysfs_free_resources(adapter_t *adapter) { mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); kfree(raid_dev->sysfs_uioc); kfree(raid_dev->sysfs_mbox64); if (raid_dev->sysfs_buffer) { dma_free_coherent(&adapter->pdev->dev, PAGE_SIZE, raid_dev->sysfs_buffer, raid_dev->sysfs_buffer_dma); } } /** * megaraid_sysfs_get_ldmap_done - callback for get ldmap * @uioc : completed packet * * Callback routine called in the ISR/tasklet context for get ldmap call */ static void megaraid_sysfs_get_ldmap_done(uioc_t *uioc) { adapter_t *adapter = (adapter_t *)uioc->buf_vaddr; mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); uioc->status = 0; wake_up(&raid_dev->sysfs_wait_q); } /** * megaraid_sysfs_get_ldmap_timeout - timeout handling for get ldmap * @t : timed out timer * * Timeout routine to recover and return to application, in case the adapter * has stopped responding. A timeout of 60 seconds for this command seems like * a good value. */ static void megaraid_sysfs_get_ldmap_timeout(struct timer_list *t) { struct uioc_timeout *timeout = from_timer(timeout, t, timer); uioc_t *uioc = timeout->uioc; adapter_t *adapter = (adapter_t *)uioc->buf_vaddr; mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); uioc->status = -ETIME; wake_up(&raid_dev->sysfs_wait_q); } /** * megaraid_sysfs_get_ldmap - get update logical drive map * @adapter : controller's soft state * * This routine will be called whenever user reads the logical drive * attributes, go get the current logical drive mapping table from the * firmware. We use the management API's to issue commands to the controller. * * NOTE: The commands issuance functionality is not generalized and * implemented in context of "get ld map" command only. If required, the * command issuance logical can be trivially pulled out and implemented as a * standalone library. For now, this should suffice since there is no other * user of this interface. * * Return 0 on success. * Return -1 on failure. 
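 *
 * Consumer sketch, mirroring megaraid_mbox_ld_show(): refresh the map, then
 * look up the (initiator-adjusted) scsi id in the cached copy:
 *
 *	if (megaraid_sysfs_get_ldmap(adapter) == 0) {
 *		for (i = 0; i < MAX_LOGICAL_DRIVES_40LD; i++)
 *			if (raid_dev->curr_ldmap[i] == mapped_sdev_id)
 *				break;		// logical drive number is i
 *	}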
*/ static int megaraid_sysfs_get_ldmap(adapter_t *adapter) { mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); uioc_t *uioc; mbox64_t *mbox64; mbox_t *mbox; char *raw_mbox; struct uioc_timeout timeout; caddr_t ldmap; int rval = 0; /* * Allow only one read at a time to go through the sysfs attributes */ mutex_lock(&raid_dev->sysfs_mtx); uioc = raid_dev->sysfs_uioc; mbox64 = raid_dev->sysfs_mbox64; ldmap = raid_dev->sysfs_buffer; memset(uioc, 0, sizeof(uioc_t)); memset(mbox64, 0, sizeof(mbox64_t)); memset(ldmap, 0, sizeof(raid_dev->curr_ldmap)); mbox = &mbox64->mbox32; raw_mbox = (char *)mbox; uioc->cmdbuf = (uint64_t)(unsigned long)mbox64; uioc->buf_vaddr = (caddr_t)adapter; uioc->status = -ENODATA; uioc->done = megaraid_sysfs_get_ldmap_done; /* * Prepare the mailbox packet to get the current logical drive mapping * table */ mbox->xferaddr = (uint32_t)raid_dev->sysfs_buffer_dma; raw_mbox[0] = FC_DEL_LOGDRV; raw_mbox[2] = OP_GET_LDID_MAP; /* * Setup a timer to recover from a non-responding controller */ timeout.uioc = uioc; timer_setup_on_stack(&timeout.timer, megaraid_sysfs_get_ldmap_timeout, 0); timeout.timer.expires = jiffies + 60 * HZ; add_timer(&timeout.timer); /* * Send the command to the firmware */ rval = megaraid_mbox_mm_command(adapter, uioc); if (rval == 0) { // command successfully issued wait_event(raid_dev->sysfs_wait_q, (uioc->status != -ENODATA)); /* * Check if the command timed out */ if (uioc->status == -ETIME) { con_log(CL_ANN, (KERN_NOTICE "megaraid: sysfs get ld map timed out\n")); rval = -ETIME; } else { rval = mbox->status; } if (rval == 0) { memcpy(raid_dev->curr_ldmap, ldmap, sizeof(raid_dev->curr_ldmap)); } else { con_log(CL_ANN, (KERN_NOTICE "megaraid: get ld map failed with %x\n", rval)); } } else { con_log(CL_ANN, (KERN_NOTICE "megaraid: could not issue ldmap command:%x\n", rval)); } del_timer_sync(&timeout.timer); destroy_timer_on_stack(&timeout.timer); mutex_unlock(&raid_dev->sysfs_mtx); return rval; } /** * megaraid_mbox_app_hndl_show - display application handle for this adapter * @dev : class device object representation for the host * @attr : device attribute (unused) * @buf : buffer to send data to * * Display the handle used by the applications while executing management * tasks on the adapter. We invoke a management module API to get the adapter * handle, since we do not interface with applications directly. */ static ssize_t megaraid_mbox_app_hndl_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); adapter_t *adapter = (adapter_t *)SCSIHOST2ADAP(shost); uint32_t app_hndl; app_hndl = mraid_mm_adapter_app_handle(adapter->unique_id); return sysfs_emit(buf, "%u\n", app_hndl); } /** * megaraid_mbox_ld_show - display the logical drive number for this device * @dev : device object representation for the scsi device * @attr : device attribute to show * @buf : buffer to send data to * * Display the logical drive number for the device in question, if it a valid * logical drive. For physical devices, "-1" is returned. 
* * The logical drive number is displayed in following format: * * <SCSI ID> <LD NUM> <LD STICKY ID> <APP ADAPTER HANDLE> * * <int> <int> <int> <int> */ static ssize_t megaraid_mbox_ld_show(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_device *sdev = to_scsi_device(dev); adapter_t *adapter = (adapter_t *)SCSIHOST2ADAP(sdev->host); mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); int scsi_id = -1; int logical_drv = -1; int ldid_map = -1; uint32_t app_hndl = 0; int mapped_sdev_id; int rval; int i; if (raid_dev->random_del_supported && MRAID_IS_LOGICAL_SDEV(adapter, sdev)) { rval = megaraid_sysfs_get_ldmap(adapter); if (rval == 0) { for (i = 0; i < MAX_LOGICAL_DRIVES_40LD; i++) { mapped_sdev_id = sdev->id; if (sdev->id > adapter->init_id) { mapped_sdev_id -= 1; } if (raid_dev->curr_ldmap[i] == mapped_sdev_id) { scsi_id = sdev->id; logical_drv = i; ldid_map = raid_dev->curr_ldmap[i]; app_hndl = mraid_mm_adapter_app_handle( adapter->unique_id); break; } } } else { con_log(CL_ANN, (KERN_NOTICE "megaraid: sysfs get ld map failed: %x\n", rval)); } } return sysfs_emit(buf, "%d %d %d %d\n", scsi_id, logical_drv, ldid_map, app_hndl); } /* * END: Mailbox Low Level Driver */ module_init(megaraid_init); module_exit(megaraid_exit);
linux-master
drivers/scsi/megaraid/megaraid_mbox.c
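megaraid_mbox_ld_show() above emits "<SCSI ID> <LD NUM> <LD STICKY ID> <APP ADAPTER HANDLE>" as four integers, with -1 in the LD fields for physical devices. The user-space sketch below only illustrates parsing that documented format; the attribute's exact sysfs file name and location are not shown in this file and are left as an assumption, so the program takes the path as an argument.

#include <stdio.h>

int main(int argc, char **argv)
{
	int scsi_id, ld_num, sticky_id, app_hndl;
	FILE *f;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <path to ld attribute file>\n", argv[0]);
		return 1;
	}

	f = fopen(argv[1], "r");
	if (!f) {
		perror("fopen");
		return 1;
	}

	/* Format documented above: "<SCSI ID> <LD NUM> <LD STICKY ID> <APP ADAPTER HANDLE>" */
	if (fscanf(f, "%d %d %d %d", &scsi_id, &ld_num, &sticky_id, &app_hndl) != 4) {
		fprintf(stderr, "unexpected attribute format\n");
		fclose(f);
		return 1;
	}
	fclose(f);

	if (ld_num == -1)
		printf("scsi id %d: not a logical drive\n", scsi_id);
	else
		printf("scsi id %d -> logical drive %d (sticky id %d, app handle %d)\n",
		       scsi_id, ld_num, sticky_id, app_hndl);
	return 0;
}

Run with the full path of the attribute file for the scsi device of interest; the program reports either the logical drive mapping or that the device is a physical one.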
/* * Linux MegaRAID driver for SAS based RAID controllers * * Copyright (c) 2003-2018 LSI Corporation. * Copyright (c) 2003-2018 Avago Technologies. * Copyright (c) 2003-2018 Broadcom Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * * Authors: Broadcom Inc. * Kashyap Desai <[email protected]> * Sumit Saxena <[email protected]> * Shivasharan S <[email protected]> * * Send feedback to: [email protected] */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/interrupt.h> #include <linux/compat.h> #include <linux/irq_poll.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include "megaraid_sas_fusion.h" #include "megaraid_sas.h" #ifdef CONFIG_DEBUG_FS #include <linux/debugfs.h> struct dentry *megasas_debugfs_root; static ssize_t megasas_debugfs_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct megasas_debugfs_buffer *debug = filp->private_data; if (!debug || !debug->buf) return 0; return simple_read_from_buffer(ubuf, cnt, ppos, debug->buf, debug->len); } static int megasas_debugfs_raidmap_open(struct inode *inode, struct file *file) { struct megasas_instance *instance = inode->i_private; struct megasas_debugfs_buffer *debug; struct fusion_context *fusion; fusion = instance->ctrl_context; debug = kzalloc(sizeof(struct megasas_debugfs_buffer), GFP_KERNEL); if (!debug) return -ENOMEM; debug->buf = (void *)fusion->ld_drv_map[(instance->map_id & 1)]; debug->len = fusion->drv_map_sz; file->private_data = debug; return 0; } static int megasas_debugfs_release(struct inode *inode, struct file *file) { struct megasas_debug_buffer *debug = file->private_data; if (!debug) return 0; file->private_data = NULL; kfree(debug); return 0; } static const struct file_operations megasas_debugfs_raidmap_fops = { .owner = THIS_MODULE, .open = megasas_debugfs_raidmap_open, .read = megasas_debugfs_read, .release = megasas_debugfs_release, }; /* * megasas_init_debugfs : Create debugfs root for megaraid_sas driver */ void megasas_init_debugfs(void) { megasas_debugfs_root = debugfs_create_dir("megaraid_sas", NULL); if (!megasas_debugfs_root) pr_info("Cannot create debugfs root\n"); } /* * megasas_exit_debugfs : Remove debugfs root for megaraid_sas driver */ void megasas_exit_debugfs(void) { debugfs_remove_recursive(megasas_debugfs_root); } /* * megasas_setup_debugfs : Setup debugfs per Fusion adapter * instance: Soft instance of adapter */ void megasas_setup_debugfs(struct megasas_instance *instance) { char name[64]; struct fusion_context *fusion; fusion = instance->ctrl_context; if (fusion) { snprintf(name, sizeof(name), "scsi_host%d", instance->host->host_no); if (!instance->debugfs_root) { instance->debugfs_root = debugfs_create_dir(name, megasas_debugfs_root); if (!instance->debugfs_root) { dev_err(&instance->pdev->dev, "Cannot create per adapter debugfs directory\n"); return; } } snprintf(name, sizeof(name), "raidmap_dump"); 
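		/*
		 * raidmap_dump exposes the driver's local RAID map copy
		 * (fusion->ld_drv_map[map_id & 1], fusion->drv_map_sz bytes)
		 * read-only through megasas_debugfs_raidmap_fops.
		 */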
instance->raidmap_dump = debugfs_create_file(name, S_IRUGO, instance->debugfs_root, instance, &megasas_debugfs_raidmap_fops); if (!instance->raidmap_dump) { dev_err(&instance->pdev->dev, "Cannot create raidmap debugfs file\n"); debugfs_remove(instance->debugfs_root); return; } } } /* * megasas_destroy_debugfs : Destroy debugfs per Fusion adapter * instance: Soft instance of adapter */ void megasas_destroy_debugfs(struct megasas_instance *instance) { debugfs_remove_recursive(instance->debugfs_root); } #else void megasas_init_debugfs(void) { } void megasas_exit_debugfs(void) { } void megasas_setup_debugfs(struct megasas_instance *instance) { } void megasas_destroy_debugfs(struct megasas_instance *instance) { } #endif /*CONFIG_DEBUG_FS*/
linux-master
drivers/scsi/megaraid/megaraid_sas_debugfs.c
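megasas_setup_debugfs() above creates a per-adapter directory named scsi_host<N> under the megaraid_sas debugfs root and a read-only raidmap_dump file inside it. The sketch below is a minimal user-space reader that hex-dumps that file; the /sys/kernel/debug mount point and the scsi_host0 instance are assumptions for the example, and reading normally requires root.

#include <stdio.h>

int main(void)
{
	/* Mount point and host number are assumptions; adjust for the system. */
	const char *path = "/sys/kernel/debug/megaraid_sas/scsi_host0/raidmap_dump";
	unsigned char buf[16];
	size_t n, i, off = 0;
	FILE *f;

	f = fopen(path, "rb");
	if (!f) {
		perror("fopen");
		return 1;
	}

	while ((n = fread(buf, 1, sizeof(buf), f)) > 0) {
		printf("%08zx:", off);
		for (i = 0; i < n; i++)
			printf(" %02x", buf[i]);
		putchar('\n');
		off += n;
	}

	fclose(f);
	return 0;
}

The number of bytes returned equals fusion->drv_map_sz, which megasas_debugfs_raidmap_open() records as the buffer length.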
// SPDX-License-Identifier: GPL-2.0-or-later /* * Linux MegaRAID driver for SAS based RAID controllers * * Copyright (c) 2009-2013 LSI Corporation * Copyright (c) 2013-2016 Avago Technologies * Copyright (c) 2016-2018 Broadcom Inc. * * FILE: megaraid_sas_fusion.c * * Authors: Broadcom Inc. * Sumant Patro * Adam Radford * Kashyap Desai <[email protected]> * Sumit Saxena <[email protected]> * * Send feedback to: [email protected] */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/list.h> #include <linux/moduleparam.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/uio.h> #include <linux/uaccess.h> #include <linux/fs.h> #include <linux/compat.h> #include <linux/blkdev.h> #include <linux/mutex.h> #include <linux/poll.h> #include <linux/vmalloc.h> #include <linux/workqueue.h> #include <linux/irq_poll.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_dbg.h> #include <linux/dmi.h> #include "megaraid_sas_fusion.h" #include "megaraid_sas.h" extern void megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, u8 alt_status); int wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd, int seconds); int megasas_clear_intr_fusion(struct megasas_instance *instance); int megasas_transition_to_ready(struct megasas_instance *instance, int ocr); extern u32 megasas_dbg_lvl; int megasas_sriov_start_heartbeat(struct megasas_instance *instance, int initial); extern struct megasas_mgmt_info megasas_mgmt_info; extern unsigned int resetwaittime; extern unsigned int dual_qdepth_disable; static void megasas_free_rdpq_fusion(struct megasas_instance *instance); static void megasas_free_reply_fusion(struct megasas_instance *instance); static inline void megasas_configure_queue_sizes(struct megasas_instance *instance); static void megasas_fusion_crash_dump(struct megasas_instance *instance); /** * megasas_adp_reset_wait_for_ready - initiate chip reset and wait for * controller to come to ready state * @instance: adapter's soft state * @do_adp_reset: If true, do a chip reset * @ocr_context: If called from OCR context this will * be set to 1, else 0 * * This function initiates a chip reset followed by a wait for controller to * transition to ready state. 
* During this, driver will block all access to PCI config space from userspace */ int megasas_adp_reset_wait_for_ready(struct megasas_instance *instance, bool do_adp_reset, int ocr_context) { int ret = FAILED; /* * Block access to PCI config space from userspace * when diag reset is initiated from driver */ if (megasas_dbg_lvl & OCR_DEBUG) dev_info(&instance->pdev->dev, "Block access to PCI config space %s %d\n", __func__, __LINE__); pci_cfg_access_lock(instance->pdev); if (do_adp_reset) { if (instance->instancet->adp_reset (instance, instance->reg_set)) goto out; } /* Wait for FW to become ready */ if (megasas_transition_to_ready(instance, ocr_context)) { dev_warn(&instance->pdev->dev, "Failed to transition controller to ready for scsi%d.\n", instance->host->host_no); goto out; } ret = SUCCESS; out: if (megasas_dbg_lvl & OCR_DEBUG) dev_info(&instance->pdev->dev, "Unlock access to PCI config space %s %d\n", __func__, __LINE__); pci_cfg_access_unlock(instance->pdev); return ret; } /** * megasas_check_same_4gb_region - check if allocation * crosses same 4GB boundary or not * @instance: adapter's soft instance * @start_addr: start address of DMA allocation * @size: size of allocation in bytes * @return: true : allocation does not cross same * 4GB boundary * false: allocation crosses same * 4GB boundary */ static inline bool megasas_check_same_4gb_region (struct megasas_instance *instance, dma_addr_t start_addr, size_t size) { dma_addr_t end_addr; end_addr = start_addr + size; if (upper_32_bits(start_addr) != upper_32_bits(end_addr)) { dev_err(&instance->pdev->dev, "Failed to get same 4GB boundary: start_addr: 0x%llx end_addr: 0x%llx\n", (unsigned long long)start_addr, (unsigned long long)end_addr); return false; } return true; } /** * megasas_enable_intr_fusion - Enables interrupts * @instance: adapter's soft instance */ static void megasas_enable_intr_fusion(struct megasas_instance *instance) { struct megasas_register_set __iomem *regs; regs = instance->reg_set; instance->mask_interrupts = 0; /* For Thunderbolt/Invader also clear intr on enable */ writel(~0, &regs->outbound_intr_status); readl(&regs->outbound_intr_status); writel(~MFI_FUSION_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask); /* Dummy readl to force pci flush */ dev_info(&instance->pdev->dev, "%s is called outbound_intr_mask:0x%08x\n", __func__, readl(&regs->outbound_intr_mask)); } /** * megasas_disable_intr_fusion - Disables interrupt * @instance: adapter's soft instance */ static void megasas_disable_intr_fusion(struct megasas_instance *instance) { u32 mask = 0xFFFFFFFF; struct megasas_register_set __iomem *regs; regs = instance->reg_set; instance->mask_interrupts = 1; writel(mask, &regs->outbound_intr_mask); /* Dummy readl to force pci flush */ dev_info(&instance->pdev->dev, "%s is called outbound_intr_mask:0x%08x\n", __func__, readl(&regs->outbound_intr_mask)); } int megasas_clear_intr_fusion(struct megasas_instance *instance) { u32 status; struct megasas_register_set __iomem *regs; regs = instance->reg_set; /* * Check if it is our interrupt */ status = megasas_readl(instance, &regs->outbound_intr_status); if (status & 1) { writel(status, &regs->outbound_intr_status); readl(&regs->outbound_intr_status); return 1; } if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) return 0; return 1; } static inline void megasas_sdev_busy_inc(struct megasas_instance *instance, struct scsi_cmnd *scmd) { if (instance->perf_mode == MR_BALANCED_PERF_MODE) { struct MR_PRIV_DEVICE *mr_device_priv_data = scmd->device->hostdata; 
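		/*
		 * sdev_priv_busy tracks outstanding commands per SCSI device;
		 * megasas_get_msix_index() compares it against
		 * data_arms * MR_DEVICE_HIGH_IOPS_DEPTH to decide whether the
		 * command should be steered to a high-IOPS reply queue.
		 */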
atomic_inc(&mr_device_priv_data->sdev_priv_busy); } } static inline void megasas_sdev_busy_dec(struct megasas_instance *instance, struct scsi_cmnd *scmd) { if (instance->perf_mode == MR_BALANCED_PERF_MODE) { struct MR_PRIV_DEVICE *mr_device_priv_data = scmd->device->hostdata; atomic_dec(&mr_device_priv_data->sdev_priv_busy); } } static inline int megasas_sdev_busy_read(struct megasas_instance *instance, struct scsi_cmnd *scmd) { if (instance->perf_mode == MR_BALANCED_PERF_MODE) { struct MR_PRIV_DEVICE *mr_device_priv_data = scmd->device->hostdata; return atomic_read(&mr_device_priv_data->sdev_priv_busy); } return 0; } /** * megasas_get_cmd_fusion - Get a command from the free pool * @instance: Adapter soft state * @blk_tag: Command tag * * Returns a blk_tag indexed mpt frame */ inline struct megasas_cmd_fusion *megasas_get_cmd_fusion(struct megasas_instance *instance, u32 blk_tag) { struct fusion_context *fusion; fusion = instance->ctrl_context; return fusion->cmd_list[blk_tag]; } /** * megasas_return_cmd_fusion - Return a cmd to free command pool * @instance: Adapter soft state * @cmd: Command packet to be returned to free command pool */ inline void megasas_return_cmd_fusion(struct megasas_instance *instance, struct megasas_cmd_fusion *cmd) { cmd->scmd = NULL; memset(cmd->io_request, 0, MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE); cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID; cmd->cmd_completed = false; } /** * megasas_write_64bit_req_desc - PCI writes 64bit request descriptor * @instance: Adapter soft state * @req_desc: 64bit Request descriptor */ static void megasas_write_64bit_req_desc(struct megasas_instance *instance, union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc) { #if defined(writeq) && defined(CONFIG_64BIT) u64 req_data = (((u64)le32_to_cpu(req_desc->u.high) << 32) | le32_to_cpu(req_desc->u.low)); writeq(req_data, &instance->reg_set->inbound_low_queue_port); #else unsigned long flags; spin_lock_irqsave(&instance->hba_lock, flags); writel(le32_to_cpu(req_desc->u.low), &instance->reg_set->inbound_low_queue_port); writel(le32_to_cpu(req_desc->u.high), &instance->reg_set->inbound_high_queue_port); spin_unlock_irqrestore(&instance->hba_lock, flags); #endif } /** * megasas_fire_cmd_fusion - Sends command to the FW * @instance: Adapter soft state * @req_desc: 32bit or 64bit Request descriptor * * Perform PCI Write. AERO SERIES supports 32 bit Descriptor. * Prior to AERO_SERIES support 64 bit Descriptor. */ static void megasas_fire_cmd_fusion(struct megasas_instance *instance, union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc) { if (instance->atomic_desc_support) writel(le32_to_cpu(req_desc->u.low), &instance->reg_set->inbound_single_queue_port); else megasas_write_64bit_req_desc(instance, req_desc); } /** * megasas_fusion_update_can_queue - Do all Adapter Queue depth related calculations here * @instance: Adapter soft state * @fw_boot_context: Whether this function called during probe or after OCR * * This function is only for fusion controllers. * Update host can queue, if firmware downgrade max supported firmware commands. * Firmware upgrade case will be skipped because underlying firmware has * more resource than exposed to the OS. 
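 * At probe time the firmware limit is additionally capped to
 * MEGASAS_KDUMP_QUEUE_DEPTH for kdump kernels (reset_devices) and reduced by
 * one so that reply_q_sz never exceeds what the firmware can support.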
* */ static void megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_context) { u16 cur_max_fw_cmds = 0; u16 ldio_threshold = 0; /* ventura FW does not fill outbound_scratch_pad_2 with queue depth */ if (instance->adapter_type < VENTURA_SERIES) cur_max_fw_cmds = megasas_readl(instance, &instance->reg_set->outbound_scratch_pad_2) & 0x00FFFF; if (dual_qdepth_disable || !cur_max_fw_cmds) cur_max_fw_cmds = instance->instancet->read_fw_status_reg(instance) & 0x00FFFF; else ldio_threshold = (instance->instancet->read_fw_status_reg(instance) & 0x00FFFF) - MEGASAS_FUSION_IOCTL_CMDS; dev_info(&instance->pdev->dev, "Current firmware supports maximum commands: %d\t LDIO threshold: %d\n", cur_max_fw_cmds, ldio_threshold); if (fw_boot_context == OCR_CONTEXT) { cur_max_fw_cmds = cur_max_fw_cmds - 1; if (cur_max_fw_cmds < instance->max_fw_cmds) { instance->cur_can_queue = cur_max_fw_cmds - (MEGASAS_FUSION_INTERNAL_CMDS + MEGASAS_FUSION_IOCTL_CMDS); instance->host->can_queue = instance->cur_can_queue; instance->ldio_threshold = ldio_threshold; } } else { instance->max_fw_cmds = cur_max_fw_cmds; instance->ldio_threshold = ldio_threshold; if (reset_devices) instance->max_fw_cmds = min(instance->max_fw_cmds, (u16)MEGASAS_KDUMP_QUEUE_DEPTH); /* * Reduce the max supported cmds by 1. This is to ensure that the * reply_q_sz (1 more than the max cmd that driver may send) * does not exceed max cmds that the FW can support */ instance->max_fw_cmds = instance->max_fw_cmds-1; } } static inline void megasas_get_msix_index(struct megasas_instance *instance, struct scsi_cmnd *scmd, struct megasas_cmd_fusion *cmd, u8 data_arms) { if (instance->perf_mode == MR_BALANCED_PERF_MODE && (megasas_sdev_busy_read(instance, scmd) > (data_arms * MR_DEVICE_HIGH_IOPS_DEPTH))) { cmd->request_desc->SCSIIO.MSIxIndex = mega_mod64((atomic64_add_return(1, &instance->high_iops_outstanding) / MR_HIGH_IOPS_BATCH_COUNT), instance->low_latency_index_start); } else if (instance->msix_load_balance) { cmd->request_desc->SCSIIO.MSIxIndex = (mega_mod64(atomic64_add_return(1, &instance->total_io_count), instance->msix_vectors)); } else if (instance->host->nr_hw_queues > 1) { u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd)); cmd->request_desc->SCSIIO.MSIxIndex = blk_mq_unique_tag_to_hwq(tag) + instance->low_latency_index_start; } else { cmd->request_desc->SCSIIO.MSIxIndex = instance->reply_map[raw_smp_processor_id()]; } } /** * megasas_free_cmds_fusion - Free all the cmds in the free cmd pool * @instance: Adapter soft state */ void megasas_free_cmds_fusion(struct megasas_instance *instance) { int i; struct fusion_context *fusion = instance->ctrl_context; struct megasas_cmd_fusion *cmd; if (fusion->sense) dma_pool_free(fusion->sense_dma_pool, fusion->sense, fusion->sense_phys_addr); /* SG */ if (fusion->cmd_list) { for (i = 0; i < instance->max_mpt_cmds; i++) { cmd = fusion->cmd_list[i]; if (cmd) { if (cmd->sg_frame) dma_pool_free(fusion->sg_dma_pool, cmd->sg_frame, cmd->sg_frame_phys_addr); } kfree(cmd); } kfree(fusion->cmd_list); } if (fusion->sg_dma_pool) { dma_pool_destroy(fusion->sg_dma_pool); fusion->sg_dma_pool = NULL; } if (fusion->sense_dma_pool) { dma_pool_destroy(fusion->sense_dma_pool); fusion->sense_dma_pool = NULL; } /* Reply Frame, Desc*/ if (instance->is_rdpq) megasas_free_rdpq_fusion(instance); else megasas_free_reply_fusion(instance); /* Request Frame, Desc*/ if (fusion->req_frames_desc) dma_free_coherent(&instance->pdev->dev, fusion->request_alloc_sz, fusion->req_frames_desc, fusion->req_frames_desc_phys); 
if (fusion->io_request_frames) dma_pool_free(fusion->io_request_frames_pool, fusion->io_request_frames, fusion->io_request_frames_phys); if (fusion->io_request_frames_pool) { dma_pool_destroy(fusion->io_request_frames_pool); fusion->io_request_frames_pool = NULL; } } /** * megasas_create_sg_sense_fusion - Creates DMA pool for cmd frames * @instance: Adapter soft state * */ static int megasas_create_sg_sense_fusion(struct megasas_instance *instance) { int i; u16 max_cmd; struct fusion_context *fusion; struct megasas_cmd_fusion *cmd; int sense_sz; u32 offset; fusion = instance->ctrl_context; max_cmd = instance->max_fw_cmds; sense_sz = instance->max_mpt_cmds * SCSI_SENSE_BUFFERSIZE; fusion->sg_dma_pool = dma_pool_create("mr_sg", &instance->pdev->dev, instance->max_chain_frame_sz, MR_DEFAULT_NVME_PAGE_SIZE, 0); /* SCSI_SENSE_BUFFERSIZE = 96 bytes */ fusion->sense_dma_pool = dma_pool_create("mr_sense", &instance->pdev->dev, sense_sz, 64, 0); if (!fusion->sense_dma_pool || !fusion->sg_dma_pool) { dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__); return -ENOMEM; } fusion->sense = dma_pool_alloc(fusion->sense_dma_pool, GFP_KERNEL, &fusion->sense_phys_addr); if (!fusion->sense) { dev_err(&instance->pdev->dev, "failed from %s %d\n", __func__, __LINE__); return -ENOMEM; } /* sense buffer, request frame and reply desc pool requires to be in * same 4 gb region. Below function will check this. * In case of failure, new pci pool will be created with updated * alignment. * Older allocation and pool will be destroyed. * Alignment will be used such a way that next allocation if success, * will always meet same 4gb region requirement. * Actual requirement is not alignment, but we need start and end of * DMA address must have same upper 32 bit address. */ if (!megasas_check_same_4gb_region(instance, fusion->sense_phys_addr, sense_sz)) { dma_pool_free(fusion->sense_dma_pool, fusion->sense, fusion->sense_phys_addr); fusion->sense = NULL; dma_pool_destroy(fusion->sense_dma_pool); fusion->sense_dma_pool = dma_pool_create("mr_sense_align", &instance->pdev->dev, sense_sz, roundup_pow_of_two(sense_sz), 0); if (!fusion->sense_dma_pool) { dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__); return -ENOMEM; } fusion->sense = dma_pool_alloc(fusion->sense_dma_pool, GFP_KERNEL, &fusion->sense_phys_addr); if (!fusion->sense) { dev_err(&instance->pdev->dev, "failed from %s %d\n", __func__, __LINE__); return -ENOMEM; } } /* * Allocate and attach a frame to each of the commands in cmd_list */ for (i = 0; i < max_cmd; i++) { cmd = fusion->cmd_list[i]; cmd->sg_frame = dma_pool_alloc(fusion->sg_dma_pool, GFP_KERNEL, &cmd->sg_frame_phys_addr); offset = SCSI_SENSE_BUFFERSIZE * i; cmd->sense = (u8 *)fusion->sense + offset; cmd->sense_phys_addr = fusion->sense_phys_addr + offset; if (!cmd->sg_frame) { dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__); return -ENOMEM; } } /* create sense buffer for the raid 1/10 fp */ for (i = max_cmd; i < instance->max_mpt_cmds; i++) { cmd = fusion->cmd_list[i]; offset = SCSI_SENSE_BUFFERSIZE * i; cmd->sense = (u8 *)fusion->sense + offset; cmd->sense_phys_addr = fusion->sense_phys_addr + offset; } return 0; } static int megasas_alloc_cmdlist_fusion(struct megasas_instance *instance) { u32 max_mpt_cmd, i, j; struct fusion_context *fusion; fusion = instance->ctrl_context; max_mpt_cmd = instance->max_mpt_cmds; /* * fusion->cmd_list is an array of struct megasas_cmd_fusion pointers. 
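 * (The array holds max_mpt_cmds pointers; on VENTURA_SERIES and later this
 * count includes the extra RAID 1 peer commands.)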
* Allocate the dynamic array first and then allocate individual * commands. */ fusion->cmd_list = kcalloc(max_mpt_cmd, sizeof(struct megasas_cmd_fusion *), GFP_KERNEL); if (!fusion->cmd_list) { dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__); return -ENOMEM; } for (i = 0; i < max_mpt_cmd; i++) { fusion->cmd_list[i] = kzalloc(sizeof(struct megasas_cmd_fusion), GFP_KERNEL); if (!fusion->cmd_list[i]) { for (j = 0; j < i; j++) kfree(fusion->cmd_list[j]); kfree(fusion->cmd_list); dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__); return -ENOMEM; } } return 0; } static int megasas_alloc_request_fusion(struct megasas_instance *instance) { struct fusion_context *fusion; fusion = instance->ctrl_context; retry_alloc: fusion->io_request_frames_pool = dma_pool_create("mr_ioreq", &instance->pdev->dev, fusion->io_frames_alloc_sz, 16, 0); if (!fusion->io_request_frames_pool) { dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__); return -ENOMEM; } fusion->io_request_frames = dma_pool_alloc(fusion->io_request_frames_pool, GFP_KERNEL | __GFP_NOWARN, &fusion->io_request_frames_phys); if (!fusion->io_request_frames) { if (instance->max_fw_cmds >= (MEGASAS_REDUCE_QD_COUNT * 2)) { instance->max_fw_cmds -= MEGASAS_REDUCE_QD_COUNT; dma_pool_destroy(fusion->io_request_frames_pool); megasas_configure_queue_sizes(instance); goto retry_alloc; } else { dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__); return -ENOMEM; } } if (!megasas_check_same_4gb_region(instance, fusion->io_request_frames_phys, fusion->io_frames_alloc_sz)) { dma_pool_free(fusion->io_request_frames_pool, fusion->io_request_frames, fusion->io_request_frames_phys); fusion->io_request_frames = NULL; dma_pool_destroy(fusion->io_request_frames_pool); fusion->io_request_frames_pool = dma_pool_create("mr_ioreq_align", &instance->pdev->dev, fusion->io_frames_alloc_sz, roundup_pow_of_two(fusion->io_frames_alloc_sz), 0); if (!fusion->io_request_frames_pool) { dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__); return -ENOMEM; } fusion->io_request_frames = dma_pool_alloc(fusion->io_request_frames_pool, GFP_KERNEL | __GFP_NOWARN, &fusion->io_request_frames_phys); if (!fusion->io_request_frames) { dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__); return -ENOMEM; } } fusion->req_frames_desc = dma_alloc_coherent(&instance->pdev->dev, fusion->request_alloc_sz, &fusion->req_frames_desc_phys, GFP_KERNEL); if (!fusion->req_frames_desc) { dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__); return -ENOMEM; } return 0; } static int megasas_alloc_reply_fusion(struct megasas_instance *instance) { int i, count; struct fusion_context *fusion; union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc; fusion = instance->ctrl_context; count = instance->msix_vectors > 0 ? 
instance->msix_vectors : 1; count += instance->iopoll_q_count; fusion->reply_frames_desc_pool = dma_pool_create("mr_reply", &instance->pdev->dev, fusion->reply_alloc_sz * count, 16, 0); if (!fusion->reply_frames_desc_pool) { dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__); return -ENOMEM; } fusion->reply_frames_desc[0] = dma_pool_alloc(fusion->reply_frames_desc_pool, GFP_KERNEL, &fusion->reply_frames_desc_phys[0]); if (!fusion->reply_frames_desc[0]) { dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__); return -ENOMEM; } if (!megasas_check_same_4gb_region(instance, fusion->reply_frames_desc_phys[0], (fusion->reply_alloc_sz * count))) { dma_pool_free(fusion->reply_frames_desc_pool, fusion->reply_frames_desc[0], fusion->reply_frames_desc_phys[0]); fusion->reply_frames_desc[0] = NULL; dma_pool_destroy(fusion->reply_frames_desc_pool); fusion->reply_frames_desc_pool = dma_pool_create("mr_reply_align", &instance->pdev->dev, fusion->reply_alloc_sz * count, roundup_pow_of_two(fusion->reply_alloc_sz * count), 0); if (!fusion->reply_frames_desc_pool) { dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__); return -ENOMEM; } fusion->reply_frames_desc[0] = dma_pool_alloc(fusion->reply_frames_desc_pool, GFP_KERNEL, &fusion->reply_frames_desc_phys[0]); if (!fusion->reply_frames_desc[0]) { dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__); return -ENOMEM; } } reply_desc = fusion->reply_frames_desc[0]; for (i = 0; i < fusion->reply_q_depth * count; i++, reply_desc++) reply_desc->Words = cpu_to_le64(ULLONG_MAX); /* This is not a rdpq mode, but driver still populate * reply_frame_desc array to use same msix index in ISR path. */ for (i = 0; i < (count - 1); i++) fusion->reply_frames_desc[i + 1] = fusion->reply_frames_desc[i] + (fusion->reply_alloc_sz)/sizeof(union MPI2_REPLY_DESCRIPTORS_UNION); return 0; } static int megasas_alloc_rdpq_fusion(struct megasas_instance *instance) { int i, j, k, msix_count; struct fusion_context *fusion; union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc; union MPI2_REPLY_DESCRIPTORS_UNION *rdpq_chunk_virt[RDPQ_MAX_CHUNK_COUNT]; dma_addr_t rdpq_chunk_phys[RDPQ_MAX_CHUNK_COUNT]; u8 dma_alloc_count, abs_index; u32 chunk_size, array_size, offset; fusion = instance->ctrl_context; chunk_size = fusion->reply_alloc_sz * RDPQ_MAX_INDEX_IN_ONE_CHUNK; array_size = sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * MAX_MSIX_QUEUES_FUSION; fusion->rdpq_virt = dma_alloc_coherent(&instance->pdev->dev, array_size, &fusion->rdpq_phys, GFP_KERNEL); if (!fusion->rdpq_virt) { dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__); return -ENOMEM; } msix_count = instance->msix_vectors > 0 ? instance->msix_vectors : 1; msix_count += instance->iopoll_q_count; fusion->reply_frames_desc_pool = dma_pool_create("mr_rdpq", &instance->pdev->dev, chunk_size, 16, 0); fusion->reply_frames_desc_pool_align = dma_pool_create("mr_rdpq_align", &instance->pdev->dev, chunk_size, roundup_pow_of_two(chunk_size), 0); if (!fusion->reply_frames_desc_pool || !fusion->reply_frames_desc_pool_align) { dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__); return -ENOMEM; } /* * For INVADER_SERIES each set of 8 reply queues(0-7, 8-15, ..) and * VENTURA_SERIES each set of 16 reply queues(0-15, 16-31, ..) should be * within 4GB boundary and also reply queues in a set must have same * upper 32-bits in their memory address. so here driver is allocating the * DMA'able memory for reply queues according. 
Driver uses limitation of * VENTURA_SERIES to manage INVADER_SERIES as well. */ dma_alloc_count = DIV_ROUND_UP(msix_count, RDPQ_MAX_INDEX_IN_ONE_CHUNK); for (i = 0; i < dma_alloc_count; i++) { rdpq_chunk_virt[i] = dma_pool_alloc(fusion->reply_frames_desc_pool, GFP_KERNEL, &rdpq_chunk_phys[i]); if (!rdpq_chunk_virt[i]) { dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__); return -ENOMEM; } /* reply desc pool requires to be in same 4 gb region. * Below function will check this. * In case of failure, new pci pool will be created with updated * alignment. * For RDPQ buffers, driver always allocate two separate pci pool. * Alignment will be used such a way that next allocation if * success, will always meet same 4gb region requirement. * rdpq_tracker keep track of each buffer's physical, * virtual address and pci pool descriptor. It will help driver * while freeing the resources. * */ if (!megasas_check_same_4gb_region(instance, rdpq_chunk_phys[i], chunk_size)) { dma_pool_free(fusion->reply_frames_desc_pool, rdpq_chunk_virt[i], rdpq_chunk_phys[i]); rdpq_chunk_virt[i] = dma_pool_alloc(fusion->reply_frames_desc_pool_align, GFP_KERNEL, &rdpq_chunk_phys[i]); if (!rdpq_chunk_virt[i]) { dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__); return -ENOMEM; } fusion->rdpq_tracker[i].dma_pool_ptr = fusion->reply_frames_desc_pool_align; } else { fusion->rdpq_tracker[i].dma_pool_ptr = fusion->reply_frames_desc_pool; } fusion->rdpq_tracker[i].pool_entry_phys = rdpq_chunk_phys[i]; fusion->rdpq_tracker[i].pool_entry_virt = rdpq_chunk_virt[i]; } for (k = 0; k < dma_alloc_count; k++) { for (i = 0; i < RDPQ_MAX_INDEX_IN_ONE_CHUNK; i++) { abs_index = (k * RDPQ_MAX_INDEX_IN_ONE_CHUNK) + i; if (abs_index == msix_count) break; offset = fusion->reply_alloc_sz * i; fusion->rdpq_virt[abs_index].RDPQBaseAddress = cpu_to_le64(rdpq_chunk_phys[k] + offset); fusion->reply_frames_desc_phys[abs_index] = rdpq_chunk_phys[k] + offset; fusion->reply_frames_desc[abs_index] = (union MPI2_REPLY_DESCRIPTORS_UNION *)((u8 *)rdpq_chunk_virt[k] + offset); reply_desc = fusion->reply_frames_desc[abs_index]; for (j = 0; j < fusion->reply_q_depth; j++, reply_desc++) reply_desc->Words = ULLONG_MAX; } } return 0; } static void megasas_free_rdpq_fusion(struct megasas_instance *instance) { int i; struct fusion_context *fusion; fusion = instance->ctrl_context; for (i = 0; i < RDPQ_MAX_CHUNK_COUNT; i++) { if (fusion->rdpq_tracker[i].pool_entry_virt) dma_pool_free(fusion->rdpq_tracker[i].dma_pool_ptr, fusion->rdpq_tracker[i].pool_entry_virt, fusion->rdpq_tracker[i].pool_entry_phys); } dma_pool_destroy(fusion->reply_frames_desc_pool); dma_pool_destroy(fusion->reply_frames_desc_pool_align); if (fusion->rdpq_virt) dma_free_coherent(&instance->pdev->dev, sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * MAX_MSIX_QUEUES_FUSION, fusion->rdpq_virt, fusion->rdpq_phys); } static void megasas_free_reply_fusion(struct megasas_instance *instance) { struct fusion_context *fusion; fusion = instance->ctrl_context; if (fusion->reply_frames_desc[0]) dma_pool_free(fusion->reply_frames_desc_pool, fusion->reply_frames_desc[0], fusion->reply_frames_desc_phys[0]); dma_pool_destroy(fusion->reply_frames_desc_pool); } /** * megasas_alloc_cmds_fusion - Allocates the command packets * @instance: Adapter soft state * * * Each frame has a 32-bit field called context. 
This context is used to get * back the megasas_cmd_fusion from the frame when a frame gets completed * In this driver, the 32 bit values are the indices into an array cmd_list. * This array is used only to look up the megasas_cmd_fusion given the context. * The free commands themselves are maintained in a linked list called cmd_pool. * * cmds are formed in the io_request and sg_frame members of the * megasas_cmd_fusion. The context field is used to get a request descriptor * and is used as SMID of the cmd. * SMID value range is from 1 to max_fw_cmds. */ static int megasas_alloc_cmds_fusion(struct megasas_instance *instance) { int i; struct fusion_context *fusion; struct megasas_cmd_fusion *cmd; u32 offset; dma_addr_t io_req_base_phys; u8 *io_req_base; fusion = instance->ctrl_context; if (megasas_alloc_request_fusion(instance)) goto fail_exit; if (instance->is_rdpq) { if (megasas_alloc_rdpq_fusion(instance)) goto fail_exit; } else if (megasas_alloc_reply_fusion(instance)) goto fail_exit; if (megasas_alloc_cmdlist_fusion(instance)) goto fail_exit; /* The first 256 bytes (SMID 0) is not used. Don't add to the cmd list */ io_req_base = fusion->io_request_frames + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE; io_req_base_phys = fusion->io_request_frames_phys + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE; /* * Add all the commands to command pool (fusion->cmd_pool) */ /* SMID 0 is reserved. Set SMID/index from 1 */ for (i = 0; i < instance->max_mpt_cmds; i++) { cmd = fusion->cmd_list[i]; offset = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i; memset(cmd, 0, sizeof(struct megasas_cmd_fusion)); cmd->index = i + 1; cmd->scmd = NULL; cmd->sync_cmd_idx = (i >= instance->max_scsi_cmds && i < instance->max_fw_cmds) ? (i - instance->max_scsi_cmds) : (u32)ULONG_MAX; /* Set to Invalid */ cmd->instance = instance; cmd->io_request = (struct MPI2_RAID_SCSI_IO_REQUEST *) (io_req_base + offset); memset(cmd->io_request, 0, sizeof(struct MPI2_RAID_SCSI_IO_REQUEST)); cmd->io_request_phys_addr = io_req_base_phys + offset; cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID; } if (megasas_create_sg_sense_fusion(instance)) goto fail_exit; return 0; fail_exit: megasas_free_cmds_fusion(instance); return -ENOMEM; } /** * wait_and_poll - Issues a polling command * @instance: Adapter soft state * @cmd: Command packet to be issued * @seconds: Maximum poll time * * For polling, MFI requires the cmd_status to be set to 0xFF before posting. 
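 * Returns DCMD_SUCCESS when cmd_status becomes MFI_STAT_OK, DCMD_TIMEOUT when
 * it is still MFI_STAT_INVALID_STATUS after the poll window, and DCMD_FAILED
 * otherwise; the poll also breaks out early if the firmware reports
 * MFI_STATE_FAULT.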
*/ int wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd, int seconds) { int i; struct megasas_header *frame_hdr = &cmd->frame->hdr; u32 status_reg; u32 msecs = seconds * 1000; /* * Wait for cmd_status to change */ for (i = 0; (i < msecs) && (frame_hdr->cmd_status == 0xff); i += 20) { rmb(); msleep(20); if (!(i % 5000)) { status_reg = instance->instancet->read_fw_status_reg(instance) & MFI_STATE_MASK; if (status_reg == MFI_STATE_FAULT) break; } } if (frame_hdr->cmd_status == MFI_STAT_INVALID_STATUS) return DCMD_TIMEOUT; else if (frame_hdr->cmd_status == MFI_STAT_OK) return DCMD_SUCCESS; else return DCMD_FAILED; } /** * megasas_ioc_init_fusion - Initializes the FW * @instance: Adapter soft state * * Issues the IOC Init cmd */ int megasas_ioc_init_fusion(struct megasas_instance *instance) { struct megasas_init_frame *init_frame; struct MPI2_IOC_INIT_REQUEST *IOCInitMessage = NULL; dma_addr_t ioc_init_handle; struct megasas_cmd *cmd; u8 ret, cur_rdpq_mode; struct fusion_context *fusion; union MEGASAS_REQUEST_DESCRIPTOR_UNION req_desc; int i; struct megasas_header *frame_hdr; const char *sys_info; MFI_CAPABILITIES *drv_ops; u32 scratch_pad_1; ktime_t time; bool cur_fw_64bit_dma_capable; bool cur_intr_coalescing; fusion = instance->ctrl_context; ioc_init_handle = fusion->ioc_init_request_phys; IOCInitMessage = fusion->ioc_init_request; cmd = fusion->ioc_init_cmd; scratch_pad_1 = megasas_readl (instance, &instance->reg_set->outbound_scratch_pad_1); cur_rdpq_mode = (scratch_pad_1 & MR_RDPQ_MODE_OFFSET) ? 1 : 0; if (instance->adapter_type == INVADER_SERIES) { cur_fw_64bit_dma_capable = (scratch_pad_1 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET) ? true : false; if (instance->consistent_mask_64bit && !cur_fw_64bit_dma_capable) { dev_err(&instance->pdev->dev, "Driver was operating on 64bit " "DMA mask, but upcoming FW does not support 64bit DMA mask\n"); megaraid_sas_kill_hba(instance); ret = 1; goto fail_fw_init; } } if (instance->is_rdpq && !cur_rdpq_mode) { dev_err(&instance->pdev->dev, "Firmware downgrade *NOT SUPPORTED*" " from RDPQ mode to non RDPQ mode\n"); ret = 1; goto fail_fw_init; } cur_intr_coalescing = (scratch_pad_1 & MR_INTR_COALESCING_SUPPORT_OFFSET) ? true : false; if ((instance->low_latency_index_start == MR_HIGH_IOPS_QUEUE_COUNT) && cur_intr_coalescing) instance->perf_mode = MR_BALANCED_PERF_MODE; dev_info(&instance->pdev->dev, "Performance mode :%s (latency index = %d)\n", MEGASAS_PERF_MODE_2STR(instance->perf_mode), instance->low_latency_index_start); instance->fw_sync_cache_support = (scratch_pad_1 & MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0; dev_info(&instance->pdev->dev, "FW supports sync cache\t: %s\n", instance->fw_sync_cache_support ? "Yes" : "No"); memset(IOCInitMessage, 0, sizeof(struct MPI2_IOC_INIT_REQUEST)); IOCInitMessage->Function = MPI2_FUNCTION_IOC_INIT; IOCInitMessage->WhoInit = MPI2_WHOINIT_HOST_DRIVER; IOCInitMessage->MsgVersion = cpu_to_le16(MPI2_VERSION); IOCInitMessage->HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION); IOCInitMessage->SystemRequestFrameSize = cpu_to_le16(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4); IOCInitMessage->ReplyDescriptorPostQueueDepth = cpu_to_le16(fusion->reply_q_depth); IOCInitMessage->ReplyDescriptorPostQueueAddress = instance->is_rdpq ? cpu_to_le64(fusion->rdpq_phys) : cpu_to_le64(fusion->reply_frames_desc_phys[0]); IOCInitMessage->MsgFlags = instance->is_rdpq ? 
MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE : 0; IOCInitMessage->SystemRequestFrameBaseAddress = cpu_to_le64(fusion->io_request_frames_phys); IOCInitMessage->SenseBufferAddressHigh = cpu_to_le32(upper_32_bits(fusion->sense_phys_addr)); IOCInitMessage->HostMSIxVectors = instance->msix_vectors + instance->iopoll_q_count; IOCInitMessage->HostPageSize = MR_DEFAULT_NVME_PAGE_SHIFT; time = ktime_get_real(); /* Convert to milliseconds as per FW requirement */ IOCInitMessage->TimeStamp = cpu_to_le64(ktime_to_ms(time)); init_frame = (struct megasas_init_frame *)cmd->frame; memset(init_frame, 0, IOC_INIT_FRAME_SIZE); frame_hdr = &cmd->frame->hdr; frame_hdr->cmd_status = 0xFF; frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE); init_frame->cmd = MFI_CMD_INIT; init_frame->cmd_status = 0xFF; drv_ops = (MFI_CAPABILITIES *) &(init_frame->driver_operations); /* driver support Extended MSIX */ if (instance->adapter_type >= INVADER_SERIES) drv_ops->mfi_capabilities.support_additional_msix = 1; /* driver supports HA / Remote LUN over Fast Path interface */ drv_ops->mfi_capabilities.support_fp_remote_lun = 1; drv_ops->mfi_capabilities.support_max_255lds = 1; drv_ops->mfi_capabilities.support_ndrive_r1_lb = 1; drv_ops->mfi_capabilities.security_protocol_cmds_fw = 1; if (instance->max_chain_frame_sz > MEGASAS_CHAIN_FRAME_SZ_MIN) drv_ops->mfi_capabilities.support_ext_io_size = 1; drv_ops->mfi_capabilities.support_fp_rlbypass = 1; if (!dual_qdepth_disable) drv_ops->mfi_capabilities.support_ext_queue_depth = 1; drv_ops->mfi_capabilities.support_qd_throttling = 1; drv_ops->mfi_capabilities.support_pd_map_target_id = 1; drv_ops->mfi_capabilities.support_nvme_passthru = 1; drv_ops->mfi_capabilities.support_fw_exposed_dev_list = 1; if (reset_devices) drv_ops->mfi_capabilities.support_memdump = 1; if (instance->consistent_mask_64bit) drv_ops->mfi_capabilities.support_64bit_mode = 1; /* Convert capability to LE32 */ cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities); sys_info = dmi_get_system_info(DMI_PRODUCT_UUID); if (instance->system_info_buf && sys_info) { memcpy(instance->system_info_buf->systemId, sys_info, strlen(sys_info) > 64 ? 64 : strlen(sys_info)); instance->system_info_buf->systemIdLength = strlen(sys_info) > 64 ? 
64 : strlen(sys_info); init_frame->system_info_lo = cpu_to_le32(lower_32_bits(instance->system_info_h)); init_frame->system_info_hi = cpu_to_le32(upper_32_bits(instance->system_info_h)); } init_frame->queue_info_new_phys_addr_hi = cpu_to_le32(upper_32_bits(ioc_init_handle)); init_frame->queue_info_new_phys_addr_lo = cpu_to_le32(lower_32_bits(ioc_init_handle)); init_frame->data_xfer_len = cpu_to_le32(sizeof(struct MPI2_IOC_INIT_REQUEST)); /* * Each bit in replyqueue_mask represents one group of MSI-x vectors * (each group has 8 vectors) */ switch (instance->perf_mode) { case MR_BALANCED_PERF_MODE: init_frame->replyqueue_mask = cpu_to_le16(~(~0 << instance->low_latency_index_start/8)); break; case MR_IOPS_PERF_MODE: init_frame->replyqueue_mask = cpu_to_le16(~(~0 << instance->msix_vectors/8)); break; } req_desc.u.low = cpu_to_le32(lower_32_bits(cmd->frame_phys_addr)); req_desc.u.high = cpu_to_le32(upper_32_bits(cmd->frame_phys_addr)); req_desc.MFAIo.RequestFlags = (MEGASAS_REQ_DESCRIPT_FLAGS_MFA << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); /* * disable the intr before firing the init frame */ instance->instancet->disable_intr(instance); for (i = 0; i < (10 * 1000); i += 20) { if (megasas_readl(instance, &instance->reg_set->doorbell) & 1) msleep(20); else break; } /* For AERO also, IOC_INIT requires 64 bit descriptor write */ megasas_write_64bit_req_desc(instance, &req_desc); wait_and_poll(instance, cmd, MFI_IO_TIMEOUT_SECS); frame_hdr = &cmd->frame->hdr; if (frame_hdr->cmd_status != 0) { ret = 1; goto fail_fw_init; } if (instance->adapter_type >= AERO_SERIES) { scratch_pad_1 = megasas_readl (instance, &instance->reg_set->outbound_scratch_pad_1); instance->atomic_desc_support = (scratch_pad_1 & MR_ATOMIC_DESCRIPTOR_SUPPORT_OFFSET) ? 1 : 0; dev_info(&instance->pdev->dev, "FW supports atomic descriptor\t: %s\n", instance->atomic_desc_support ? "Yes" : "No"); } return 0; fail_fw_init: dev_err(&instance->pdev->dev, "Init cmd return status FAILED for SCSI host %d\n", instance->host->host_no); return ret; } /** * megasas_sync_pd_seq_num - JBOD SEQ MAP * @instance: Adapter soft state * @pend: set to 1, if it is pended jbod map. * * Issue Jbod map to the firmware. If it is pended command, * issue command and return. If it is first instance of jbod map * issue and receive command. */ int megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend) { int ret = 0; size_t pd_seq_map_sz; struct megasas_cmd *cmd; struct megasas_dcmd_frame *dcmd; struct fusion_context *fusion = instance->ctrl_context; struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync; dma_addr_t pd_seq_h; pd_sync = (void *)fusion->pd_seq_sync[(instance->pd_seq_map_id & 1)]; pd_seq_h = fusion->pd_seq_phys[(instance->pd_seq_map_id & 1)]; pd_seq_map_sz = struct_size(pd_sync, seq, MAX_PHYSICAL_DEVICES); cmd = megasas_get_cmd(instance); if (!cmd) { dev_err(&instance->pdev->dev, "Could not get mfi cmd. 
Fail from %s %d\n", __func__, __LINE__); return -ENOMEM; } dcmd = &cmd->frame->dcmd; memset(pd_sync, 0, pd_seq_map_sz); memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); if (pend) { dcmd->mbox.b[0] = MEGASAS_DCMD_MBOX_PEND_FLAG; dcmd->flags = MFI_FRAME_DIR_WRITE; instance->jbod_seq_cmd = cmd; } else { dcmd->flags = MFI_FRAME_DIR_READ; } dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0xFF; dcmd->sge_count = 1; dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = cpu_to_le32(pd_seq_map_sz); dcmd->opcode = cpu_to_le32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO); megasas_set_dma_settings(instance, dcmd, pd_seq_h, pd_seq_map_sz); if (pend) { instance->instancet->issue_dcmd(instance, cmd); return 0; } /* Below code is only for non pended DCMD */ if (!instance->mask_interrupts) ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); else ret = megasas_issue_polled(instance, cmd); if (le32_to_cpu(pd_sync->count) > MAX_PHYSICAL_DEVICES) { dev_warn(&instance->pdev->dev, "driver supports max %d JBOD, but FW reports %d\n", MAX_PHYSICAL_DEVICES, le32_to_cpu(pd_sync->count)); ret = -EINVAL; } if (ret == DCMD_TIMEOUT) dev_warn(&instance->pdev->dev, "%s DCMD timed out, continue without JBOD sequence map\n", __func__); if (ret == DCMD_SUCCESS) instance->pd_seq_map_id++; megasas_return_cmd(instance, cmd); return ret; } /* * megasas_get_ld_map_info - Returns FW's ld_map structure * @instance: Adapter soft state * @pend: Pend the command or not * Issues an internal command (DCMD) to get the FW's controller PD * list structure. This information is mainly used to find out SYSTEM * supported by the FW. * dcmd.mbox value setting for MR_DCMD_LD_MAP_GET_INFO * dcmd.mbox.b[0] - number of LDs being sync'd * dcmd.mbox.b[1] - 0 - complete command immediately. * - 1 - pend till config change * dcmd.mbox.b[2] - 0 - supports max 64 lds and uses legacy MR_FW_RAID_MAP * - 1 - supports max MAX_LOGICAL_DRIVES_EXT lds and * uses extended struct MR_FW_RAID_MAP_EXT */ static int megasas_get_ld_map_info(struct megasas_instance *instance) { int ret = 0; struct megasas_cmd *cmd; struct megasas_dcmd_frame *dcmd; void *ci; dma_addr_t ci_h = 0; u32 size_map_info; struct fusion_context *fusion; cmd = megasas_get_cmd(instance); if (!cmd) { dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get cmd for map info\n"); return -ENOMEM; } fusion = instance->ctrl_context; if (!fusion) { megasas_return_cmd(instance, cmd); return -ENXIO; } dcmd = &cmd->frame->dcmd; size_map_info = fusion->current_map_sz; ci = (void *) fusion->ld_map[(instance->map_id & 1)]; ci_h = fusion->ld_map_phys[(instance->map_id & 1)]; if (!ci) { dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem for ld_map_info\n"); megasas_return_cmd(instance, cmd); return -ENOMEM; } memset(ci, 0, fusion->max_map_sz); memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0xFF; dcmd->sge_count = 1; dcmd->flags = MFI_FRAME_DIR_READ; dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = cpu_to_le32(size_map_info); dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO); megasas_set_dma_settings(instance, dcmd, ci_h, size_map_info); if (!instance->mask_interrupts) ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); else ret = megasas_issue_polled(instance, cmd); if (ret == DCMD_TIMEOUT) dev_warn(&instance->pdev->dev, "%s DCMD timed out, RAID map is disabled\n", __func__); megasas_return_cmd(instance, cmd); return ret; } u8 megasas_get_map_info(struct megasas_instance *instance) { struct fusion_context *fusion = 
instance->ctrl_context; fusion->fast_path_io = 0; if (!megasas_get_ld_map_info(instance)) { if (MR_ValidateMapInfo(instance, instance->map_id)) { fusion->fast_path_io = 1; return 0; } } return 1; } /* * megasas_sync_map_info - Returns FW's ld_map structure * @instance: Adapter soft state * * Issues an internal command (DCMD) to get the FW's controller PD * list structure. This information is mainly used to find out SYSTEM * supported by the FW. */ int megasas_sync_map_info(struct megasas_instance *instance) { int i; struct megasas_cmd *cmd; struct megasas_dcmd_frame *dcmd; u16 num_lds; struct fusion_context *fusion; struct MR_LD_TARGET_SYNC *ci = NULL; struct MR_DRV_RAID_MAP_ALL *map; struct MR_LD_RAID *raid; struct MR_LD_TARGET_SYNC *ld_sync; dma_addr_t ci_h = 0; u32 size_map_info; cmd = megasas_get_cmd(instance); if (!cmd) { dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get cmd for sync info\n"); return -ENOMEM; } fusion = instance->ctrl_context; if (!fusion) { megasas_return_cmd(instance, cmd); return 1; } map = fusion->ld_drv_map[instance->map_id & 1]; num_lds = le16_to_cpu(map->raidMap.ldCount); dcmd = &cmd->frame->dcmd; memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); ci = (struct MR_LD_TARGET_SYNC *) fusion->ld_map[(instance->map_id - 1) & 1]; memset(ci, 0, fusion->max_map_sz); ci_h = fusion->ld_map_phys[(instance->map_id - 1) & 1]; ld_sync = (struct MR_LD_TARGET_SYNC *)ci; for (i = 0; i < num_lds; i++, ld_sync++) { raid = MR_LdRaidGet(i, map); ld_sync->targetId = MR_GetLDTgtId(i, map); ld_sync->seqNum = raid->seqNum; } size_map_info = fusion->current_map_sz; dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0xFF; dcmd->sge_count = 1; dcmd->flags = MFI_FRAME_DIR_WRITE; dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = cpu_to_le32(size_map_info); dcmd->mbox.b[0] = num_lds; dcmd->mbox.b[1] = MEGASAS_DCMD_MBOX_PEND_FLAG; dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO); megasas_set_dma_settings(instance, dcmd, ci_h, size_map_info); instance->map_update_cmd = cmd; instance->instancet->issue_dcmd(instance, cmd); return 0; } /* * meagasas_display_intel_branding - Display branding string * @instance: per adapter object * * Return nothing. 
*/ static void megasas_display_intel_branding(struct megasas_instance *instance) { if (instance->pdev->subsystem_vendor != PCI_VENDOR_ID_INTEL) return; switch (instance->pdev->device) { case PCI_DEVICE_ID_LSI_INVADER: switch (instance->pdev->subsystem_device) { case MEGARAID_INTEL_RS3DC080_SSDID: dev_info(&instance->pdev->dev, "scsi host %d: %s\n", instance->host->host_no, MEGARAID_INTEL_RS3DC080_BRANDING); break; case MEGARAID_INTEL_RS3DC040_SSDID: dev_info(&instance->pdev->dev, "scsi host %d: %s\n", instance->host->host_no, MEGARAID_INTEL_RS3DC040_BRANDING); break; case MEGARAID_INTEL_RS3SC008_SSDID: dev_info(&instance->pdev->dev, "scsi host %d: %s\n", instance->host->host_no, MEGARAID_INTEL_RS3SC008_BRANDING); break; case MEGARAID_INTEL_RS3MC044_SSDID: dev_info(&instance->pdev->dev, "scsi host %d: %s\n", instance->host->host_no, MEGARAID_INTEL_RS3MC044_BRANDING); break; default: break; } break; case PCI_DEVICE_ID_LSI_FURY: switch (instance->pdev->subsystem_device) { case MEGARAID_INTEL_RS3WC080_SSDID: dev_info(&instance->pdev->dev, "scsi host %d: %s\n", instance->host->host_no, MEGARAID_INTEL_RS3WC080_BRANDING); break; case MEGARAID_INTEL_RS3WC040_SSDID: dev_info(&instance->pdev->dev, "scsi host %d: %s\n", instance->host->host_no, MEGARAID_INTEL_RS3WC040_BRANDING); break; default: break; } break; case PCI_DEVICE_ID_LSI_CUTLASS_52: case PCI_DEVICE_ID_LSI_CUTLASS_53: switch (instance->pdev->subsystem_device) { case MEGARAID_INTEL_RMS3BC160_SSDID: dev_info(&instance->pdev->dev, "scsi host %d: %s\n", instance->host->host_no, MEGARAID_INTEL_RMS3BC160_BRANDING); break; default: break; } break; default: break; } } /** * megasas_allocate_raid_maps - Allocate memory for RAID maps * @instance: Adapter soft state * * return: if success: return 0 * failed: return -ENOMEM */ static inline int megasas_allocate_raid_maps(struct megasas_instance *instance) { struct fusion_context *fusion; int i = 0; fusion = instance->ctrl_context; fusion->drv_map_pages = get_order(fusion->drv_map_sz); for (i = 0; i < 2; i++) { fusion->ld_map[i] = NULL; fusion->ld_drv_map[i] = (void *) __get_free_pages(__GFP_ZERO | GFP_KERNEL, fusion->drv_map_pages); if (!fusion->ld_drv_map[i]) { fusion->ld_drv_map[i] = vzalloc(fusion->drv_map_sz); if (!fusion->ld_drv_map[i]) { dev_err(&instance->pdev->dev, "Could not allocate memory for local map" " size requested: %d\n", fusion->drv_map_sz); goto ld_drv_map_alloc_fail; } } } for (i = 0; i < 2; i++) { fusion->ld_map[i] = dma_alloc_coherent(&instance->pdev->dev, fusion->max_map_sz, &fusion->ld_map_phys[i], GFP_KERNEL); if (!fusion->ld_map[i]) { dev_err(&instance->pdev->dev, "Could not allocate memory for map info %s:%d\n", __func__, __LINE__); goto ld_map_alloc_fail; } } return 0; ld_map_alloc_fail: for (i = 0; i < 2; i++) { if (fusion->ld_map[i]) dma_free_coherent(&instance->pdev->dev, fusion->max_map_sz, fusion->ld_map[i], fusion->ld_map_phys[i]); } ld_drv_map_alloc_fail: for (i = 0; i < 2; i++) { if (fusion->ld_drv_map[i]) { if (is_vmalloc_addr(fusion->ld_drv_map[i])) vfree(fusion->ld_drv_map[i]); else free_pages((ulong)fusion->ld_drv_map[i], fusion->drv_map_pages); } } return -ENOMEM; } /** * megasas_configure_queue_sizes - Calculate size of request desc queue, * reply desc queue, * IO request frame queue, set can_queue. 
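 * reply_q_depth is sized as 2 * roundup(max_fw_cmds + 1, 16), and
 * io_frames_alloc_sz reserves an extra IO frame for SMID 0, which is never
 * used for I/O.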
* @instance: Adapter soft state * @return: void */ static inline void megasas_configure_queue_sizes(struct megasas_instance *instance) { struct fusion_context *fusion; u16 max_cmd; fusion = instance->ctrl_context; max_cmd = instance->max_fw_cmds; if (instance->adapter_type >= VENTURA_SERIES) instance->max_mpt_cmds = instance->max_fw_cmds * RAID_1_PEER_CMDS; else instance->max_mpt_cmds = instance->max_fw_cmds; instance->max_scsi_cmds = instance->max_fw_cmds - instance->max_mfi_cmds; instance->cur_can_queue = instance->max_scsi_cmds; instance->host->can_queue = instance->cur_can_queue; fusion->reply_q_depth = 2 * ((max_cmd + 1 + 15) / 16) * 16; fusion->request_alloc_sz = sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) * instance->max_mpt_cmds; fusion->reply_alloc_sz = sizeof(union MPI2_REPLY_DESCRIPTORS_UNION) * (fusion->reply_q_depth); fusion->io_frames_alloc_sz = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE + (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * (instance->max_mpt_cmds + 1)); /* Extra 1 for SMID 0 */ } static int megasas_alloc_ioc_init_frame(struct megasas_instance *instance) { struct fusion_context *fusion; struct megasas_cmd *cmd; fusion = instance->ctrl_context; cmd = kzalloc(sizeof(struct megasas_cmd), GFP_KERNEL); if (!cmd) { dev_err(&instance->pdev->dev, "Failed from func: %s line: %d\n", __func__, __LINE__); return -ENOMEM; } cmd->frame = dma_alloc_coherent(&instance->pdev->dev, IOC_INIT_FRAME_SIZE, &cmd->frame_phys_addr, GFP_KERNEL); if (!cmd->frame) { dev_err(&instance->pdev->dev, "Failed from func: %s line: %d\n", __func__, __LINE__); kfree(cmd); return -ENOMEM; } fusion->ioc_init_cmd = cmd; return 0; } /** * megasas_free_ioc_init_cmd - Free IOC INIT command frame * @instance: Adapter soft state */ static inline void megasas_free_ioc_init_cmd(struct megasas_instance *instance) { struct fusion_context *fusion; fusion = instance->ctrl_context; if (fusion->ioc_init_cmd && fusion->ioc_init_cmd->frame) dma_free_coherent(&instance->pdev->dev, IOC_INIT_FRAME_SIZE, fusion->ioc_init_cmd->frame, fusion->ioc_init_cmd->frame_phys_addr); kfree(fusion->ioc_init_cmd); } /** * megasas_init_adapter_fusion - Initializes the FW * @instance: Adapter soft state * * This is the main function for initializing firmware. */ static u32 megasas_init_adapter_fusion(struct megasas_instance *instance) { struct fusion_context *fusion; u32 scratch_pad_1; int i = 0, count; u32 status_reg; fusion = instance->ctrl_context; megasas_fusion_update_can_queue(instance, PROBE_CONTEXT); /* * Only Driver's internal DCMDs and IOCTL DCMDs needs to have MFI frames */ instance->max_mfi_cmds = MEGASAS_FUSION_INTERNAL_CMDS + MEGASAS_FUSION_IOCTL_CMDS; megasas_configure_queue_sizes(instance); scratch_pad_1 = megasas_readl(instance, &instance->reg_set->outbound_scratch_pad_1); /* If scratch_pad_1 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set, * Firmware support extended IO chain frame which is 4 times more than * legacy Firmware. 
* Legacy Firmware - Frame size is (8 * 128) = 1K * 1M IO Firmware - Frame size is (8 * 128 * 4) = 4K */ if (scratch_pad_1 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK) instance->max_chain_frame_sz = ((scratch_pad_1 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> MEGASAS_MAX_CHAIN_SHIFT) * MEGASAS_1MB_IO; else instance->max_chain_frame_sz = ((scratch_pad_1 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> MEGASAS_MAX_CHAIN_SHIFT) * MEGASAS_256K_IO; if (instance->max_chain_frame_sz < MEGASAS_CHAIN_FRAME_SZ_MIN) { dev_warn(&instance->pdev->dev, "frame size %d invalid, fall back to legacy max frame size %d\n", instance->max_chain_frame_sz, MEGASAS_CHAIN_FRAME_SZ_MIN); instance->max_chain_frame_sz = MEGASAS_CHAIN_FRAME_SZ_MIN; } fusion->max_sge_in_main_msg = (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE - offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL))/16; fusion->max_sge_in_chain = instance->max_chain_frame_sz / sizeof(union MPI2_SGE_IO_UNION); instance->max_num_sge = rounddown_pow_of_two(fusion->max_sge_in_main_msg + fusion->max_sge_in_chain - 2); /* Used for pass thru MFI frame (DCMD) */ fusion->chain_offset_mfi_pthru = offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL)/16; fusion->chain_offset_io_request = (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE - sizeof(union MPI2_SGE_IO_UNION))/16; count = instance->msix_vectors > 0 ? instance->msix_vectors : 1; count += instance->iopoll_q_count; for (i = 0 ; i < count; i++) fusion->last_reply_idx[i] = 0; /* * For fusion adapters, 3 commands for IOCTL and 8 commands * for driver's internal DCMDs. */ instance->max_scsi_cmds = instance->max_fw_cmds - (MEGASAS_FUSION_INTERNAL_CMDS + MEGASAS_FUSION_IOCTL_CMDS); sema_init(&instance->ioctl_sem, MEGASAS_FUSION_IOCTL_CMDS); for (i = 0; i < MAX_MSIX_QUEUES_FUSION; i++) atomic_set(&fusion->busy_mq_poll[i], 0); if (megasas_alloc_ioc_init_frame(instance)) return 1; /* * Allocate memory for descriptors * Create a pool of commands */ if (megasas_alloc_cmds(instance)) goto fail_alloc_mfi_cmds; if (megasas_alloc_cmds_fusion(instance)) goto fail_alloc_cmds; if (megasas_ioc_init_fusion(instance)) { status_reg = instance->instancet->read_fw_status_reg(instance); if (((status_reg & MFI_STATE_MASK) == MFI_STATE_FAULT) && (status_reg & MFI_RESET_ADAPTER)) { /* Do a chip reset and then retry IOC INIT once */ if (megasas_adp_reset_wait_for_ready (instance, true, 0) == FAILED) goto fail_ioc_init; if (megasas_ioc_init_fusion(instance)) goto fail_ioc_init; } else { goto fail_ioc_init; } } megasas_display_intel_branding(instance); if (megasas_get_ctrl_info(instance)) { dev_err(&instance->pdev->dev, "Could not get controller info. Fail from %s %d\n", __func__, __LINE__); goto fail_ioc_init; } instance->flag_ieee = 1; instance->r1_ldio_hint_default = MR_R1_LDIO_PIGGYBACK_DEFAULT; instance->threshold_reply_count = instance->max_fw_cmds / 4; fusion->fast_path_io = 0; if (megasas_allocate_raid_maps(instance)) goto fail_ioc_init; if (!megasas_get_map_info(instance)) megasas_sync_map_info(instance); return 0; fail_ioc_init: megasas_free_cmds_fusion(instance); fail_alloc_cmds: megasas_free_cmds(instance); fail_alloc_mfi_cmds: megasas_free_ioc_init_cmd(instance); return 1; } /** * megasas_fault_detect_work - Worker function of * FW fault handling workqueue. 
* @work: FW fault work struct */ static void megasas_fault_detect_work(struct work_struct *work) { struct megasas_instance *instance = container_of(work, struct megasas_instance, fw_fault_work.work); u32 fw_state, dma_state, status; /* Check the fw state */ fw_state = instance->instancet->read_fw_status_reg(instance) & MFI_STATE_MASK; if (fw_state == MFI_STATE_FAULT) { dma_state = instance->instancet->read_fw_status_reg(instance) & MFI_STATE_DMADONE; /* Start collecting crash, if DMA bit is done */ if (instance->crash_dump_drv_support && instance->crash_dump_app_support && dma_state) { megasas_fusion_crash_dump(instance); } else { if (instance->unload == 0) { status = megasas_reset_fusion(instance->host, 0); if (status != SUCCESS) { dev_err(&instance->pdev->dev, "Failed from %s %d, do not re-arm timer\n", __func__, __LINE__); return; } } } } if (instance->fw_fault_work_q) queue_delayed_work(instance->fw_fault_work_q, &instance->fw_fault_work, msecs_to_jiffies(MEGASAS_WATCHDOG_THREAD_INTERVAL)); } int megasas_fusion_start_watchdog(struct megasas_instance *instance) { /* Check if the Fault WQ is already started */ if (instance->fw_fault_work_q) return SUCCESS; INIT_DELAYED_WORK(&instance->fw_fault_work, megasas_fault_detect_work); snprintf(instance->fault_handler_work_q_name, sizeof(instance->fault_handler_work_q_name), "poll_megasas%d_status", instance->host->host_no); instance->fw_fault_work_q = create_singlethread_workqueue(instance->fault_handler_work_q_name); if (!instance->fw_fault_work_q) { dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__); return FAILED; } queue_delayed_work(instance->fw_fault_work_q, &instance->fw_fault_work, msecs_to_jiffies(MEGASAS_WATCHDOG_THREAD_INTERVAL)); return SUCCESS; } void megasas_fusion_stop_watchdog(struct megasas_instance *instance) { struct workqueue_struct *wq; if (instance->fw_fault_work_q) { wq = instance->fw_fault_work_q; instance->fw_fault_work_q = NULL; if (!cancel_delayed_work_sync(&instance->fw_fault_work)) flush_workqueue(wq); destroy_workqueue(wq); } } /** * map_cmd_status - Maps FW cmd status to OS cmd status * @fusion: fusion context * @scmd: Pointer to cmd * @status: status of cmd returned by FW * @ext_status: ext status of cmd returned by FW * @data_length: command data length * @sense: command sense data */ static void map_cmd_status(struct fusion_context *fusion, struct scsi_cmnd *scmd, u8 status, u8 ext_status, u32 data_length, u8 *sense) { u8 cmd_type; int resid; cmd_type = megasas_cmd_type(scmd); switch (status) { case MFI_STAT_OK: scmd->result = DID_OK << 16; break; case MFI_STAT_SCSI_IO_FAILED: case MFI_STAT_LD_INIT_IN_PROGRESS: scmd->result = (DID_ERROR << 16) | ext_status; break; case MFI_STAT_SCSI_DONE_WITH_ERROR: scmd->result = (DID_OK << 16) | ext_status; if (ext_status == SAM_STAT_CHECK_CONDITION) { memcpy(scmd->sense_buffer, sense, SCSI_SENSE_BUFFERSIZE); } /* * If the IO request is partially completed, then MR FW will * update "io_request->DataLength" field with actual number of * bytes transferred.Driver will set residual bytes count in * SCSI command structure. 
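 * (i.e. resid = scsi_bufflen(scmd) - data_length, as computed just below).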
*/ resid = (scsi_bufflen(scmd) - data_length); scsi_set_resid(scmd, resid); if (resid && ((cmd_type == READ_WRITE_LDIO) || (cmd_type == READ_WRITE_SYSPDIO))) scmd_printk(KERN_INFO, scmd, "BRCM Debug mfi stat 0x%x, data len" " requested/completed 0x%x/0x%x\n", status, scsi_bufflen(scmd), data_length); break; case MFI_STAT_LD_OFFLINE: case MFI_STAT_DEVICE_NOT_FOUND: scmd->result = DID_BAD_TARGET << 16; break; case MFI_STAT_CONFIG_SEQ_MISMATCH: scmd->result = DID_IMM_RETRY << 16; break; default: scmd->result = DID_ERROR << 16; break; } } /** * megasas_is_prp_possible - * Checks if native NVMe PRPs can be built for the IO * * @instance: Adapter soft state * @scmd: SCSI command from the mid-layer * @sge_count: scatter gather element count. * * Returns: true: PRPs can be built * false: IEEE SGLs needs to be built */ static bool megasas_is_prp_possible(struct megasas_instance *instance, struct scsi_cmnd *scmd, int sge_count) { u32 data_length = 0; struct scatterlist *sg_scmd; bool build_prp = false; u32 mr_nvme_pg_size; mr_nvme_pg_size = max_t(u32, instance->nvme_page_size, MR_DEFAULT_NVME_PAGE_SIZE); data_length = scsi_bufflen(scmd); sg_scmd = scsi_sglist(scmd); /* * NVMe uses one PRP for each page (or part of a page) * look at the data length - if 4 pages or less then IEEE is OK * if > 5 pages then we need to build a native SGL * if > 4 and <= 5 pages, then check physical address of 1st SG entry * if this first size in the page is >= the residual beyond 4 pages * then use IEEE, otherwise use native SGL */ if (data_length > (mr_nvme_pg_size * 5)) { build_prp = true; } else if ((data_length > (mr_nvme_pg_size * 4)) && (data_length <= (mr_nvme_pg_size * 5))) { /* check if 1st SG entry size is < residual beyond 4 pages */ if (sg_dma_len(sg_scmd) < (data_length - (mr_nvme_pg_size * 4))) build_prp = true; } return build_prp; } /** * megasas_make_prp_nvme - * Prepare PRPs(Physical Region Page)- SGLs specific to NVMe drives only * * @instance: Adapter soft state * @scmd: SCSI command from the mid-layer * @sgl_ptr: SGL to be filled in * @cmd: Fusion command frame * @sge_count: scatter gather element count. * * Returns: true: PRPs are built * false: IEEE SGLs needs to be built */ static bool megasas_make_prp_nvme(struct megasas_instance *instance, struct scsi_cmnd *scmd, struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr, struct megasas_cmd_fusion *cmd, int sge_count) { int sge_len, offset, num_prp_in_chain = 0; struct MPI25_IEEE_SGE_CHAIN64 *main_chain_element, *ptr_first_sgl; u64 *ptr_sgl; dma_addr_t ptr_sgl_phys; u64 sge_addr; u32 page_mask, page_mask_result; struct scatterlist *sg_scmd; u32 first_prp_len; bool build_prp = false; int data_len = scsi_bufflen(scmd); u32 mr_nvme_pg_size = max_t(u32, instance->nvme_page_size, MR_DEFAULT_NVME_PAGE_SIZE); build_prp = megasas_is_prp_possible(instance, scmd, sge_count); if (!build_prp) return false; /* * Nvme has a very convoluted prp format. One prp is required * for each page or partial page. Driver need to split up OS sg_list * entries if it is longer than one page or cross a page * boundary. Driver also have to insert a PRP list pointer entry as * the last entry in each physical page of the PRP list. * * NOTE: The first PRP "entry" is actually placed in the first * SGL entry in the main message as IEEE 64 format. The 2nd * entry in the main message is the chain element, and the rest * of the PRP entries are built in the contiguous pcie buffer. 
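 * For example (illustrative numbers only): with a 4 KiB NVMe page, a 16 KiB transfer
 * starting 512 bytes into a page needs five PRP entries: one partial first page plus
 * four more pages.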
*/ page_mask = mr_nvme_pg_size - 1; ptr_sgl = (u64 *)cmd->sg_frame; ptr_sgl_phys = cmd->sg_frame_phys_addr; memset(ptr_sgl, 0, instance->max_chain_frame_sz); /* Build chain frame element which holds all prps except first*/ main_chain_element = (struct MPI25_IEEE_SGE_CHAIN64 *) ((u8 *)sgl_ptr + sizeof(struct MPI25_IEEE_SGE_CHAIN64)); main_chain_element->Address = cpu_to_le64(ptr_sgl_phys); main_chain_element->NextChainOffset = 0; main_chain_element->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT | IEEE_SGE_FLAGS_SYSTEM_ADDR | MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP; /* Build first prp, sge need not to be page aligned*/ ptr_first_sgl = sgl_ptr; sg_scmd = scsi_sglist(scmd); sge_addr = sg_dma_address(sg_scmd); sge_len = sg_dma_len(sg_scmd); offset = (u32)(sge_addr & page_mask); first_prp_len = mr_nvme_pg_size - offset; ptr_first_sgl->Address = cpu_to_le64(sge_addr); ptr_first_sgl->Length = cpu_to_le32(first_prp_len); data_len -= first_prp_len; if (sge_len > first_prp_len) { sge_addr += first_prp_len; sge_len -= first_prp_len; } else if (sge_len == first_prp_len) { sg_scmd = sg_next(sg_scmd); sge_addr = sg_dma_address(sg_scmd); sge_len = sg_dma_len(sg_scmd); } for (;;) { offset = (u32)(sge_addr & page_mask); /* Put PRP pointer due to page boundary*/ page_mask_result = (uintptr_t)(ptr_sgl + 1) & page_mask; if (unlikely(!page_mask_result)) { scmd_printk(KERN_NOTICE, scmd, "page boundary ptr_sgl: 0x%p\n", ptr_sgl); ptr_sgl_phys += 8; *ptr_sgl = cpu_to_le64(ptr_sgl_phys); ptr_sgl++; num_prp_in_chain++; } *ptr_sgl = cpu_to_le64(sge_addr); ptr_sgl++; ptr_sgl_phys += 8; num_prp_in_chain++; sge_addr += mr_nvme_pg_size; sge_len -= mr_nvme_pg_size; data_len -= mr_nvme_pg_size; if (data_len <= 0) break; if (sge_len > 0) continue; sg_scmd = sg_next(sg_scmd); sge_addr = sg_dma_address(sg_scmd); sge_len = sg_dma_len(sg_scmd); } main_chain_element->Length = cpu_to_le32(num_prp_in_chain * sizeof(u64)); return build_prp; } /** * megasas_make_sgl_fusion - Prepares 32-bit SGL * @instance: Adapter soft state * @scp: SCSI command from the mid-layer * @sgl_ptr: SGL to be filled in * @cmd: cmd we are working on * @sge_count: sge count * */ static void megasas_make_sgl_fusion(struct megasas_instance *instance, struct scsi_cmnd *scp, struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr, struct megasas_cmd_fusion *cmd, int sge_count) { int i, sg_processed; struct scatterlist *os_sgl; struct fusion_context *fusion; fusion = instance->ctrl_context; if (instance->adapter_type >= INVADER_SERIES) { struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end = sgl_ptr; sgl_ptr_end += fusion->max_sge_in_main_msg - 1; sgl_ptr_end->Flags = 0; } scsi_for_each_sg(scp, os_sgl, sge_count, i) { sgl_ptr->Length = cpu_to_le32(sg_dma_len(os_sgl)); sgl_ptr->Address = cpu_to_le64(sg_dma_address(os_sgl)); sgl_ptr->Flags = 0; if (instance->adapter_type >= INVADER_SERIES) if (i == sge_count - 1) sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST; sgl_ptr++; sg_processed = i + 1; if ((sg_processed == (fusion->max_sge_in_main_msg - 1)) && (sge_count > fusion->max_sge_in_main_msg)) { struct MPI25_IEEE_SGE_CHAIN64 *sg_chain; if (instance->adapter_type >= INVADER_SERIES) { if ((le16_to_cpu(cmd->io_request->IoFlags) & MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) != MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) cmd->io_request->ChainOffset = fusion-> chain_offset_io_request; else cmd->io_request->ChainOffset = 0; } else cmd->io_request->ChainOffset = fusion->chain_offset_io_request; sg_chain = sgl_ptr; /* Prepare chain element */ sg_chain->NextChainOffset = 0; if (instance->adapter_type >= INVADER_SERIES) 
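				/* Invader and newer need only the chain element flag;
				 * older adapters also OR in IOCPLBNTA addressing (else branch below)
				 */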
sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT; else sg_chain->Flags = (IEEE_SGE_FLAGS_CHAIN_ELEMENT | MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR); sg_chain->Length = cpu_to_le32((sizeof(union MPI2_SGE_IO_UNION) * (sge_count - sg_processed))); sg_chain->Address = cpu_to_le64(cmd->sg_frame_phys_addr); sgl_ptr = (struct MPI25_IEEE_SGE_CHAIN64 *)cmd->sg_frame; memset(sgl_ptr, 0, instance->max_chain_frame_sz); } } } /** * megasas_make_sgl - Build Scatter Gather List(SGLs) * @scp: SCSI command pointer * @instance: Soft instance of controller * @cmd: Fusion command pointer * * This function will build sgls based on device type. * For nvme drives, there is different way of building sgls in nvme native * format- PRPs(Physical Region Page). * * Returns the number of sg lists actually used, zero if the sg lists * is NULL, or -ENOMEM if the mapping failed */ static int megasas_make_sgl(struct megasas_instance *instance, struct scsi_cmnd *scp, struct megasas_cmd_fusion *cmd) { int sge_count; bool build_prp = false; struct MPI25_IEEE_SGE_CHAIN64 *sgl_chain64; sge_count = scsi_dma_map(scp); if ((sge_count > instance->max_num_sge) || (sge_count <= 0)) return sge_count; sgl_chain64 = (struct MPI25_IEEE_SGE_CHAIN64 *)&cmd->io_request->SGL; if ((le16_to_cpu(cmd->io_request->IoFlags) & MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) && (cmd->pd_interface == NVME_PD)) build_prp = megasas_make_prp_nvme(instance, scp, sgl_chain64, cmd, sge_count); if (!build_prp) megasas_make_sgl_fusion(instance, scp, sgl_chain64, cmd, sge_count); return sge_count; } /** * megasas_set_pd_lba - Sets PD LBA * @io_request: IO request * @cdb_len: cdb length * @io_info: IO information * @scp: SCSI command * @local_map_ptr: Raid map * @ref_tag: Primary reference tag * * Used to set the PD LBA in CDB for FP IOs */ static void megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len, struct IO_REQUEST_INFO *io_info, struct scsi_cmnd *scp, struct MR_DRV_RAID_MAP_ALL *local_map_ptr, u32 ref_tag) { struct MR_LD_RAID *raid; u16 ld; u64 start_blk = io_info->pdBlock; u8 *cdb = io_request->CDB.CDB32; u32 num_blocks = io_info->numBlocks; u8 opcode = 0, flagvals = 0, groupnum = 0, control = 0; /* Check if T10 PI (DIF) is enabled for this LD */ ld = MR_TargetIdToLdGet(io_info->ldTgtId, local_map_ptr); raid = MR_LdRaidGet(ld, local_map_ptr); if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) { memset(cdb, 0, sizeof(io_request->CDB.CDB32)); cdb[0] = MEGASAS_SCSI_VARIABLE_LENGTH_CMD; cdb[7] = MEGASAS_SCSI_ADDL_CDB_LEN; if (scp->sc_data_direction == DMA_FROM_DEVICE) cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_READ32; else cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_WRITE32; cdb[10] = MEGASAS_RD_WR_PROTECT_CHECK_ALL; /* LBA */ cdb[12] = (u8)((start_blk >> 56) & 0xff); cdb[13] = (u8)((start_blk >> 48) & 0xff); cdb[14] = (u8)((start_blk >> 40) & 0xff); cdb[15] = (u8)((start_blk >> 32) & 0xff); cdb[16] = (u8)((start_blk >> 24) & 0xff); cdb[17] = (u8)((start_blk >> 16) & 0xff); cdb[18] = (u8)((start_blk >> 8) & 0xff); cdb[19] = (u8)(start_blk & 0xff); /* Logical block reference tag */ io_request->CDB.EEDP32.PrimaryReferenceTag = cpu_to_be32(ref_tag); io_request->CDB.EEDP32.PrimaryApplicationTagMask = cpu_to_be16(0xffff); io_request->IoFlags = cpu_to_le16(32); /* Specify 32-byte cdb */ /* Transfer length */ cdb[28] = (u8)((num_blocks >> 24) & 0xff); cdb[29] = (u8)((num_blocks >> 16) & 0xff); cdb[30] = (u8)((num_blocks >> 8) & 0xff); cdb[31] = (u8)(num_blocks & 0xff); /* set SCSI IO EEDPFlags */ if (scp->sc_data_direction == DMA_FROM_DEVICE) { 
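			/* reads verify the guard/app/ref tags; writes (else branch) insert protection info */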
			io_request->EEDPFlags = cpu_to_le16(
				MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
				MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
				MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP |
				MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG |
				MPI25_SCSIIO_EEDPFLAGS_DO_NOT_DISABLE_MODE |
				MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
		} else {
			io_request->EEDPFlags = cpu_to_le16(
				MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
				MPI2_SCSIIO_EEDPFLAGS_INSERT_OP);
		}
		io_request->Control |= cpu_to_le32((0x4 << 26));
		io_request->EEDPBlockSize = cpu_to_le32(scp->device->sector_size);
	} else {
		/* Some drives don't support 16/12 byte CDB's, convert to 10 */
		if (((cdb_len == 12) || (cdb_len == 16)) &&
		    (start_blk <= 0xffffffff)) {
			if (cdb_len == 16) {
				opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10;
				flagvals = cdb[1];
				groupnum = cdb[14];
				control = cdb[15];
			} else {
				opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10;
				flagvals = cdb[1];
				groupnum = cdb[10];
				control = cdb[11];
			}

			memset(cdb, 0, sizeof(io_request->CDB.CDB32));

			cdb[0] = opcode;
			cdb[1] = flagvals;
			cdb[6] = groupnum;
			cdb[9] = control;

			/* Transfer length */
			cdb[8] = (u8)(num_blocks & 0xff);
			cdb[7] = (u8)((num_blocks >> 8) & 0xff);

			io_request->IoFlags = cpu_to_le16(10); /* Specify 10-byte cdb */
			cdb_len = 10;
		} else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
			/* Convert to 16 byte CDB for large LBA's */
			switch (cdb_len) {
			case 6:
				opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
				control = cdb[5];
				break;
			case 10:
				opcode = cdb[0] == READ_10 ? READ_16 : WRITE_16;
				flagvals = cdb[1];
				groupnum = cdb[6];
				control = cdb[9];
				break;
			case 12:
				opcode = cdb[0] == READ_12 ? READ_16 : WRITE_16;
				flagvals = cdb[1];
				groupnum = cdb[10];
				control = cdb[11];
				break;
			}

			memset(cdb, 0, sizeof(io_request->CDB.CDB32));

			cdb[0] = opcode;
			cdb[1] = flagvals;
			cdb[14] = groupnum;
			cdb[15] = control;

			/* Transfer length */
			cdb[13] = (u8)(num_blocks & 0xff);
			cdb[12] = (u8)((num_blocks >> 8) & 0xff);
			cdb[11] = (u8)((num_blocks >> 16) & 0xff);
			cdb[10] = (u8)((num_blocks >> 24) & 0xff);

			io_request->IoFlags = cpu_to_le16(16); /* Specify 16-byte cdb */
			cdb_len = 16;
		}

		/* Normal case, just load LBA here */
		switch (cdb_len) {
		case 6:
		{
			u8 val = cdb[1] & 0xE0;

			cdb[3] = (u8)(start_blk & 0xff);
			cdb[2] = (u8)((start_blk >> 8) & 0xff);
			cdb[1] = val | ((u8)(start_blk >> 16) & 0x1f);
			break;
		}
		case 10:
			cdb[5] = (u8)(start_blk & 0xff);
			cdb[4] = (u8)((start_blk >> 8) & 0xff);
			cdb[3] = (u8)((start_blk >> 16) & 0xff);
			cdb[2] = (u8)((start_blk >> 24) & 0xff);
			break;
		case 12:
			cdb[5] = (u8)(start_blk & 0xff);
			cdb[4] = (u8)((start_blk >> 8) & 0xff);
			cdb[3] = (u8)((start_blk >> 16) & 0xff);
			cdb[2] = (u8)((start_blk >> 24) & 0xff);
			break;
		case 16:
			cdb[9] = (u8)(start_blk & 0xff);
			cdb[8] = (u8)((start_blk >> 8) & 0xff);
			cdb[7] = (u8)((start_blk >> 16) & 0xff);
			cdb[6] = (u8)((start_blk >> 24) & 0xff);
			cdb[5] = (u8)((start_blk >> 32) & 0xff);
			cdb[4] = (u8)((start_blk >> 40) & 0xff);
			cdb[3] = (u8)((start_blk >> 48) & 0xff);
			cdb[2] = (u8)((start_blk >> 56) & 0xff);
			break;
		}
	}
}

/**
 * megasas_stream_detect - stream detection on read and write IOs
 * @instance: Adapter soft state
 * @cmd: Command to be prepared
 * @io_info: IO Request info
 *
 */
static void megasas_stream_detect(struct megasas_instance *instance,
				  struct megasas_cmd_fusion *cmd,
				  struct IO_REQUEST_INFO *io_info)
{
	struct fusion_context *fusion = instance->ctrl_context;
	u32 device_id = io_info->ldTgtId;
	struct LD_STREAM_DETECT *current_ld_sd =
		fusion->stream_detect_by_ld[device_id];
	u32 *track_stream = &current_ld_sd->mru_bit_map, stream_num;
	u32 shifted_values,
unshifted_values; u32 index_value_mask, shifted_values_mask; int i; bool is_read_ahead = false; struct STREAM_DETECT *current_sd; /* find possible stream */ for (i = 0; i < MAX_STREAMS_TRACKED; ++i) { stream_num = (*track_stream >> (i * BITS_PER_INDEX_STREAM)) & STREAM_MASK; current_sd = &current_ld_sd->stream_track[stream_num]; /* if we found a stream, update the raid * context and also update the mruBitMap */ /* boundary condition */ if ((current_sd->next_seq_lba) && (io_info->ldStartBlock >= current_sd->next_seq_lba) && (io_info->ldStartBlock <= (current_sd->next_seq_lba + 32)) && (current_sd->is_read == io_info->isRead)) { if ((io_info->ldStartBlock != current_sd->next_seq_lba) && ((!io_info->isRead) || (!is_read_ahead))) /* * Once the API is available we need to change this. * At this point we are not allowing any gap */ continue; SET_STREAM_DETECTED(cmd->io_request->RaidContext.raid_context_g35); current_sd->next_seq_lba = io_info->ldStartBlock + io_info->numBlocks; /* * update the mruBitMap LRU */ shifted_values_mask = (1 << i * BITS_PER_INDEX_STREAM) - 1; shifted_values = ((*track_stream & shifted_values_mask) << BITS_PER_INDEX_STREAM); index_value_mask = STREAM_MASK << i * BITS_PER_INDEX_STREAM; unshifted_values = *track_stream & ~(shifted_values_mask | index_value_mask); *track_stream = unshifted_values | shifted_values | stream_num; return; } } /* * if we did not find any stream, create a new one * from the least recently used */ stream_num = (*track_stream >> ((MAX_STREAMS_TRACKED - 1) * BITS_PER_INDEX_STREAM)) & STREAM_MASK; current_sd = &current_ld_sd->stream_track[stream_num]; current_sd->is_read = io_info->isRead; current_sd->next_seq_lba = io_info->ldStartBlock + io_info->numBlocks; *track_stream = (((*track_stream & ZERO_LAST_STREAM) << 4) | stream_num); return; } /** * megasas_set_raidflag_cpu_affinity - This function sets the cpu * affinity (cpu of the controller) and raid_flags in the raid context * based on IO type. * * @fusion: Fusion context * @praid_context: IO RAID context * @raid: LD raid map * @fp_possible: Is fast path possible? * @is_read: Is read IO? 
* @scsi_buff_len: SCSI command buffer length * */ static void megasas_set_raidflag_cpu_affinity(struct fusion_context *fusion, union RAID_CONTEXT_UNION *praid_context, struct MR_LD_RAID *raid, bool fp_possible, u8 is_read, u32 scsi_buff_len) { u8 cpu_sel = MR_RAID_CTX_CPUSEL_0; struct RAID_CONTEXT_G35 *rctx_g35; rctx_g35 = &praid_context->raid_context_g35; if (fp_possible) { if (is_read) { if ((raid->cpuAffinity.pdRead.cpu0) && (raid->cpuAffinity.pdRead.cpu1)) cpu_sel = MR_RAID_CTX_CPUSEL_FCFS; else if (raid->cpuAffinity.pdRead.cpu1) cpu_sel = MR_RAID_CTX_CPUSEL_1; } else { if ((raid->cpuAffinity.pdWrite.cpu0) && (raid->cpuAffinity.pdWrite.cpu1)) cpu_sel = MR_RAID_CTX_CPUSEL_FCFS; else if (raid->cpuAffinity.pdWrite.cpu1) cpu_sel = MR_RAID_CTX_CPUSEL_1; /* Fast path cache by pass capable R0/R1 VD */ if ((raid->level <= 1) && (raid->capability.fp_cache_bypass_capable)) { rctx_g35->routing_flags |= (1 << MR_RAID_CTX_ROUTINGFLAGS_SLD_SHIFT); rctx_g35->raid_flags = (MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT); } } } else { if (is_read) { if ((raid->cpuAffinity.ldRead.cpu0) && (raid->cpuAffinity.ldRead.cpu1)) cpu_sel = MR_RAID_CTX_CPUSEL_FCFS; else if (raid->cpuAffinity.ldRead.cpu1) cpu_sel = MR_RAID_CTX_CPUSEL_1; } else { if ((raid->cpuAffinity.ldWrite.cpu0) && (raid->cpuAffinity.ldWrite.cpu1)) cpu_sel = MR_RAID_CTX_CPUSEL_FCFS; else if (raid->cpuAffinity.ldWrite.cpu1) cpu_sel = MR_RAID_CTX_CPUSEL_1; if (is_stream_detected(rctx_g35) && ((raid->level == 5) || (raid->level == 6)) && (raid->writeMode == MR_RL_WRITE_THROUGH_MODE) && (cpu_sel == MR_RAID_CTX_CPUSEL_FCFS)) cpu_sel = MR_RAID_CTX_CPUSEL_0; } } rctx_g35->routing_flags |= (cpu_sel << MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_SHIFT); /* Always give priority to MR_RAID_FLAGS_IO_SUB_TYPE_LDIO_BW_LIMIT * vs MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS. * IO Subtype is not bitmap. 
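 * The plain assignment below therefore overwrites any CACHE_BYPASS sub-type set earlier.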
*/ if ((fusion->pcie_bw_limitation) && (raid->level == 1) && (!is_read) && (scsi_buff_len > MR_LARGE_IO_MIN_SIZE)) { praid_context->raid_context_g35.raid_flags = (MR_RAID_FLAGS_IO_SUB_TYPE_LDIO_BW_LIMIT << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT); } } /** * megasas_build_ldio_fusion - Prepares IOs to devices * @instance: Adapter soft state * @scp: SCSI command * @cmd: Command to be prepared * * Prepares the io_request and chain elements (sg_frame) for IO * The IO can be for PD (Fast Path) or LD */ static void megasas_build_ldio_fusion(struct megasas_instance *instance, struct scsi_cmnd *scp, struct megasas_cmd_fusion *cmd) { bool fp_possible; u16 ld; u32 start_lba_lo, start_lba_hi, device_id, datalength = 0; u32 scsi_buff_len; struct MPI2_RAID_SCSI_IO_REQUEST *io_request; struct IO_REQUEST_INFO io_info; struct fusion_context *fusion; struct MR_DRV_RAID_MAP_ALL *local_map_ptr; u8 *raidLUN; unsigned long spinlock_flags; struct MR_LD_RAID *raid = NULL; struct MR_PRIV_DEVICE *mrdev_priv; struct RAID_CONTEXT *rctx; struct RAID_CONTEXT_G35 *rctx_g35; device_id = MEGASAS_DEV_INDEX(scp); fusion = instance->ctrl_context; io_request = cmd->io_request; rctx = &io_request->RaidContext.raid_context; rctx_g35 = &io_request->RaidContext.raid_context_g35; rctx->virtual_disk_tgt_id = cpu_to_le16(device_id); rctx->status = 0; rctx->ex_status = 0; start_lba_lo = 0; start_lba_hi = 0; fp_possible = false; /* * 6-byte READ(0x08) or WRITE(0x0A) cdb */ if (scp->cmd_len == 6) { datalength = (u32) scp->cmnd[4]; start_lba_lo = ((u32) scp->cmnd[1] << 16) | ((u32) scp->cmnd[2] << 8) | (u32) scp->cmnd[3]; start_lba_lo &= 0x1FFFFF; } /* * 10-byte READ(0x28) or WRITE(0x2A) cdb */ else if (scp->cmd_len == 10) { datalength = (u32) scp->cmnd[8] | ((u32) scp->cmnd[7] << 8); start_lba_lo = ((u32) scp->cmnd[2] << 24) | ((u32) scp->cmnd[3] << 16) | ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5]; } /* * 12-byte READ(0xA8) or WRITE(0xAA) cdb */ else if (scp->cmd_len == 12) { datalength = ((u32) scp->cmnd[6] << 24) | ((u32) scp->cmnd[7] << 16) | ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9]; start_lba_lo = ((u32) scp->cmnd[2] << 24) | ((u32) scp->cmnd[3] << 16) | ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5]; } /* * 16-byte READ(0x88) or WRITE(0x8A) cdb */ else if (scp->cmd_len == 16) { datalength = ((u32) scp->cmnd[10] << 24) | ((u32) scp->cmnd[11] << 16) | ((u32) scp->cmnd[12] << 8) | (u32) scp->cmnd[13]; start_lba_lo = ((u32) scp->cmnd[6] << 24) | ((u32) scp->cmnd[7] << 16) | ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9]; start_lba_hi = ((u32) scp->cmnd[2] << 24) | ((u32) scp->cmnd[3] << 16) | ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5]; } memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO)); io_info.ldStartBlock = ((u64)start_lba_hi << 32) | start_lba_lo; io_info.numBlocks = datalength; io_info.ldTgtId = device_id; io_info.r1_alt_dev_handle = MR_DEVHANDLE_INVALID; scsi_buff_len = scsi_bufflen(scp); io_request->DataLength = cpu_to_le32(scsi_buff_len); io_info.data_arms = 1; if (scp->sc_data_direction == DMA_FROM_DEVICE) io_info.isRead = 1; local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)]; ld = MR_TargetIdToLdGet(device_id, local_map_ptr); if (ld < instance->fw_supported_vd_count) raid = MR_LdRaidGet(ld, local_map_ptr); if (!raid || (!fusion->fast_path_io)) { rctx->reg_lock_flags = 0; fp_possible = false; } else { if (MR_BuildRaidContext(instance, &io_info, rctx, local_map_ptr, &raidLUN)) fp_possible = (io_info.fpOkForIo > 0) ? 
true : false; } megasas_get_msix_index(instance, scp, cmd, io_info.data_arms); if (instance->adapter_type >= VENTURA_SERIES) { /* FP for Optimal raid level 1. * All large RAID-1 writes (> 32 KiB, both WT and WB modes) * are built by the driver as LD I/Os. * All small RAID-1 WT writes (<= 32 KiB) are built as FP I/Os * (there is never a reason to process these as buffered writes) * All small RAID-1 WB writes (<= 32 KiB) are built as FP I/Os * with the SLD bit asserted. */ if (io_info.r1_alt_dev_handle != MR_DEVHANDLE_INVALID) { mrdev_priv = scp->device->hostdata; if (atomic_inc_return(&instance->fw_outstanding) > (instance->host->can_queue)) { fp_possible = false; atomic_dec(&instance->fw_outstanding); } else if (fusion->pcie_bw_limitation && ((scsi_buff_len > MR_LARGE_IO_MIN_SIZE) || (atomic_dec_if_positive(&mrdev_priv->r1_ldio_hint) > 0))) { fp_possible = false; atomic_dec(&instance->fw_outstanding); if (scsi_buff_len > MR_LARGE_IO_MIN_SIZE) atomic_set(&mrdev_priv->r1_ldio_hint, instance->r1_ldio_hint_default); } } if (!fp_possible || (io_info.isRead && io_info.ra_capable)) { spin_lock_irqsave(&instance->stream_lock, spinlock_flags); megasas_stream_detect(instance, cmd, &io_info); spin_unlock_irqrestore(&instance->stream_lock, spinlock_flags); /* In ventura if stream detected for a read and it is * read ahead capable make this IO as LDIO */ if (is_stream_detected(rctx_g35)) fp_possible = false; } /* If raid is NULL, set CPU affinity to default CPU0 */ if (raid) megasas_set_raidflag_cpu_affinity(fusion, &io_request->RaidContext, raid, fp_possible, io_info.isRead, scsi_buff_len); else rctx_g35->routing_flags |= (MR_RAID_CTX_CPUSEL_0 << MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_SHIFT); } if (fp_possible) { megasas_set_pd_lba(io_request, scp->cmd_len, &io_info, scp, local_map_ptr, start_lba_lo); io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; cmd->request_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_FP_IO << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); if (instance->adapter_type == INVADER_SERIES) { rctx->type = MPI2_TYPE_CUDA; rctx->nseg = 0x1; io_request->IoFlags |= cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH); rctx->reg_lock_flags |= (MR_RL_FLAGS_GRANT_DESTINATION_CUDA | MR_RL_FLAGS_SEQ_NUM_ENABLE); } else if (instance->adapter_type >= VENTURA_SERIES) { rctx_g35->nseg_type |= (1 << RAID_CONTEXT_NSEG_SHIFT); rctx_g35->nseg_type |= (MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT); rctx_g35->routing_flags |= (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT); io_request->IoFlags |= cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH); } if (fusion->load_balance_info && (fusion->load_balance_info[device_id].loadBalanceFlag) && (io_info.isRead)) { io_info.devHandle = get_updated_dev_handle(instance, &fusion->load_balance_info[device_id], &io_info, local_map_ptr); megasas_priv(scp)->status |= MEGASAS_LOAD_BALANCE_FLAG; cmd->pd_r1_lb = io_info.pd_after_lb; if (instance->adapter_type >= VENTURA_SERIES) rctx_g35->span_arm = io_info.span_arm; else rctx->span_arm = io_info.span_arm; } else megasas_priv(scp)->status &= ~MEGASAS_LOAD_BALANCE_FLAG; if (instance->adapter_type >= VENTURA_SERIES) cmd->r1_alt_dev_handle = io_info.r1_alt_dev_handle; else cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID; if ((raidLUN[0] == 1) && (local_map_ptr->raidMap.devHndlInfo[io_info.pd_after_lb].validHandles > 1)) { instance->dev_handle = !(instance->dev_handle); io_info.devHandle = local_map_ptr->raidMap.devHndlInfo[io_info.pd_after_lb].devHandle[instance->dev_handle]; } cmd->request_desc->SCSIIO.DevHandle = 
io_info.devHandle; io_request->DevHandle = io_info.devHandle; cmd->pd_interface = io_info.pd_interface; /* populate the LUN field */ memcpy(io_request->LUN, raidLUN, 8); } else { rctx->timeout_value = cpu_to_le16(local_map_ptr->raidMap.fpPdIoTimeoutSec); cmd->request_desc->SCSIIO.RequestFlags = (MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); if (instance->adapter_type == INVADER_SERIES) { if (io_info.do_fp_rlbypass || (rctx->reg_lock_flags == REGION_TYPE_UNUSED)) cmd->request_desc->SCSIIO.RequestFlags = (MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); rctx->type = MPI2_TYPE_CUDA; rctx->reg_lock_flags |= (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 | MR_RL_FLAGS_SEQ_NUM_ENABLE); rctx->nseg = 0x1; } else if (instance->adapter_type >= VENTURA_SERIES) { rctx_g35->routing_flags |= (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT); rctx_g35->nseg_type |= (1 << RAID_CONTEXT_NSEG_SHIFT); rctx_g35->nseg_type |= (MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT); } io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST; io_request->DevHandle = cpu_to_le16(device_id); } /* Not FP */ } /** * megasas_build_ld_nonrw_fusion - prepares non rw ios for virtual disk * @instance: Adapter soft state * @scmd: SCSI command * @cmd: Command to be prepared * * Prepares the io_request frame for non-rw io cmds for vd. */ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance, struct scsi_cmnd *scmd, struct megasas_cmd_fusion *cmd) { u32 device_id; struct MPI2_RAID_SCSI_IO_REQUEST *io_request; u16 ld; struct MR_DRV_RAID_MAP_ALL *local_map_ptr; struct fusion_context *fusion = instance->ctrl_context; u8 span, physArm; __le16 devHandle; u32 arRef, pd; struct MR_LD_RAID *raid; struct RAID_CONTEXT *pRAID_Context; u8 fp_possible = 1; io_request = cmd->io_request; device_id = MEGASAS_DEV_INDEX(scmd); local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)]; io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd)); /* get RAID_Context pointer */ pRAID_Context = &io_request->RaidContext.raid_context; /* Check with FW team */ pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id); pRAID_Context->reg_lock_row_lba = 0; pRAID_Context->reg_lock_length = 0; if (fusion->fast_path_io && ( device_id < instance->fw_supported_vd_count)) { ld = MR_TargetIdToLdGet(device_id, local_map_ptr); if (ld >= instance->fw_supported_vd_count - 1) fp_possible = 0; else { raid = MR_LdRaidGet(ld, local_map_ptr); if (!(raid->capability.fpNonRWCapable)) fp_possible = 0; } } else fp_possible = 0; if (!fp_possible) { io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST; io_request->DevHandle = cpu_to_le16(device_id); io_request->LUN[1] = scmd->device->lun; pRAID_Context->timeout_value = cpu_to_le16(scsi_cmd_to_rq(scmd)->timeout / HZ); cmd->request_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); } else { /* set RAID context values */ pRAID_Context->config_seq_num = raid->seqNum; if (instance->adapter_type < VENTURA_SERIES) pRAID_Context->reg_lock_flags = REGION_TYPE_SHARED_READ; pRAID_Context->timeout_value = cpu_to_le16(raid->fpIoTimeoutForLd); /* get the DevHandle for the PD (since this is fpNonRWCapable, this is a single disk RAID0) */ span = physArm = 0; arRef = MR_LdSpanArrayGet(ld, span, local_map_ptr); pd = MR_ArPdGet(arRef, physArm, local_map_ptr); devHandle = MR_PdDevHandleGet(pd, local_map_ptr); /* build request descriptor */ cmd->request_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_FP_IO << 
MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); cmd->request_desc->SCSIIO.DevHandle = devHandle; /* populate the LUN field */ memcpy(io_request->LUN, raid->LUN, 8); /* build the raidScsiIO structure */ io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; io_request->DevHandle = devHandle; } } /** * megasas_build_syspd_fusion - prepares rw/non-rw ios for syspd * @instance: Adapter soft state * @scmd: SCSI command * @cmd: Command to be prepared * @fp_possible: parameter to detect fast path or firmware path io. * * Prepares the io_request frame for rw/non-rw io cmds for syspds */ static void megasas_build_syspd_fusion(struct megasas_instance *instance, struct scsi_cmnd *scmd, struct megasas_cmd_fusion *cmd, bool fp_possible) { u32 device_id; struct MPI2_RAID_SCSI_IO_REQUEST *io_request; u16 pd_index = 0; u16 os_timeout_value; u16 timeout_limit; struct MR_DRV_RAID_MAP_ALL *local_map_ptr; struct RAID_CONTEXT *pRAID_Context; struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync; struct MR_PRIV_DEVICE *mr_device_priv_data; struct fusion_context *fusion = instance->ctrl_context; pd_sync = (void *)fusion->pd_seq_sync[(instance->pd_seq_map_id - 1) & 1]; device_id = MEGASAS_DEV_INDEX(scmd); pd_index = MEGASAS_PD_INDEX(scmd); os_timeout_value = scsi_cmd_to_rq(scmd)->timeout / HZ; mr_device_priv_data = scmd->device->hostdata; cmd->pd_interface = mr_device_priv_data->interface_type; io_request = cmd->io_request; /* get RAID_Context pointer */ pRAID_Context = &io_request->RaidContext.raid_context; pRAID_Context->reg_lock_flags = 0; pRAID_Context->reg_lock_row_lba = 0; pRAID_Context->reg_lock_length = 0; io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd)); io_request->LUN[1] = scmd->device->lun; pRAID_Context->raid_flags = MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT; /* If FW supports PD sequence number */ if (instance->support_seqnum_jbod_fp) { if (instance->use_seqnum_jbod_fp && instance->pd_list[pd_index].driveType == TYPE_DISK) { /* More than 256 PD/JBOD support for Ventura */ if (instance->support_morethan256jbod) pRAID_Context->virtual_disk_tgt_id = pd_sync->seq[pd_index].pd_target_id; else pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id + (MAX_PHYSICAL_DEVICES - 1)); pRAID_Context->config_seq_num = pd_sync->seq[pd_index].seqNum; io_request->DevHandle = pd_sync->seq[pd_index].devHandle; if (instance->adapter_type >= VENTURA_SERIES) { io_request->RaidContext.raid_context_g35.routing_flags |= (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT); io_request->RaidContext.raid_context_g35.nseg_type |= (1 << RAID_CONTEXT_NSEG_SHIFT); io_request->RaidContext.raid_context_g35.nseg_type |= (MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT); } else { pRAID_Context->type = MPI2_TYPE_CUDA; pRAID_Context->nseg = 0x1; pRAID_Context->reg_lock_flags |= (MR_RL_FLAGS_SEQ_NUM_ENABLE | MR_RL_FLAGS_GRANT_DESTINATION_CUDA); } } else { pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id + (MAX_PHYSICAL_DEVICES - 1)); pRAID_Context->config_seq_num = 0; io_request->DevHandle = cpu_to_le16(0xFFFF); } } else { pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id); pRAID_Context->config_seq_num = 0; if (fusion->fast_path_io) { local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)]; io_request->DevHandle = local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl; } else { io_request->DevHandle = cpu_to_le16(0xFFFF); } } cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle; megasas_get_msix_index(instance, scmd, cmd, 1); if (!fp_possible) { /* system pd firmware path */ io_request->Function = 
MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST; cmd->request_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); pRAID_Context->timeout_value = cpu_to_le16(os_timeout_value); pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id); } else { if (os_timeout_value) os_timeout_value++; /* system pd Fast Path */ io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; timeout_limit = (scmd->device->type == TYPE_DISK) ? 255 : 0xFFFF; pRAID_Context->timeout_value = cpu_to_le16((os_timeout_value > timeout_limit) ? timeout_limit : os_timeout_value); if (instance->adapter_type >= INVADER_SERIES) io_request->IoFlags |= cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH); cmd->request_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_FP_IO << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); } } /** * megasas_build_io_fusion - Prepares IOs to devices * @instance: Adapter soft state * @scp: SCSI command * @cmd: Command to be prepared * * Invokes helper functions to prepare request frames * and sets flags appropriate for IO/Non-IO cmd */ static int megasas_build_io_fusion(struct megasas_instance *instance, struct scsi_cmnd *scp, struct megasas_cmd_fusion *cmd) { int sge_count; u16 pd_index = 0; u8 drive_type = 0; struct MPI2_RAID_SCSI_IO_REQUEST *io_request = cmd->io_request; struct MR_PRIV_DEVICE *mr_device_priv_data; mr_device_priv_data = scp->device->hostdata; /* Zero out some fields so they don't get reused */ memset(io_request->LUN, 0x0, 8); io_request->CDB.EEDP32.PrimaryReferenceTag = 0; io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0; io_request->EEDPFlags = 0; io_request->Control = 0; io_request->EEDPBlockSize = 0; io_request->ChainOffset = 0; io_request->RaidContext.raid_context.raid_flags = 0; io_request->RaidContext.raid_context.type = 0; io_request->RaidContext.raid_context.nseg = 0; memcpy(io_request->CDB.CDB32, scp->cmnd, scp->cmd_len); /* * Just the CDB length,rest of the Flags are zero * This will be modified for FP in build_ldio_fusion */ io_request->IoFlags = cpu_to_le16(scp->cmd_len); switch (megasas_cmd_type(scp)) { case READ_WRITE_LDIO: megasas_build_ldio_fusion(instance, scp, cmd); break; case NON_READ_WRITE_LDIO: megasas_build_ld_nonrw_fusion(instance, scp, cmd); break; case READ_WRITE_SYSPDIO: megasas_build_syspd_fusion(instance, scp, cmd, true); break; case NON_READ_WRITE_SYSPDIO: pd_index = MEGASAS_PD_INDEX(scp); drive_type = instance->pd_list[pd_index].driveType; if ((instance->secure_jbod_support || mr_device_priv_data->is_tm_capable) || (instance->adapter_type >= VENTURA_SERIES && drive_type == TYPE_ENCLOSURE)) megasas_build_syspd_fusion(instance, scp, cmd, false); else megasas_build_syspd_fusion(instance, scp, cmd, true); break; default: break; } /* * Construct SGL */ sge_count = megasas_make_sgl(instance, scp, cmd); if (sge_count > instance->max_num_sge || (sge_count < 0)) { dev_err(&instance->pdev->dev, "%s %d sge_count (%d) is out of range. Range is: 0-%d\n", __func__, __LINE__, sge_count, instance->max_num_sge); return 1; } if (instance->adapter_type >= VENTURA_SERIES) { set_num_sge(&io_request->RaidContext.raid_context_g35, sge_count); cpu_to_le16s(&io_request->RaidContext.raid_context_g35.routing_flags); cpu_to_le16s(&io_request->RaidContext.raid_context_g35.nseg_type); } else { /* numSGE store lower 8 bit of sge_count. 
		 * numSGEExt store higher 8 bit of sge_count
		 */
		io_request->RaidContext.raid_context.num_sge = sge_count;
		io_request->RaidContext.raid_context.num_sge_ext =
			(u8)(sge_count >> 8);
	}

	io_request->SGLFlags = cpu_to_le16(MPI2_SGE_FLAGS_64_BIT_ADDRESSING);

	if (scp->sc_data_direction == DMA_TO_DEVICE)
		io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_WRITE);
	else if (scp->sc_data_direction == DMA_FROM_DEVICE)
		io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_READ);

	io_request->SGLOffset0 =
		offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4;

	io_request->SenseBufferLowAddress =
		cpu_to_le32(lower_32_bits(cmd->sense_phys_addr));
	io_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;

	cmd->scmd = scp;
	megasas_priv(scp)->cmd_priv = cmd;

	return 0;
}

static union MEGASAS_REQUEST_DESCRIPTOR_UNION *
megasas_get_request_descriptor(struct megasas_instance *instance, u16 index)
{
	u8 *p;
	struct fusion_context *fusion;

	fusion = instance->ctrl_context;
	p = fusion->req_frames_desc +
		sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) * index;

	return (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)p;
}

/* megasas_prepare_secondRaid1_IO
 * It prepares the second (peer) IO of a RAID 1 fast path write
 */
static void megasas_prepare_secondRaid1_IO(struct megasas_instance *instance,
					   struct megasas_cmd_fusion *cmd,
					   struct megasas_cmd_fusion *r1_cmd)
{
	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc, *req_desc2 = NULL;
	struct fusion_context *fusion;

	fusion = instance->ctrl_context;
	req_desc = cmd->request_desc;

	/* copy the io request frame as well as 8 SGEs data for r1 command */
	memcpy(r1_cmd->io_request, cmd->io_request,
	       (sizeof(struct MPI2_RAID_SCSI_IO_REQUEST)));
	memcpy(r1_cmd->io_request->SGLs, cmd->io_request->SGLs,
	       (fusion->max_sge_in_main_msg * sizeof(union MPI2_SGE_IO_UNION)));

	/* sense buffer is different for r1 command */
	r1_cmd->io_request->SenseBufferLowAddress =
			cpu_to_le32(lower_32_bits(r1_cmd->sense_phys_addr));
	r1_cmd->scmd = cmd->scmd;
	req_desc2 = megasas_get_request_descriptor(instance,
						   (r1_cmd->index - 1));
	req_desc2->Words = 0;
	r1_cmd->request_desc = req_desc2;
	req_desc2->SCSIIO.SMID = cpu_to_le16(r1_cmd->index);
	req_desc2->SCSIIO.RequestFlags = req_desc->SCSIIO.RequestFlags;
	r1_cmd->request_desc->SCSIIO.DevHandle = cmd->r1_alt_dev_handle;
	r1_cmd->io_request->DevHandle = cmd->r1_alt_dev_handle;
	r1_cmd->r1_alt_dev_handle = cmd->io_request->DevHandle;
	cmd->io_request->RaidContext.raid_context_g35.flow_specific.peer_smid =
			cpu_to_le16(r1_cmd->index);
	r1_cmd->io_request->RaidContext.raid_context_g35.flow_specific.peer_smid =
			cpu_to_le16(cmd->index);
	/* MSIxIndex of both commands request descriptors should be same */
	r1_cmd->request_desc->SCSIIO.MSIxIndex =
			cmd->request_desc->SCSIIO.MSIxIndex;
	/* span arm is different for r1 cmd */
	r1_cmd->io_request->RaidContext.raid_context_g35.span_arm =
		cmd->io_request->RaidContext.raid_context_g35.span_arm + 1;
}

/**
 * megasas_build_and_issue_cmd_fusion - Main routine for building and
 *					issuing non IOCTL cmd
 * @instance: Adapter soft state
 * @scmd: pointer to scsi cmd from OS
 */
static u32
megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
				   struct scsi_cmnd *scmd)
{
	struct megasas_cmd_fusion *cmd, *r1_cmd = NULL;
	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
	u32 index;

	if ((megasas_cmd_type(scmd) == READ_WRITE_LDIO) &&
	    instance->ldio_threshold &&
	    (atomic_inc_return(&instance->ldio_outstanding) >
	     instance->ldio_threshold)) {
		atomic_dec(&instance->ldio_outstanding);
		return SCSI_MLQUEUE_DEVICE_BUSY;
	}

	if (atomic_inc_return(&instance->fw_outstanding) >
			instance->host->can_queue) {
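		/* controller queue depth exceeded: undo the increment and let the SCSI midlayer retry */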
atomic_dec(&instance->fw_outstanding); return SCSI_MLQUEUE_HOST_BUSY; } cmd = megasas_get_cmd_fusion(instance, scsi_cmd_to_rq(scmd)->tag); if (!cmd) { atomic_dec(&instance->fw_outstanding); return SCSI_MLQUEUE_HOST_BUSY; } index = cmd->index; req_desc = megasas_get_request_descriptor(instance, index-1); req_desc->Words = 0; cmd->request_desc = req_desc; if (megasas_build_io_fusion(instance, scmd, cmd)) { megasas_return_cmd_fusion(instance, cmd); dev_err(&instance->pdev->dev, "Error building command\n"); cmd->request_desc = NULL; atomic_dec(&instance->fw_outstanding); return SCSI_MLQUEUE_HOST_BUSY; } req_desc = cmd->request_desc; req_desc->SCSIIO.SMID = cpu_to_le16(index); if (cmd->io_request->ChainOffset != 0 && cmd->io_request->ChainOffset != 0xF) dev_err(&instance->pdev->dev, "The chain offset value is not " "correct : %x\n", cmd->io_request->ChainOffset); /* * if it is raid 1/10 fp write capable. * try to get second command from pool and construct it. * From FW, it has confirmed that lba values of two PDs * corresponds to single R1/10 LD are always same * */ /* driver side count always should be less than max_fw_cmds * to get new command */ if (cmd->r1_alt_dev_handle != MR_DEVHANDLE_INVALID) { r1_cmd = megasas_get_cmd_fusion(instance, scsi_cmd_to_rq(scmd)->tag + instance->max_fw_cmds); megasas_prepare_secondRaid1_IO(instance, cmd, r1_cmd); } /* * Issue the command to the FW */ megasas_sdev_busy_inc(instance, scmd); megasas_fire_cmd_fusion(instance, req_desc); if (r1_cmd) megasas_fire_cmd_fusion(instance, r1_cmd->request_desc); return 0; } /** * megasas_complete_r1_command - * completes R1 FP write commands which has valid peer smid * @instance: Adapter soft state * @cmd: MPT command frame * */ static inline void megasas_complete_r1_command(struct megasas_instance *instance, struct megasas_cmd_fusion *cmd) { u8 *sense, status, ex_status; u32 data_length; u16 peer_smid; struct fusion_context *fusion; struct megasas_cmd_fusion *r1_cmd = NULL; struct scsi_cmnd *scmd_local = NULL; struct RAID_CONTEXT_G35 *rctx_g35; rctx_g35 = &cmd->io_request->RaidContext.raid_context_g35; fusion = instance->ctrl_context; peer_smid = le16_to_cpu(rctx_g35->flow_specific.peer_smid); r1_cmd = fusion->cmd_list[peer_smid - 1]; scmd_local = cmd->scmd; status = rctx_g35->status; ex_status = rctx_g35->ex_status; data_length = cmd->io_request->DataLength; sense = cmd->sense; cmd->cmd_completed = true; /* Check if peer command is completed or not*/ if (r1_cmd->cmd_completed) { rctx_g35 = &r1_cmd->io_request->RaidContext.raid_context_g35; if (rctx_g35->status != MFI_STAT_OK) { status = rctx_g35->status; ex_status = rctx_g35->ex_status; data_length = r1_cmd->io_request->DataLength; sense = r1_cmd->sense; } megasas_return_cmd_fusion(instance, r1_cmd); map_cmd_status(fusion, scmd_local, status, ex_status, le32_to_cpu(data_length), sense); if (instance->ldio_threshold && megasas_cmd_type(scmd_local) == READ_WRITE_LDIO) atomic_dec(&instance->ldio_outstanding); megasas_priv(scmd_local)->cmd_priv = NULL; megasas_return_cmd_fusion(instance, cmd); scsi_dma_unmap(scmd_local); megasas_sdev_busy_dec(instance, scmd_local); scsi_done(scmd_local); } } /** * access_irq_context: Access to reply processing * @irq_context: IRQ context * * Synchronize access to reply processing. * * Return: true on success, false on failure. 
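 * Only one caller at a time may process replies for a given IRQ context
 * (enforced via atomic_add_unless() below); pair with release_irq_context().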
*/ static inline bool access_irq_context(struct megasas_irq_context *irq_context) { if (!irq_context) return true; if (atomic_add_unless(&irq_context->in_used, 1, 1)) return true; return false; } /** * release_irq_context: Release reply processing * @irq_context: IRQ context * * Release access of reply processing. * * Return: Nothing. */ static inline void release_irq_context(struct megasas_irq_context *irq_context) { if (irq_context) atomic_dec(&irq_context->in_used); } /** * complete_cmd_fusion - Completes command * @instance: Adapter soft state * @MSIxIndex: MSI number * @irq_context: IRQ context * * Completes all commands that is in reply descriptor queue */ static int complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex, struct megasas_irq_context *irq_context) { union MPI2_REPLY_DESCRIPTORS_UNION *desc; struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc; struct MPI2_RAID_SCSI_IO_REQUEST *scsi_io_req; struct fusion_context *fusion; struct megasas_cmd *cmd_mfi; struct megasas_cmd_fusion *cmd_fusion; u16 smid, num_completed; u8 reply_descript_type, *sense, status, extStatus; u32 device_id, data_length; union desc_value d_val; struct LD_LOAD_BALANCE_INFO *lbinfo; int threshold_reply_count = 0; struct scsi_cmnd *scmd_local = NULL; struct MR_TASK_MANAGE_REQUEST *mr_tm_req; struct MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_tm_req; fusion = instance->ctrl_context; if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) return IRQ_HANDLED; if (!access_irq_context(irq_context)) return 0; desc = fusion->reply_frames_desc[MSIxIndex] + fusion->last_reply_idx[MSIxIndex]; reply_desc = (struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc; d_val.word = desc->Words; reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) { release_irq_context(irq_context); return IRQ_NONE; } num_completed = 0; while (d_val.u.low != cpu_to_le32(UINT_MAX) && d_val.u.high != cpu_to_le32(UINT_MAX)) { smid = le16_to_cpu(reply_desc->SMID); cmd_fusion = fusion->cmd_list[smid - 1]; scsi_io_req = (struct MPI2_RAID_SCSI_IO_REQUEST *) cmd_fusion->io_request; scmd_local = cmd_fusion->scmd; status = scsi_io_req->RaidContext.raid_context.status; extStatus = scsi_io_req->RaidContext.raid_context.ex_status; sense = cmd_fusion->sense; data_length = scsi_io_req->DataLength; switch (scsi_io_req->Function) { case MPI2_FUNCTION_SCSI_TASK_MGMT: mr_tm_req = (struct MR_TASK_MANAGE_REQUEST *) cmd_fusion->io_request; mpi_tm_req = (struct MPI2_SCSI_TASK_MANAGE_REQUEST *) &mr_tm_req->TmRequest; dev_dbg(&instance->pdev->dev, "TM completion:" "type: 0x%x TaskMID: 0x%x\n", mpi_tm_req->TaskType, mpi_tm_req->TaskMID); complete(&cmd_fusion->done); break; case MPI2_FUNCTION_SCSI_IO_REQUEST: /*Fast Path IO.*/ /* Update load balancing info */ if (fusion->load_balance_info && (megasas_priv(cmd_fusion->scmd)->status & MEGASAS_LOAD_BALANCE_FLAG)) { device_id = MEGASAS_DEV_INDEX(scmd_local); lbinfo = &fusion->load_balance_info[device_id]; atomic_dec(&lbinfo->scsi_pending_cmds[cmd_fusion->pd_r1_lb]); megasas_priv(cmd_fusion->scmd)->status &= ~MEGASAS_LOAD_BALANCE_FLAG; } fallthrough; /* and complete IO */ case MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST: /* LD-IO Path */ atomic_dec(&instance->fw_outstanding); if (cmd_fusion->r1_alt_dev_handle == MR_DEVHANDLE_INVALID) { map_cmd_status(fusion, scmd_local, status, extStatus, le32_to_cpu(data_length), sense); if (instance->ldio_threshold && (megasas_cmd_type(scmd_local) == READ_WRITE_LDIO)) 
atomic_dec(&instance->ldio_outstanding); megasas_priv(scmd_local)->cmd_priv = NULL; megasas_return_cmd_fusion(instance, cmd_fusion); scsi_dma_unmap(scmd_local); megasas_sdev_busy_dec(instance, scmd_local); scsi_done(scmd_local); } else /* Optimal VD - R1 FP command completion. */ megasas_complete_r1_command(instance, cmd_fusion); break; case MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /*MFI command */ cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx]; /* Poll mode. Dummy free. * In case of Interrupt mode, caller has reverse check. */ if (cmd_mfi->flags & DRV_DCMD_POLLED_MODE) { cmd_mfi->flags &= ~DRV_DCMD_POLLED_MODE; megasas_return_cmd(instance, cmd_mfi); } else megasas_complete_cmd(instance, cmd_mfi, DID_OK); break; } fusion->last_reply_idx[MSIxIndex]++; if (fusion->last_reply_idx[MSIxIndex] >= fusion->reply_q_depth) fusion->last_reply_idx[MSIxIndex] = 0; desc->Words = cpu_to_le64(ULLONG_MAX); num_completed++; threshold_reply_count++; /* Get the next reply descriptor */ if (!fusion->last_reply_idx[MSIxIndex]) desc = fusion->reply_frames_desc[MSIxIndex]; else desc++; reply_desc = (struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc; d_val.word = desc->Words; reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) break; /* * Write to reply post host index register after completing threshold * number of reply counts and still there are more replies in reply queue * pending to be completed */ if (threshold_reply_count >= instance->threshold_reply_count) { if (instance->msix_combined) writel(((MSIxIndex & 0x7) << 24) | fusion->last_reply_idx[MSIxIndex], instance->reply_post_host_index_addr[MSIxIndex/8]); else writel((MSIxIndex << 24) | fusion->last_reply_idx[MSIxIndex], instance->reply_post_host_index_addr[0]); threshold_reply_count = 0; if (irq_context) { if (!irq_context->irq_poll_scheduled) { irq_context->irq_poll_scheduled = true; irq_context->irq_line_enable = true; irq_poll_sched(&irq_context->irqpoll); } release_irq_context(irq_context); return num_completed; } } } if (num_completed) { wmb(); if (instance->msix_combined) writel(((MSIxIndex & 0x7) << 24) | fusion->last_reply_idx[MSIxIndex], instance->reply_post_host_index_addr[MSIxIndex/8]); else writel((MSIxIndex << 24) | fusion->last_reply_idx[MSIxIndex], instance->reply_post_host_index_addr[0]); megasas_check_and_restore_queue_depth(instance); } release_irq_context(irq_context); return num_completed; } int megasas_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num) { struct megasas_instance *instance; int num_entries = 0; struct fusion_context *fusion; instance = (struct megasas_instance *)shost->hostdata; fusion = instance->ctrl_context; queue_num = queue_num + instance->low_latency_index_start; if (!atomic_add_unless(&fusion->busy_mq_poll[queue_num], 1, 1)) return 0; num_entries = complete_cmd_fusion(instance, queue_num, NULL); atomic_dec(&fusion->busy_mq_poll[queue_num]); return num_entries; } /** * megasas_enable_irq_poll() - enable irqpoll * @instance: Adapter soft state */ static void megasas_enable_irq_poll(struct megasas_instance *instance) { u32 count, i; struct megasas_irq_context *irq_ctx; count = instance->msix_vectors > 0 ? 
instance->msix_vectors : 1; for (i = 0; i < count; i++) { irq_ctx = &instance->irq_context[i]; irq_poll_enable(&irq_ctx->irqpoll); } } /** * megasas_sync_irqs - Synchronizes all IRQs owned by adapter * @instance_addr: Adapter soft state address */ static void megasas_sync_irqs(unsigned long instance_addr) { u32 count, i; struct megasas_instance *instance = (struct megasas_instance *)instance_addr; struct megasas_irq_context *irq_ctx; count = instance->msix_vectors > 0 ? instance->msix_vectors : 1; for (i = 0; i < count; i++) { synchronize_irq(pci_irq_vector(instance->pdev, i)); irq_ctx = &instance->irq_context[i]; irq_poll_disable(&irq_ctx->irqpoll); if (irq_ctx->irq_poll_scheduled) { irq_ctx->irq_poll_scheduled = false; enable_irq(irq_ctx->os_irq); complete_cmd_fusion(instance, irq_ctx->MSIxIndex, irq_ctx); } } } /** * megasas_irqpoll() - process a queue for completed reply descriptors * @irqpoll: IRQ poll structure associated with queue to poll. * @budget: Threshold of reply descriptors to process per poll. * * Return: The number of entries processed. */ int megasas_irqpoll(struct irq_poll *irqpoll, int budget) { struct megasas_irq_context *irq_ctx; struct megasas_instance *instance; int num_entries; irq_ctx = container_of(irqpoll, struct megasas_irq_context, irqpoll); instance = irq_ctx->instance; if (irq_ctx->irq_line_enable) { disable_irq_nosync(irq_ctx->os_irq); irq_ctx->irq_line_enable = false; } num_entries = complete_cmd_fusion(instance, irq_ctx->MSIxIndex, irq_ctx); if (num_entries < budget) { irq_poll_complete(irqpoll); irq_ctx->irq_poll_scheduled = false; enable_irq(irq_ctx->os_irq); complete_cmd_fusion(instance, irq_ctx->MSIxIndex, irq_ctx); } return num_entries; } /** * megasas_complete_cmd_dpc_fusion - Completes command * @instance_addr: Adapter soft state address * * Tasklet to complete cmds */ static void megasas_complete_cmd_dpc_fusion(unsigned long instance_addr) { struct megasas_instance *instance = (struct megasas_instance *)instance_addr; struct megasas_irq_context *irq_ctx = NULL; u32 count, MSIxIndex; count = instance->msix_vectors > 0 ? instance->msix_vectors : 1; /* If we have already declared adapter dead, donot complete cmds */ if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) return; for (MSIxIndex = 0 ; MSIxIndex < count; MSIxIndex++) { irq_ctx = &instance->irq_context[MSIxIndex]; complete_cmd_fusion(instance, MSIxIndex, irq_ctx); } } /** * megasas_isr_fusion - isr entry point * @irq: IRQ number * @devp: IRQ context */ static irqreturn_t megasas_isr_fusion(int irq, void *devp) { struct megasas_irq_context *irq_context = devp; struct megasas_instance *instance = irq_context->instance; u32 mfiStatus; if (instance->mask_interrupts) return IRQ_NONE; if (irq_context->irq_poll_scheduled) return IRQ_HANDLED; if (!instance->msix_vectors) { mfiStatus = instance->instancet->clear_intr(instance); if (!mfiStatus) return IRQ_NONE; } /* If we are resetting, bail */ if (test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags)) { instance->instancet->clear_intr(instance); return IRQ_HANDLED; } return complete_cmd_fusion(instance, irq_context->MSIxIndex, irq_context) ? 
IRQ_HANDLED : IRQ_NONE; } /** * build_mpt_mfi_pass_thru - builds a cmd fo MFI Pass thru * @instance: Adapter soft state * @mfi_cmd: megasas_cmd pointer * */ static void build_mpt_mfi_pass_thru(struct megasas_instance *instance, struct megasas_cmd *mfi_cmd) { struct MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain; struct MPI2_RAID_SCSI_IO_REQUEST *io_req; struct megasas_cmd_fusion *cmd; struct fusion_context *fusion; struct megasas_header *frame_hdr = &mfi_cmd->frame->hdr; fusion = instance->ctrl_context; cmd = megasas_get_cmd_fusion(instance, instance->max_scsi_cmds + mfi_cmd->index); /* Save the smid. To be used for returning the cmd */ mfi_cmd->context.smid = cmd->index; /* * For cmds where the flag is set, store the flag and check * on completion. For cmds with this flag, don't call * megasas_complete_cmd */ if (frame_hdr->flags & cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)) mfi_cmd->flags |= DRV_DCMD_POLLED_MODE; io_req = cmd->io_request; if (instance->adapter_type >= INVADER_SERIES) { struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end = (struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL; sgl_ptr_end += fusion->max_sge_in_main_msg - 1; sgl_ptr_end->Flags = 0; } mpi25_ieee_chain = (struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain; io_req->Function = MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST; io_req->SGLOffset0 = offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4; io_req->ChainOffset = fusion->chain_offset_mfi_pthru; mpi25_ieee_chain->Address = cpu_to_le64(mfi_cmd->frame_phys_addr); mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT | MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR; mpi25_ieee_chain->Length = cpu_to_le32(instance->mfi_frame_size); } /** * build_mpt_cmd - Calls helper function to build a cmd MFI Pass thru cmd * @instance: Adapter soft state * @cmd: mfi cmd to build * */ static union MEGASAS_REQUEST_DESCRIPTOR_UNION * build_mpt_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd) { union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc = NULL; u16 index; build_mpt_mfi_pass_thru(instance, cmd); index = cmd->context.smid; req_desc = megasas_get_request_descriptor(instance, index - 1); req_desc->Words = 0; req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); req_desc->SCSIIO.SMID = cpu_to_le16(index); return req_desc; } /** * megasas_issue_dcmd_fusion - Issues a MFI Pass thru cmd * @instance: Adapter soft state * @cmd: mfi cmd pointer * */ static void megasas_issue_dcmd_fusion(struct megasas_instance *instance, struct megasas_cmd *cmd) { union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; req_desc = build_mpt_cmd(instance, cmd); megasas_fire_cmd_fusion(instance, req_desc); return; } /** * megasas_release_fusion - Reverses the FW initialization * @instance: Adapter soft state */ void megasas_release_fusion(struct megasas_instance *instance) { megasas_free_ioc_init_cmd(instance); megasas_free_cmds(instance); megasas_free_cmds_fusion(instance); iounmap(instance->reg_set); pci_release_selected_regions(instance->pdev, 1<<instance->bar); } /** * megasas_read_fw_status_reg_fusion - returns the current FW status value * @instance: Adapter soft state */ static u32 megasas_read_fw_status_reg_fusion(struct megasas_instance *instance) { return megasas_readl(instance, &instance->reg_set->outbound_scratch_pad_0); } /** * megasas_alloc_host_crash_buffer - Host buffers for Crash dump collection from Firmware * @instance: Controller's soft instance * @return: Number of allocated host crash buffers */ static void megasas_alloc_host_crash_buffer(struct 
megasas_instance *instance) { unsigned int i; for (i = 0; i < MAX_CRASH_DUMP_SIZE; i++) { instance->crash_buf[i] = vzalloc(CRASH_DMA_BUF_SIZE); if (!instance->crash_buf[i]) { dev_info(&instance->pdev->dev, "Firmware crash dump " "memory allocation failed at index %d\n", i); break; } } instance->drv_buf_alloc = i; } /** * megasas_free_host_crash_buffer - Host buffers for Crash dump collection from Firmware * @instance: Controller's soft instance */ void megasas_free_host_crash_buffer(struct megasas_instance *instance) { unsigned int i; for (i = 0; i < instance->drv_buf_alloc; i++) { vfree(instance->crash_buf[i]); } instance->drv_buf_index = 0; instance->drv_buf_alloc = 0; instance->fw_crash_state = UNAVAILABLE; instance->fw_crash_buffer_size = 0; } /** * megasas_adp_reset_fusion - For controller reset * @instance: Controller's soft instance * @regs: MFI register set */ static int megasas_adp_reset_fusion(struct megasas_instance *instance, struct megasas_register_set __iomem *regs) { u32 host_diag, abs_state, retry; /* Now try to reset the chip */ writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &instance->reg_set->fusion_seq_offset); writel(MPI2_WRSEQ_1ST_KEY_VALUE, &instance->reg_set->fusion_seq_offset); writel(MPI2_WRSEQ_2ND_KEY_VALUE, &instance->reg_set->fusion_seq_offset); writel(MPI2_WRSEQ_3RD_KEY_VALUE, &instance->reg_set->fusion_seq_offset); writel(MPI2_WRSEQ_4TH_KEY_VALUE, &instance->reg_set->fusion_seq_offset); writel(MPI2_WRSEQ_5TH_KEY_VALUE, &instance->reg_set->fusion_seq_offset); writel(MPI2_WRSEQ_6TH_KEY_VALUE, &instance->reg_set->fusion_seq_offset); /* Check that the diag write enable (DRWE) bit is on */ host_diag = megasas_readl(instance, &instance->reg_set->fusion_host_diag); retry = 0; while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) { msleep(100); host_diag = megasas_readl(instance, &instance->reg_set->fusion_host_diag); if (retry++ == 100) { dev_warn(&instance->pdev->dev, "Host diag unlock failed from %s %d\n", __func__, __LINE__); break; } } if (!(host_diag & HOST_DIAG_WRITE_ENABLE)) return -1; /* Send chip reset command */ writel(host_diag | HOST_DIAG_RESET_ADAPTER, &instance->reg_set->fusion_host_diag); msleep(3000); /* Make sure reset adapter bit is cleared */ host_diag = megasas_readl(instance, &instance->reg_set->fusion_host_diag); retry = 0; while (host_diag & HOST_DIAG_RESET_ADAPTER) { msleep(100); host_diag = megasas_readl(instance, &instance->reg_set->fusion_host_diag); if (retry++ == 1000) { dev_warn(&instance->pdev->dev, "Diag reset adapter never cleared %s %d\n", __func__, __LINE__); break; } } if (host_diag & HOST_DIAG_RESET_ADAPTER) return -1; abs_state = instance->instancet->read_fw_status_reg(instance) & MFI_STATE_MASK; retry = 0; while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) { msleep(100); abs_state = instance->instancet-> read_fw_status_reg(instance) & MFI_STATE_MASK; } if (abs_state <= MFI_STATE_FW_INIT) { dev_warn(&instance->pdev->dev, "fw state < MFI_STATE_FW_INIT, state = 0x%x %s %d\n", abs_state, __func__, __LINE__); return -1; } return 0; } /** * megasas_check_reset_fusion - For controller reset check * @instance: Controller's soft instance * @regs: MFI register set */ static int megasas_check_reset_fusion(struct megasas_instance *instance, struct megasas_register_set __iomem *regs) { return 0; } /** * megasas_trigger_snap_dump - Trigger snap dump in FW * @instance: Soft instance of adapter */ static inline void megasas_trigger_snap_dump(struct megasas_instance *instance) { int j; u32 fw_state, abs_state; if (!instance->disableOnlineCtrlReset) { 
dev_info(&instance->pdev->dev, "Trigger snap dump\n"); writel(MFI_ADP_TRIGGER_SNAP_DUMP, &instance->reg_set->doorbell); readl(&instance->reg_set->doorbell); } for (j = 0; j < instance->snapdump_wait_time; j++) { abs_state = instance->instancet->read_fw_status_reg(instance); fw_state = abs_state & MFI_STATE_MASK; if (fw_state == MFI_STATE_FAULT) { dev_printk(KERN_ERR, &instance->pdev->dev, "FW in FAULT state Fault code:0x%x subcode:0x%x func:%s\n", abs_state & MFI_STATE_FAULT_CODE, abs_state & MFI_STATE_FAULT_SUBCODE, __func__); return; } msleep(1000); } } /* This function waits for outstanding commands on fusion to complete */ static int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance, int reason, int *convert) { int i, outstanding, retval = 0, hb_seconds_missed = 0; u32 fw_state, abs_state; u32 waittime_for_io_completion; waittime_for_io_completion = min_t(u32, resetwaittime, (resetwaittime - instance->snapdump_wait_time)); if (reason == MFI_IO_TIMEOUT_OCR) { dev_info(&instance->pdev->dev, "MFI command is timed out\n"); megasas_complete_cmd_dpc_fusion((unsigned long)instance); if (instance->snapdump_wait_time) megasas_trigger_snap_dump(instance); retval = 1; goto out; } for (i = 0; i < waittime_for_io_completion; i++) { /* Check if firmware is in fault state */ abs_state = instance->instancet->read_fw_status_reg(instance); fw_state = abs_state & MFI_STATE_MASK; if (fw_state == MFI_STATE_FAULT) { dev_printk(KERN_ERR, &instance->pdev->dev, "FW in FAULT state Fault code:0x%x subcode:0x%x func:%s\n", abs_state & MFI_STATE_FAULT_CODE, abs_state & MFI_STATE_FAULT_SUBCODE, __func__); megasas_complete_cmd_dpc_fusion((unsigned long)instance); if (instance->requestorId && reason) { dev_warn(&instance->pdev->dev, "SR-IOV Found FW in FAULT" " state while polling during" " I/O timeout handling for %d\n", instance->host->host_no); *convert = 1; } retval = 1; goto out; } /* If SR-IOV VF mode & heartbeat timeout, don't wait */ if (instance->requestorId && !reason) { retval = 1; goto out; } /* If SR-IOV VF mode & I/O timeout, check for HB timeout */ if (instance->requestorId && (reason == SCSIIO_TIMEOUT_OCR)) { if (instance->hb_host_mem->HB.fwCounter != instance->hb_host_mem->HB.driverCounter) { instance->hb_host_mem->HB.driverCounter = instance->hb_host_mem->HB.fwCounter; hb_seconds_missed = 0; } else { hb_seconds_missed++; if (hb_seconds_missed == (MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF/HZ)) { dev_warn(&instance->pdev->dev, "SR-IOV:" " Heartbeat never completed " " while polling during I/O " " timeout handling for " "scsi%d.\n", instance->host->host_no); *convert = 1; retval = 1; goto out; } } } megasas_complete_cmd_dpc_fusion((unsigned long)instance); outstanding = atomic_read(&instance->fw_outstanding); if (!outstanding) goto out; if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) { dev_notice(&instance->pdev->dev, "[%2d]waiting for %d " "commands to complete for scsi%d\n", i, outstanding, instance->host->host_no); } msleep(1000); } if (instance->snapdump_wait_time) { megasas_trigger_snap_dump(instance); retval = 1; goto out; } if (atomic_read(&instance->fw_outstanding)) { dev_err(&instance->pdev->dev, "pending commands remain after waiting, " "will reset adapter scsi%d.\n", instance->host->host_no); *convert = 1; retval = 1; } out: return retval; } void megasas_reset_reply_desc(struct megasas_instance *instance) { int i, j, count; struct fusion_context *fusion; union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc; fusion = instance->ctrl_context; count = instance->msix_vectors > 0 ? 
instance->msix_vectors : 1; count += instance->iopoll_q_count; for (i = 0 ; i < count ; i++) { fusion->last_reply_idx[i] = 0; reply_desc = fusion->reply_frames_desc[i]; for (j = 0 ; j < fusion->reply_q_depth; j++, reply_desc++) reply_desc->Words = cpu_to_le64(ULLONG_MAX); } } /* * megasas_refire_mgmt_cmd : Re-fire management commands * @instance: Controller's soft instance */ static void megasas_refire_mgmt_cmd(struct megasas_instance *instance, bool return_ioctl) { int j; struct megasas_cmd_fusion *cmd_fusion; struct fusion_context *fusion; struct megasas_cmd *cmd_mfi; union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; struct MPI2_RAID_SCSI_IO_REQUEST *scsi_io_req; u16 smid; bool refire_cmd = false; u8 result; u32 opcode = 0; fusion = instance->ctrl_context; /* Re-fire management commands. * Do not traverse complet MPT frame pool. Start from max_scsi_cmds. */ for (j = instance->max_scsi_cmds ; j < instance->max_fw_cmds; j++) { cmd_fusion = fusion->cmd_list[j]; cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx]; smid = le16_to_cpu(cmd_mfi->context.smid); result = REFIRE_CMD; if (!smid) continue; req_desc = megasas_get_request_descriptor(instance, smid - 1); switch (cmd_mfi->frame->hdr.cmd) { case MFI_CMD_DCMD: opcode = le32_to_cpu(cmd_mfi->frame->dcmd.opcode); /* Do not refire shutdown command */ if (opcode == MR_DCMD_CTRL_SHUTDOWN) { cmd_mfi->frame->dcmd.cmd_status = MFI_STAT_OK; result = COMPLETE_CMD; break; } refire_cmd = ((opcode != MR_DCMD_LD_MAP_GET_INFO)) && (opcode != MR_DCMD_SYSTEM_PD_MAP_GET_INFO) && !(cmd_mfi->flags & DRV_DCMD_SKIP_REFIRE); if (!refire_cmd) result = RETURN_CMD; break; case MFI_CMD_NVME: if (!instance->support_nvme_passthru) { cmd_mfi->frame->hdr.cmd_status = MFI_STAT_INVALID_CMD; result = COMPLETE_CMD; } break; case MFI_CMD_TOOLBOX: if (!instance->support_pci_lane_margining) { cmd_mfi->frame->hdr.cmd_status = MFI_STAT_INVALID_CMD; result = COMPLETE_CMD; } break; default: break; } if (return_ioctl && cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT) { dev_err(&instance->pdev->dev, "return -EBUSY from %s %d cmd 0x%x opcode 0x%x\n", __func__, __LINE__, cmd_mfi->frame->hdr.cmd, le32_to_cpu(cmd_mfi->frame->dcmd.opcode)); cmd_mfi->cmd_status_drv = DCMD_BUSY; result = COMPLETE_CMD; } scsi_io_req = (struct MPI2_RAID_SCSI_IO_REQUEST *) cmd_fusion->io_request; if (scsi_io_req->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) result = RETURN_CMD; switch (result) { case REFIRE_CMD: megasas_fire_cmd_fusion(instance, req_desc); break; case RETURN_CMD: megasas_return_cmd(instance, cmd_mfi); break; case COMPLETE_CMD: megasas_complete_cmd(instance, cmd_mfi, DID_OK); break; } } } /* * megasas_return_polled_cmds: Return polled mode commands back to the pool * before initiating an OCR. 
* @instance: Controller's soft instance */ static void megasas_return_polled_cmds(struct megasas_instance *instance) { int i; struct megasas_cmd_fusion *cmd_fusion; struct fusion_context *fusion; struct megasas_cmd *cmd_mfi; fusion = instance->ctrl_context; for (i = instance->max_scsi_cmds; i < instance->max_fw_cmds; i++) { cmd_fusion = fusion->cmd_list[i]; cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx]; if (cmd_mfi->flags & DRV_DCMD_POLLED_MODE) { if (megasas_dbg_lvl & OCR_DEBUG) dev_info(&instance->pdev->dev, "%s %d return cmd 0x%x opcode 0x%x\n", __func__, __LINE__, cmd_mfi->frame->hdr.cmd, le32_to_cpu(cmd_mfi->frame->dcmd.opcode)); cmd_mfi->flags &= ~DRV_DCMD_POLLED_MODE; megasas_return_cmd(instance, cmd_mfi); } } } /* * megasas_track_scsiio : Track SCSI IOs outstanding to a SCSI device * @instance: per adapter struct * @channel: the channel assigned by the OS * @id: the id assigned by the OS * * Returns SUCCESS if no IOs pending to SCSI device, else return FAILED */ static int megasas_track_scsiio(struct megasas_instance *instance, int id, int channel) { int i, found = 0; struct megasas_cmd_fusion *cmd_fusion; struct fusion_context *fusion; fusion = instance->ctrl_context; for (i = 0 ; i < instance->max_scsi_cmds; i++) { cmd_fusion = fusion->cmd_list[i]; if (cmd_fusion->scmd && (cmd_fusion->scmd->device->id == id && cmd_fusion->scmd->device->channel == channel)) { dev_info(&instance->pdev->dev, "SCSI commands pending to target" "channel %d id %d \tSMID: 0x%x\n", channel, id, cmd_fusion->index); scsi_print_command(cmd_fusion->scmd); found = 1; break; } } return found ? FAILED : SUCCESS; } /** * megasas_tm_response_code - translation of device response code * @instance: Controller's soft instance * @mpi_reply: MPI reply returned by firmware * * Return nothing. */ static void megasas_tm_response_code(struct megasas_instance *instance, struct MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply) { char *desc; switch (mpi_reply->ResponseCode) { case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE: desc = "task management request completed"; break; case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME: desc = "invalid frame"; break; case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED: desc = "task management request not supported"; break; case MPI2_SCSITASKMGMT_RSP_TM_FAILED: desc = "task management request failed"; break; case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED: desc = "task management request succeeded"; break; case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN: desc = "invalid lun"; break; case 0xA: desc = "overlapped tag attempted"; break; case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC: desc = "task queued, however not sent to target"; break; default: desc = "unknown"; break; } dev_dbg(&instance->pdev->dev, "response_code(%01x): %s\n", mpi_reply->ResponseCode, desc); dev_dbg(&instance->pdev->dev, "TerminationCount/DevHandle/Function/TaskType/IOCStat/IOCLoginfo" " 0x%x/0x%x/0x%x/0x%x/0x%x/0x%x\n", mpi_reply->TerminationCount, mpi_reply->DevHandle, mpi_reply->Function, mpi_reply->TaskType, mpi_reply->IOCStatus, mpi_reply->IOCLogInfo); } /** * megasas_issue_tm - main routine for sending tm requests * @instance: per adapter struct * @device_handle: device handle * @channel: the channel assigned by the OS * @id: the id assigned by the OS * @smid_task: smid assigned to the task * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in megaraid_sas_fusion.c) * @mr_device_priv_data: private data * Context: user * * MegaRaid use MPT interface for Task Magement request. * A generic API for sending task management requests to firmware. 
* * Return SUCCESS or FAILED. */ static int megasas_issue_tm(struct megasas_instance *instance, u16 device_handle, uint channel, uint id, u16 smid_task, u8 type, struct MR_PRIV_DEVICE *mr_device_priv_data) { struct MR_TASK_MANAGE_REQUEST *mr_request; struct MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_request; unsigned long timeleft; struct megasas_cmd_fusion *cmd_fusion; struct megasas_cmd *cmd_mfi; union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; struct fusion_context *fusion = NULL; struct megasas_cmd_fusion *scsi_lookup; int rc; int timeout = MEGASAS_DEFAULT_TM_TIMEOUT; struct MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply; fusion = instance->ctrl_context; cmd_mfi = megasas_get_cmd(instance); if (!cmd_mfi) { dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__); return -ENOMEM; } cmd_fusion = megasas_get_cmd_fusion(instance, instance->max_scsi_cmds + cmd_mfi->index); /* Save the smid. To be used for returning the cmd */ cmd_mfi->context.smid = cmd_fusion->index; req_desc = megasas_get_request_descriptor(instance, (cmd_fusion->index - 1)); cmd_fusion->request_desc = req_desc; req_desc->Words = 0; mr_request = (struct MR_TASK_MANAGE_REQUEST *) cmd_fusion->io_request; memset(mr_request, 0, sizeof(struct MR_TASK_MANAGE_REQUEST)); mpi_request = (struct MPI2_SCSI_TASK_MANAGE_REQUEST *) &mr_request->TmRequest; mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; mpi_request->DevHandle = cpu_to_le16(device_handle); mpi_request->TaskType = type; mpi_request->TaskMID = cpu_to_le16(smid_task); mpi_request->LUN[1] = 0; req_desc = cmd_fusion->request_desc; req_desc->HighPriority.SMID = cpu_to_le16(cmd_fusion->index); req_desc->HighPriority.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); req_desc->HighPriority.MSIxIndex = 0; req_desc->HighPriority.LMID = 0; req_desc->HighPriority.Reserved1 = 0; if (channel < MEGASAS_MAX_PD_CHANNELS) mr_request->tmReqFlags.isTMForPD = 1; else mr_request->tmReqFlags.isTMForLD = 1; init_completion(&cmd_fusion->done); megasas_fire_cmd_fusion(instance, req_desc); switch (type) { case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK: timeout = mr_device_priv_data->task_abort_tmo; break; case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET: timeout = mr_device_priv_data->target_reset_tmo; break; } timeleft = wait_for_completion_timeout(&cmd_fusion->done, timeout * HZ); if (!timeleft) { dev_err(&instance->pdev->dev, "task mgmt type 0x%x timed out\n", type); mutex_unlock(&instance->reset_mutex); rc = megasas_reset_fusion(instance->host, MFI_IO_TIMEOUT_OCR); mutex_lock(&instance->reset_mutex); return rc; } mpi_reply = (struct MPI2_SCSI_TASK_MANAGE_REPLY *) &mr_request->TMReply; megasas_tm_response_code(instance, mpi_reply); megasas_return_cmd(instance, cmd_mfi); rc = SUCCESS; switch (type) { case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK: scsi_lookup = fusion->cmd_list[smid_task - 1]; if (scsi_lookup->scmd == NULL) break; else { instance->instancet->disable_intr(instance); megasas_sync_irqs((unsigned long)instance); instance->instancet->enable_intr(instance); megasas_enable_irq_poll(instance); if (scsi_lookup->scmd == NULL) break; } rc = FAILED; break; case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET: if ((channel == 0xFFFFFFFF) && (id == 0xFFFFFFFF)) break; instance->instancet->disable_intr(instance); megasas_sync_irqs((unsigned long)instance); rc = megasas_track_scsiio(instance, id, channel); instance->instancet->enable_intr(instance); megasas_enable_irq_poll(instance); break; case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET: case 
MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK: break; default: rc = FAILED; break; } return rc; } /* * megasas_fusion_smid_lookup : Look for fusion command corresponding to SCSI * @instance: per adapter struct * * Return Non Zero index, if SMID found in outstanding commands */ static u16 megasas_fusion_smid_lookup(struct scsi_cmnd *scmd) { int i, ret = 0; struct megasas_instance *instance; struct megasas_cmd_fusion *cmd_fusion; struct fusion_context *fusion; instance = (struct megasas_instance *)scmd->device->host->hostdata; fusion = instance->ctrl_context; for (i = 0; i < instance->max_scsi_cmds; i++) { cmd_fusion = fusion->cmd_list[i]; if (cmd_fusion->scmd && (cmd_fusion->scmd == scmd)) { scmd_printk(KERN_NOTICE, scmd, "Abort request is for" " SMID: %d\n", cmd_fusion->index); ret = cmd_fusion->index; break; } } return ret; } /* * megasas_get_tm_devhandle - Get devhandle for TM request * @sdev- OS provided scsi device * * Returns- devhandle/targetID of SCSI device */ static u16 megasas_get_tm_devhandle(struct scsi_device *sdev) { u16 pd_index = 0; u32 device_id; struct megasas_instance *instance; struct fusion_context *fusion; struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync; u16 devhandle = (u16)ULONG_MAX; instance = (struct megasas_instance *)sdev->host->hostdata; fusion = instance->ctrl_context; if (!MEGASAS_IS_LOGICAL(sdev)) { if (instance->use_seqnum_jbod_fp) { pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id; pd_sync = (void *)fusion->pd_seq_sync [(instance->pd_seq_map_id - 1) & 1]; devhandle = pd_sync->seq[pd_index].devHandle; } else sdev_printk(KERN_ERR, sdev, "Firmware expose tmCapable" " without JBOD MAP support from %s %d\n", __func__, __LINE__); } else { device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id; devhandle = device_id; } return devhandle; } /* * megasas_task_abort_fusion : SCSI task abort function for fusion adapters * @scmd : pointer to scsi command object * * Return SUCCESS, if command aborted else FAILED */ int megasas_task_abort_fusion(struct scsi_cmnd *scmd) { struct megasas_instance *instance; u16 smid, devhandle; int ret; struct MR_PRIV_DEVICE *mr_device_priv_data; mr_device_priv_data = scmd->device->hostdata; instance = (struct megasas_instance *)scmd->device->host->hostdata; if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { dev_err(&instance->pdev->dev, "Controller is not OPERATIONAL," "SCSI host:%d\n", instance->host->host_no); ret = FAILED; return ret; } if (!mr_device_priv_data) { sdev_printk(KERN_INFO, scmd->device, "device been deleted! " "scmd(%p)\n", scmd); scmd->result = DID_NO_CONNECT << 16; ret = SUCCESS; goto out; } if (!mr_device_priv_data->is_tm_capable) { ret = FAILED; goto out; } mutex_lock(&instance->reset_mutex); smid = megasas_fusion_smid_lookup(scmd); if (!smid) { ret = SUCCESS; scmd_printk(KERN_NOTICE, scmd, "Command for which abort is" " issued is not found in outstanding commands\n"); mutex_unlock(&instance->reset_mutex); goto out; } devhandle = megasas_get_tm_devhandle(scmd->device); if (devhandle == (u16)ULONG_MAX) { ret = FAILED; sdev_printk(KERN_INFO, scmd->device, "task abort issued for invalid devhandle\n"); mutex_unlock(&instance->reset_mutex); goto out; } sdev_printk(KERN_INFO, scmd->device, "attempting task abort! 
scmd(0x%p) tm_dev_handle 0x%x\n", scmd, devhandle); mr_device_priv_data->tm_busy = true; ret = megasas_issue_tm(instance, devhandle, scmd->device->channel, scmd->device->id, smid, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, mr_device_priv_data); mr_device_priv_data->tm_busy = false; mutex_unlock(&instance->reset_mutex); scmd_printk(KERN_INFO, scmd, "task abort %s!! scmd(0x%p)\n", ((ret == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); out: scsi_print_command(scmd); if (megasas_dbg_lvl & TM_DEBUG) megasas_dump_fusion_io(scmd); return ret; } /* * megasas_reset_target_fusion : target reset function for fusion adapters * scmd: SCSI command pointer * * Returns SUCCESS if all commands associated with target aborted else FAILED */ int megasas_reset_target_fusion(struct scsi_cmnd *scmd) { struct megasas_instance *instance; int ret = FAILED; u16 devhandle; struct MR_PRIV_DEVICE *mr_device_priv_data; mr_device_priv_data = scmd->device->hostdata; instance = (struct megasas_instance *)scmd->device->host->hostdata; if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { dev_err(&instance->pdev->dev, "Controller is not OPERATIONAL," "SCSI host:%d\n", instance->host->host_no); ret = FAILED; return ret; } if (!mr_device_priv_data) { sdev_printk(KERN_INFO, scmd->device, "device been deleted! scmd: (0x%p)\n", scmd); scmd->result = DID_NO_CONNECT << 16; ret = SUCCESS; goto out; } if (!mr_device_priv_data->is_tm_capable) { ret = FAILED; goto out; } mutex_lock(&instance->reset_mutex); devhandle = megasas_get_tm_devhandle(scmd->device); if (devhandle == (u16)ULONG_MAX) { ret = FAILED; sdev_printk(KERN_INFO, scmd->device, "target reset issued for invalid devhandle\n"); mutex_unlock(&instance->reset_mutex); goto out; } sdev_printk(KERN_INFO, scmd->device, "attempting target reset! scmd(0x%p) tm_dev_handle: 0x%x\n", scmd, devhandle); mr_device_priv_data->tm_busy = true; ret = megasas_issue_tm(instance, devhandle, scmd->device->channel, scmd->device->id, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, mr_device_priv_data); mr_device_priv_data->tm_busy = false; mutex_unlock(&instance->reset_mutex); scmd_printk(KERN_NOTICE, scmd, "target reset %s!!\n", (ret == SUCCESS) ? 
"SUCCESS" : "FAILED"); out: return ret; } /*SRIOV get other instance in cluster if any*/ static struct megasas_instance *megasas_get_peer_instance(struct megasas_instance *instance) { int i; for (i = 0; i < MAX_MGMT_ADAPTERS; i++) { if (megasas_mgmt_info.instance[i] && (megasas_mgmt_info.instance[i] != instance) && megasas_mgmt_info.instance[i]->requestorId && megasas_mgmt_info.instance[i]->peerIsPresent && (memcmp((megasas_mgmt_info.instance[i]->clusterId), instance->clusterId, MEGASAS_CLUSTER_ID_SIZE) == 0)) return megasas_mgmt_info.instance[i]; } return NULL; } /* Check for a second path that is currently UP */ int megasas_check_mpio_paths(struct megasas_instance *instance, struct scsi_cmnd *scmd) { struct megasas_instance *peer_instance = NULL; int retval = (DID_REQUEUE << 16); if (instance->peerIsPresent) { peer_instance = megasas_get_peer_instance(instance); if ((peer_instance) && (atomic_read(&peer_instance->adprecovery) == MEGASAS_HBA_OPERATIONAL)) retval = (DID_NO_CONNECT << 16); } return retval; } /* Core fusion reset function */ int megasas_reset_fusion(struct Scsi_Host *shost, int reason) { int retval = SUCCESS, i, j, convert = 0; struct megasas_instance *instance; struct megasas_cmd_fusion *cmd_fusion, *r1_cmd; struct fusion_context *fusion; u32 abs_state, status_reg, reset_adapter, fpio_count = 0; u32 io_timeout_in_crash_mode = 0; struct scsi_cmnd *scmd_local = NULL; struct scsi_device *sdev; int ret_target_prop = DCMD_FAILED; bool is_target_prop = false; bool do_adp_reset = true; int max_reset_tries = MEGASAS_FUSION_MAX_RESET_TRIES; instance = (struct megasas_instance *)shost->hostdata; fusion = instance->ctrl_context; mutex_lock(&instance->reset_mutex); if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { dev_warn(&instance->pdev->dev, "Hardware critical error, " "returning FAILED for scsi%d.\n", instance->host->host_no); mutex_unlock(&instance->reset_mutex); return FAILED; } status_reg = instance->instancet->read_fw_status_reg(instance); abs_state = status_reg & MFI_STATE_MASK; /* IO timeout detected, forcibly put FW in FAULT state */ if (abs_state != MFI_STATE_FAULT && instance->crash_dump_buf && instance->crash_dump_app_support && reason) { dev_info(&instance->pdev->dev, "IO/DCMD timeout is detected, " "forcibly FAULT Firmware\n"); atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT); status_reg = megasas_readl(instance, &instance->reg_set->doorbell); writel(status_reg | MFI_STATE_FORCE_OCR, &instance->reg_set->doorbell); readl(&instance->reg_set->doorbell); mutex_unlock(&instance->reset_mutex); do { ssleep(3); io_timeout_in_crash_mode++; dev_dbg(&instance->pdev->dev, "waiting for [%d] " "seconds for crash dump collection and OCR " "to be done\n", (io_timeout_in_crash_mode * 3)); } while ((atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) && (io_timeout_in_crash_mode < 80)); if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) { dev_info(&instance->pdev->dev, "OCR done for IO " "timeout case\n"); retval = SUCCESS; } else { dev_info(&instance->pdev->dev, "Controller is not " "operational after 240 seconds wait for IO " "timeout case in FW crash dump mode\n do " "OCR/kill adapter\n"); retval = megasas_reset_fusion(shost, 0); } return retval; } if (instance->requestorId && !instance->skip_heartbeat_timer_del) del_timer_sync(&instance->sriov_heartbeat_timer); set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags); set_bit(MEGASAS_FUSION_OCR_NOT_POSSIBLE, &instance->reset_flags); atomic_set(&instance->adprecovery, 
MEGASAS_ADPRESET_SM_POLLING); instance->instancet->disable_intr(instance); megasas_sync_irqs((unsigned long)instance); /* First try waiting for commands to complete */ if (megasas_wait_for_outstanding_fusion(instance, reason, &convert)) { atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT); dev_warn(&instance->pdev->dev, "resetting fusion " "adapter scsi%d.\n", instance->host->host_no); if (convert) reason = 0; if (megasas_dbg_lvl & OCR_DEBUG) dev_info(&instance->pdev->dev, "\nPending SCSI commands:\n"); /* Now return commands back to the OS */ for (i = 0 ; i < instance->max_scsi_cmds; i++) { cmd_fusion = fusion->cmd_list[i]; /*check for extra commands issued by driver*/ if (instance->adapter_type >= VENTURA_SERIES) { r1_cmd = fusion->cmd_list[i + instance->max_fw_cmds]; megasas_return_cmd_fusion(instance, r1_cmd); } scmd_local = cmd_fusion->scmd; if (cmd_fusion->scmd) { if (megasas_dbg_lvl & OCR_DEBUG) { sdev_printk(KERN_INFO, cmd_fusion->scmd->device, "SMID: 0x%x\n", cmd_fusion->index); megasas_dump_fusion_io(cmd_fusion->scmd); } if (cmd_fusion->io_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST) fpio_count++; scmd_local->result = megasas_check_mpio_paths(instance, scmd_local); if (instance->ldio_threshold && megasas_cmd_type(scmd_local) == READ_WRITE_LDIO) atomic_dec(&instance->ldio_outstanding); megasas_return_cmd_fusion(instance, cmd_fusion); scsi_dma_unmap(scmd_local); scsi_done(scmd_local); } } dev_info(&instance->pdev->dev, "Outstanding fastpath IOs: %d\n", fpio_count); atomic_set(&instance->fw_outstanding, 0); status_reg = instance->instancet->read_fw_status_reg(instance); abs_state = status_reg & MFI_STATE_MASK; reset_adapter = status_reg & MFI_RESET_ADAPTER; if (instance->disableOnlineCtrlReset || (abs_state == MFI_STATE_FAULT && !reset_adapter)) { /* Reset not supported, kill adapter */ dev_warn(&instance->pdev->dev, "Reset not supported" ", killing adapter scsi%d.\n", instance->host->host_no); goto kill_hba; } /* Let SR-IOV VF & PF sync up if there was a HB failure */ if (instance->requestorId && !reason) { msleep(MEGASAS_OCR_SETTLE_TIME_VF); do_adp_reset = false; max_reset_tries = MEGASAS_SRIOV_MAX_RESET_TRIES_VF; } /* Now try to reset the chip */ for (i = 0; i < max_reset_tries; i++) { /* * Do adp reset and wait for * controller to transition to ready */ if (megasas_adp_reset_wait_for_ready(instance, do_adp_reset, 1) == FAILED) continue; /* Wait for FW to become ready */ if (megasas_transition_to_ready(instance, 1)) { dev_warn(&instance->pdev->dev, "Failed to transition controller to ready for " "scsi%d.\n", instance->host->host_no); continue; } megasas_reset_reply_desc(instance); megasas_fusion_update_can_queue(instance, OCR_CONTEXT); if (megasas_ioc_init_fusion(instance)) { continue; } if (megasas_get_ctrl_info(instance)) { dev_info(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__); goto kill_hba; } megasas_refire_mgmt_cmd(instance, (i == (MEGASAS_FUSION_MAX_RESET_TRIES - 1) ? 
1 : 0)); /* Reset load balance info */ if (fusion->load_balance_info) memset(fusion->load_balance_info, 0, (sizeof(struct LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES_EXT)); if (!megasas_get_map_info(instance)) { megasas_sync_map_info(instance); } else { /* * Return pending polled mode cmds before * retrying OCR */ megasas_return_polled_cmds(instance); continue; } megasas_setup_jbod_map(instance); /* reset stream detection array */ if (instance->adapter_type >= VENTURA_SERIES) { for (j = 0; j < MAX_LOGICAL_DRIVES_EXT; ++j) { memset(fusion->stream_detect_by_ld[j], 0, sizeof(struct LD_STREAM_DETECT)); fusion->stream_detect_by_ld[j]->mru_bit_map = MR_STREAM_BITMAP; } } clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags); instance->instancet->enable_intr(instance); megasas_enable_irq_poll(instance); shost_for_each_device(sdev, shost) { if ((instance->tgt_prop) && (instance->nvme_page_size)) ret_target_prop = megasas_get_target_prop(instance, sdev); is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false; megasas_set_dynamic_target_properties(sdev, is_target_prop); } status_reg = instance->instancet->read_fw_status_reg (instance); abs_state = status_reg & MFI_STATE_MASK; if (abs_state != MFI_STATE_OPERATIONAL) { dev_info(&instance->pdev->dev, "Adapter is not OPERATIONAL, state 0x%x for scsi:%d\n", abs_state, instance->host->host_no); goto out; } atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL); dev_info(&instance->pdev->dev, "Adapter is OPERATIONAL for scsi:%d\n", instance->host->host_no); /* Restart SR-IOV heartbeat */ if (instance->requestorId) { if (!megasas_sriov_start_heartbeat(instance, 0)) megasas_start_timer(instance); else instance->skip_heartbeat_timer_del = 1; } if (instance->crash_dump_drv_support && instance->crash_dump_app_support) megasas_set_crash_dump_params(instance, MR_CRASH_BUF_TURN_ON); else megasas_set_crash_dump_params(instance, MR_CRASH_BUF_TURN_OFF); if (instance->snapdump_wait_time) { megasas_get_snapdump_properties(instance); dev_info(&instance->pdev->dev, "Snap dump wait time\t: %d\n", instance->snapdump_wait_time); } retval = SUCCESS; /* Adapter reset completed successfully */ dev_warn(&instance->pdev->dev, "Reset successful for scsi%d.\n", instance->host->host_no); goto out; } /* Reset failed, kill the adapter */ dev_warn(&instance->pdev->dev, "Reset failed, killing " "adapter scsi%d.\n", instance->host->host_no); goto kill_hba; } else { /* For VF: Restart HB timer if we didn't OCR */ if (instance->requestorId) { megasas_start_timer(instance); } clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags); instance->instancet->enable_intr(instance); megasas_enable_irq_poll(instance); atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL); goto out; } kill_hba: megaraid_sas_kill_hba(instance); megasas_enable_irq_poll(instance); instance->skip_heartbeat_timer_del = 1; retval = FAILED; out: clear_bit(MEGASAS_FUSION_OCR_NOT_POSSIBLE, &instance->reset_flags); mutex_unlock(&instance->reset_mutex); return retval; } /* Fusion Crash dump collection */ static void megasas_fusion_crash_dump(struct megasas_instance *instance) { u32 status_reg; u8 partial_copy = 0; int wait = 0; status_reg = instance->instancet->read_fw_status_reg(instance); /* * Allocate host crash buffers to copy data from 1 MB DMA crash buffer * to host crash buffers */ if (instance->drv_buf_index == 0) { /* Buffer is already allocated for old Crash dump. 
* Do OCR and do not wait for crash dump collection */ if (instance->drv_buf_alloc) { dev_info(&instance->pdev->dev, "earlier crash dump is " "not yet copied by application, ignoring this " "crash dump and initiating OCR\n"); status_reg |= MFI_STATE_CRASH_DUMP_DONE; writel(status_reg, &instance->reg_set->outbound_scratch_pad_0); readl(&instance->reg_set->outbound_scratch_pad_0); return; } megasas_alloc_host_crash_buffer(instance); dev_info(&instance->pdev->dev, "Number of host crash buffers " "allocated: %d\n", instance->drv_buf_alloc); } while (!(status_reg & MFI_STATE_CRASH_DUMP_DONE) && (wait < MEGASAS_WATCHDOG_WAIT_COUNT)) { if (!(status_reg & MFI_STATE_DMADONE)) { /* * Next crash dump buffer is not yet DMA'd by FW * Check after 10ms. Wait for 1 second for FW to * post the next buffer. If not bail out. */ wait++; msleep(MEGASAS_WAIT_FOR_NEXT_DMA_MSECS); status_reg = instance->instancet->read_fw_status_reg( instance); continue; } wait = 0; if (instance->drv_buf_index >= instance->drv_buf_alloc) { dev_info(&instance->pdev->dev, "Driver is done copying the buffer: %d\n", instance->drv_buf_alloc); status_reg |= MFI_STATE_CRASH_DUMP_DONE; partial_copy = 1; break; } else { memcpy(instance->crash_buf[instance->drv_buf_index], instance->crash_dump_buf, CRASH_DMA_BUF_SIZE); instance->drv_buf_index++; status_reg &= ~MFI_STATE_DMADONE; } writel(status_reg, &instance->reg_set->outbound_scratch_pad_0); readl(&instance->reg_set->outbound_scratch_pad_0); msleep(MEGASAS_WAIT_FOR_NEXT_DMA_MSECS); status_reg = instance->instancet->read_fw_status_reg(instance); } if (status_reg & MFI_STATE_CRASH_DUMP_DONE) { dev_info(&instance->pdev->dev, "Crash Dump is available,number " "of copied buffers: %d\n", instance->drv_buf_index); instance->fw_crash_buffer_size = instance->drv_buf_index; instance->fw_crash_state = AVAILABLE; instance->drv_buf_index = 0; writel(status_reg, &instance->reg_set->outbound_scratch_pad_0); readl(&instance->reg_set->outbound_scratch_pad_0); if (!partial_copy) megasas_reset_fusion(instance->host, 0); } } /* Fusion OCR work queue */ void megasas_fusion_ocr_wq(struct work_struct *work) { struct megasas_instance *instance = container_of(work, struct megasas_instance, work_init); megasas_reset_fusion(instance->host, 0); } /* Allocate fusion context */ int megasas_alloc_fusion_context(struct megasas_instance *instance) { struct fusion_context *fusion; instance->ctrl_context = kzalloc(sizeof(struct fusion_context), GFP_KERNEL); if (!instance->ctrl_context) { dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__); return -ENOMEM; } fusion = instance->ctrl_context; fusion->log_to_span_pages = get_order(MAX_LOGICAL_DRIVES_EXT * sizeof(LD_SPAN_INFO)); fusion->log_to_span = (PLD_SPAN_INFO)__get_free_pages(GFP_KERNEL | __GFP_ZERO, fusion->log_to_span_pages); if (!fusion->log_to_span) { fusion->log_to_span = vzalloc(array_size(MAX_LOGICAL_DRIVES_EXT, sizeof(LD_SPAN_INFO))); if (!fusion->log_to_span) { dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__); return -ENOMEM; } } fusion->load_balance_info_pages = get_order(MAX_LOGICAL_DRIVES_EXT * sizeof(struct LD_LOAD_BALANCE_INFO)); fusion->load_balance_info = (struct LD_LOAD_BALANCE_INFO *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, fusion->load_balance_info_pages); if (!fusion->load_balance_info) { fusion->load_balance_info = vzalloc(array_size(MAX_LOGICAL_DRIVES_EXT, sizeof(struct LD_LOAD_BALANCE_INFO))); if (!fusion->load_balance_info) dev_err(&instance->pdev->dev, "Failed to allocate load_balance_info, " 
"continuing without Load Balance support\n"); } return 0; } void megasas_free_fusion_context(struct megasas_instance *instance) { struct fusion_context *fusion = instance->ctrl_context; if (fusion) { if (fusion->load_balance_info) { if (is_vmalloc_addr(fusion->load_balance_info)) vfree(fusion->load_balance_info); else free_pages((ulong)fusion->load_balance_info, fusion->load_balance_info_pages); } if (fusion->log_to_span) { if (is_vmalloc_addr(fusion->log_to_span)) vfree(fusion->log_to_span); else free_pages((ulong)fusion->log_to_span, fusion->log_to_span_pages); } kfree(fusion); } } struct megasas_instance_template megasas_instance_template_fusion = { .enable_intr = megasas_enable_intr_fusion, .disable_intr = megasas_disable_intr_fusion, .clear_intr = megasas_clear_intr_fusion, .read_fw_status_reg = megasas_read_fw_status_reg_fusion, .adp_reset = megasas_adp_reset_fusion, .check_reset = megasas_check_reset_fusion, .service_isr = megasas_isr_fusion, .tasklet = megasas_complete_cmd_dpc_fusion, .init_adapter = megasas_init_adapter_fusion, .build_and_issue_cmd = megasas_build_and_issue_cmd_fusion, .issue_dcmd = megasas_issue_dcmd_fusion, };
linux-master
drivers/scsi/megaraid/megaraid_sas_fusion.c
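The megasas_isr_fusion()/megasas_irqpoll() pair earlier in this file uses the kernel's irq_poll facility for interrupt mitigation: the hard IRQ handler drains completions up to a threshold, then masks the line and defers further draining to softirq context, re-enabling the line once the reply queue is empty. The sketch below shows only that wiring under simplifying assumptions; my_ctx, my_process_completions() and MY_BUDGET are hypothetical, and unlike this sketch the driver defers the disable_irq_nosync() call to the poll callback via its irq_line_enable flag.

#include <linux/interrupt.h>
#include <linux/irq_poll.h>

#define MY_BUDGET	64	/* hypothetical per-pass completion budget */

struct my_ctx {
	struct irq_poll	irqpoll;
	int		os_irq;
	bool		poll_scheduled;
};

/* Drain up to @budget completions; returns the number drained (stub). */
static int my_process_completions(struct my_ctx *ctx, int budget)
{
	return 0;	/* the driver-specific reply-descriptor walk goes here */
}

static int my_irqpoll(struct irq_poll *iop, int budget)
{
	struct my_ctx *ctx = container_of(iop, struct my_ctx, irqpoll);
	int done = my_process_completions(ctx, budget);

	if (done < budget) {		/* queue drained: re-arm the hard IRQ */
		irq_poll_complete(iop);
		ctx->poll_scheduled = false;
		enable_irq(ctx->os_irq);
	}
	return done;
}

static irqreturn_t my_isr(int irq, void *data)
{
	struct my_ctx *ctx = data;

	if (ctx->poll_scheduled)	/* softirq is already draining the queue */
		return IRQ_HANDLED;

	if (my_process_completions(ctx, MY_BUDGET) >= MY_BUDGET) {
		/* still busy: mask the line and let irq_poll take over */
		ctx->poll_scheduled = true;
		disable_irq_nosync(ctx->os_irq);
		irq_poll_sched(&ctx->irqpoll);
	}
	return IRQ_HANDLED;
}

/* one-time setup per queue: irq_poll_init(&ctx->irqpoll, MY_BUDGET, my_irqpoll); */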
// SPDX-License-Identifier: GPL-2.0-or-later /* * * Linux MegaRAID device driver * * Copyright (c) 2003-2004 LSI Logic Corporation. * * FILE : megaraid_mm.c * Version : v2.20.2.7 (Jul 16 2006) * * Common management module */ #include <linux/sched.h> #include <linux/slab.h> #include <linux/mutex.h> #include "megaraid_mm.h" // Entry points for char node driver static DEFINE_MUTEX(mraid_mm_mutex); static int mraid_mm_open(struct inode *, struct file *); static long mraid_mm_unlocked_ioctl(struct file *, uint, unsigned long); // routines to convert to and from the old the format static int mimd_to_kioc(mimd_t __user *, mraid_mmadp_t *, uioc_t *); static int kioc_to_mimd(uioc_t *, mimd_t __user *); // Helper functions static int handle_drvrcmd(void __user *, uint8_t, int *); static int lld_ioctl(mraid_mmadp_t *, uioc_t *); static void ioctl_done(uioc_t *); static void lld_timedout(struct timer_list *); static void hinfo_to_cinfo(mraid_hba_info_t *, mcontroller_t *); static mraid_mmadp_t *mraid_mm_get_adapter(mimd_t __user *, int *); static uioc_t *mraid_mm_alloc_kioc(mraid_mmadp_t *); static void mraid_mm_dealloc_kioc(mraid_mmadp_t *, uioc_t *); static int mraid_mm_attach_buf(mraid_mmadp_t *, uioc_t *, int); static int mraid_mm_setup_dma_pools(mraid_mmadp_t *); static void mraid_mm_free_adp_resources(mraid_mmadp_t *); static void mraid_mm_teardown_dma_pools(mraid_mmadp_t *); MODULE_AUTHOR("LSI Logic Corporation"); MODULE_DESCRIPTION("LSI Logic Management Module"); MODULE_LICENSE("GPL"); MODULE_VERSION(LSI_COMMON_MOD_VERSION); static int dbglevel = CL_ANN; module_param_named(dlevel, dbglevel, int, 0); MODULE_PARM_DESC(dlevel, "Debug level (default=0)"); EXPORT_SYMBOL(mraid_mm_register_adp); EXPORT_SYMBOL(mraid_mm_unregister_adp); EXPORT_SYMBOL(mraid_mm_adapter_app_handle); static uint32_t drvr_ver = 0x02200207; static int adapters_count_g; static struct list_head adapters_list_g; static wait_queue_head_t wait_q; static const struct file_operations lsi_fops = { .open = mraid_mm_open, .unlocked_ioctl = mraid_mm_unlocked_ioctl, .compat_ioctl = compat_ptr_ioctl, .owner = THIS_MODULE, .llseek = noop_llseek, }; static struct miscdevice megaraid_mm_dev = { .minor = MISC_DYNAMIC_MINOR, .name = "megadev0", .fops = &lsi_fops, }; /** * mraid_mm_open - open routine for char node interface * @inode : unused * @filep : unused * * Allow ioctl operations by apps only if they have superuser privilege. */ static int mraid_mm_open(struct inode *inode, struct file *filep) { /* * Only allow superuser to access private ioctl interface */ if (!capable(CAP_SYS_ADMIN)) return (-EACCES); return 0; } /** * mraid_mm_ioctl - module entry-point for ioctls * @filep : file operations pointer (ignored) * @cmd : ioctl command * @arg : user ioctl packet */ static int mraid_mm_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) { uioc_t *kioc; char signature[EXT_IOCTL_SIGN_SZ] = {0}; int rval; mraid_mmadp_t *adp; uint8_t old_ioctl; int drvrcmd_rval; void __user *argp = (void __user *)arg; /* * Make sure only USCSICMD are issued through this interface. * MIMD application would still fire different command. */ if ((_IOC_TYPE(cmd) != MEGAIOC_MAGIC) && (cmd != USCSICMD)) { return (-EINVAL); } /* * Look for signature to see if this is the new or old ioctl format. 
*/ if (copy_from_user(signature, argp, EXT_IOCTL_SIGN_SZ)) { con_log(CL_ANN, (KERN_WARNING "megaraid cmm: copy from usr addr failed\n")); return (-EFAULT); } if (memcmp(signature, EXT_IOCTL_SIGN, EXT_IOCTL_SIGN_SZ) == 0) old_ioctl = 0; else old_ioctl = 1; /* * At present, we don't support the new ioctl packet */ if (!old_ioctl ) return (-EINVAL); /* * If it is a driver ioctl (as opposed to fw ioctls), then we can * handle the command locally. rval > 0 means it is not a drvr cmd */ rval = handle_drvrcmd(argp, old_ioctl, &drvrcmd_rval); if (rval < 0) return rval; else if (rval == 0) return drvrcmd_rval; rval = 0; if ((adp = mraid_mm_get_adapter(argp, &rval)) == NULL) { return rval; } /* * Check if adapter can accept ioctl. We may have marked it offline * if any previous kioc had timedout on this controller. */ if (!adp->quiescent) { con_log(CL_ANN, (KERN_WARNING "megaraid cmm: controller cannot accept cmds due to " "earlier errors\n" )); return -EFAULT; } /* * The following call will block till a kioc is available * or return NULL if the list head is empty for the pointer * of type mraid_mmapt passed to mraid_mm_alloc_kioc */ kioc = mraid_mm_alloc_kioc(adp); if (!kioc) return -ENXIO; /* * User sent the old mimd_t ioctl packet. Convert it to uioc_t. */ if ((rval = mimd_to_kioc(argp, adp, kioc))) { mraid_mm_dealloc_kioc(adp, kioc); return rval; } kioc->done = ioctl_done; /* * Issue the IOCTL to the low level driver. After the IOCTL completes * release the kioc if and only if it was _not_ timedout. If it was * timedout, that means that resources are still with low level driver. */ if ((rval = lld_ioctl(adp, kioc))) { if (!kioc->timedout) mraid_mm_dealloc_kioc(adp, kioc); return rval; } /* * Convert the kioc back to user space */ rval = kioc_to_mimd(kioc, argp); /* * Return the kioc to free pool */ mraid_mm_dealloc_kioc(adp, kioc); return rval; } static long mraid_mm_unlocked_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) { int err; mutex_lock(&mraid_mm_mutex); err = mraid_mm_ioctl(filep, cmd, arg); mutex_unlock(&mraid_mm_mutex); return err; } /** * mraid_mm_get_adapter - Returns corresponding adapters for the mimd packet * @umimd : User space mimd_t ioctl packet * @rval : returned success/error status * * The function return value is a pointer to the located @adapter. */ static mraid_mmadp_t * mraid_mm_get_adapter(mimd_t __user *umimd, int *rval) { mraid_mmadp_t *adapter; mimd_t mimd; uint32_t adapno; int iterator; bool is_found; if (copy_from_user(&mimd, umimd, sizeof(mimd_t))) { *rval = -EFAULT; return NULL; } adapno = GETADAP(mimd.ui.fcs.adapno); if (adapno >= adapters_count_g) { *rval = -ENODEV; return NULL; } adapter = NULL; iterator = 0; is_found = false; list_for_each_entry(adapter, &adapters_list_g, list) { if (iterator++ == adapno) { is_found = true; break; } } if (!is_found) { *rval = -ENODEV; return NULL; } return adapter; } /** * handle_drvrcmd - Checks if the opcode is a driver cmd and if it is, handles it. 
* @arg : packet sent by the user app * @old_ioctl : mimd if 1; uioc otherwise * @rval : pointer for command's returned value (not function status) */ static int handle_drvrcmd(void __user *arg, uint8_t old_ioctl, int *rval) { mimd_t __user *umimd; mimd_t kmimd; uint8_t opcode; uint8_t subopcode; if (old_ioctl) goto old_packet; else goto new_packet; new_packet: return (-ENOTSUPP); old_packet: *rval = 0; umimd = arg; if (copy_from_user(&kmimd, umimd, sizeof(mimd_t))) return (-EFAULT); opcode = kmimd.ui.fcs.opcode; subopcode = kmimd.ui.fcs.subopcode; /* * If the opcode is 0x82 and the subopcode is either GET_DRVRVER or * GET_NUMADP, then we can handle. Otherwise we should return 1 to * indicate that we cannot handle this. */ if (opcode != 0x82) return 1; switch (subopcode) { case MEGAIOC_QDRVRVER: if (copy_to_user(kmimd.data, &drvr_ver, sizeof(uint32_t))) return (-EFAULT); return 0; case MEGAIOC_QNADAP: *rval = adapters_count_g; if (copy_to_user(kmimd.data, &adapters_count_g, sizeof(uint32_t))) return (-EFAULT); return 0; default: /* cannot handle */ return 1; } return 0; } /** * mimd_to_kioc - Converter from old to new ioctl format * @umimd : user space old MIMD IOCTL * @adp : adapter softstate * @kioc : kernel space new format IOCTL * * Routine to convert MIMD interface IOCTL to new interface IOCTL packet. The * new packet is in kernel space so that driver can perform operations on it * freely. */ static int mimd_to_kioc(mimd_t __user *umimd, mraid_mmadp_t *adp, uioc_t *kioc) { mbox64_t *mbox64; mbox_t *mbox; mraid_passthru_t *pthru32; uint32_t adapno; uint8_t opcode; uint8_t subopcode; mimd_t mimd; if (copy_from_user(&mimd, umimd, sizeof(mimd_t))) return (-EFAULT); /* * Applications are not allowed to send extd pthru */ if ((mimd.mbox[0] == MBOXCMD_PASSTHRU64) || (mimd.mbox[0] == MBOXCMD_EXTPTHRU)) return (-EINVAL); opcode = mimd.ui.fcs.opcode; subopcode = mimd.ui.fcs.subopcode; adapno = GETADAP(mimd.ui.fcs.adapno); if (adapno >= adapters_count_g) return (-ENODEV); kioc->adapno = adapno; kioc->mb_type = MBOX_LEGACY; kioc->app_type = APPTYPE_MIMD; switch (opcode) { case 0x82: if (subopcode == MEGAIOC_QADAPINFO) { kioc->opcode = GET_ADAP_INFO; kioc->data_dir = UIOC_RD; kioc->xferlen = sizeof(mraid_hba_info_t); if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen)) return (-ENOMEM); } else { con_log(CL_ANN, (KERN_WARNING "megaraid cmm: Invalid subop\n")); return (-EINVAL); } break; case 0x81: kioc->opcode = MBOX_CMD; kioc->xferlen = mimd.ui.fcs.length; kioc->user_data_len = kioc->xferlen; kioc->user_data = mimd.ui.fcs.buffer; if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen)) return (-ENOMEM); if (mimd.outlen) kioc->data_dir = UIOC_RD; if (mimd.inlen) kioc->data_dir |= UIOC_WR; break; case 0x80: kioc->opcode = MBOX_CMD; kioc->xferlen = (mimd.outlen > mimd.inlen) ? 
mimd.outlen : mimd.inlen; kioc->user_data_len = kioc->xferlen; kioc->user_data = mimd.data; if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen)) return (-ENOMEM); if (mimd.outlen) kioc->data_dir = UIOC_RD; if (mimd.inlen) kioc->data_dir |= UIOC_WR; break; default: return (-EINVAL); } /* * If driver command, nothing else to do */ if (opcode == 0x82) return 0; /* * This is a mailbox cmd; copy the mailbox from mimd */ mbox64 = (mbox64_t *)((unsigned long)kioc->cmdbuf); mbox = &mbox64->mbox32; memcpy(mbox, mimd.mbox, 14); if (mbox->cmd != MBOXCMD_PASSTHRU) { // regular DCMD mbox->xferaddr = (uint32_t)kioc->buf_paddr; if (kioc->data_dir & UIOC_WR) { if (copy_from_user(kioc->buf_vaddr, kioc->user_data, kioc->xferlen)) { return (-EFAULT); } } return 0; } /* * This is a regular 32-bit pthru cmd; mbox points to pthru struct. * Just like in above case, the beginning for memblk is treated as * a mailbox. The passthru will begin at next 1K boundary. And the * data will start 1K after that. */ pthru32 = kioc->pthru32; kioc->user_pthru = &umimd->pthru; mbox->xferaddr = (uint32_t)kioc->pthru32_h; if (copy_from_user(pthru32, kioc->user_pthru, sizeof(mraid_passthru_t))) { return (-EFAULT); } pthru32->dataxferaddr = kioc->buf_paddr; if (kioc->data_dir & UIOC_WR) { if (pthru32->dataxferlen > kioc->xferlen) return -EINVAL; if (copy_from_user(kioc->buf_vaddr, kioc->user_data, pthru32->dataxferlen)) { return (-EFAULT); } } return 0; } /** * mraid_mm_attach_buf - Attach a free dma buffer for required size * @adp : Adapter softstate * @kioc : kioc that the buffer needs to be attached to * @xferlen : required length for buffer * * First we search for a pool with smallest buffer that is >= @xferlen. If * that pool has no free buffer, we will try for the next bigger size. If none * is available, we will try to allocate the smallest buffer that is >= * @xferlen and attach it the pool. */ static int mraid_mm_attach_buf(mraid_mmadp_t *adp, uioc_t *kioc, int xferlen) { mm_dmapool_t *pool; int right_pool = -1; unsigned long flags; int i; kioc->pool_index = -1; kioc->buf_vaddr = NULL; kioc->buf_paddr = 0; kioc->free_buf = 0; /* * We need xferlen amount of memory. See if we can get it from our * dma pools. If we don't get exact size, we will try bigger buffer */ for (i = 0; i < MAX_DMA_POOLS; i++) { pool = &adp->dma_pool_list[i]; if (xferlen > pool->buf_size) continue; if (right_pool == -1) right_pool = i; spin_lock_irqsave(&pool->lock, flags); if (!pool->in_use) { pool->in_use = 1; kioc->pool_index = i; kioc->buf_vaddr = pool->vaddr; kioc->buf_paddr = pool->paddr; spin_unlock_irqrestore(&pool->lock, flags); return 0; } else { spin_unlock_irqrestore(&pool->lock, flags); continue; } } /* * If xferlen doesn't match any of our pools, return error */ if (right_pool == -1) return -EINVAL; /* * We did not get any buffer from the preallocated pool. Let us try * to allocate one new buffer. NOTE: This is a blocking call. */ pool = &adp->dma_pool_list[right_pool]; spin_lock_irqsave(&pool->lock, flags); kioc->pool_index = right_pool; kioc->free_buf = 1; kioc->buf_vaddr = dma_pool_alloc(pool->handle, GFP_ATOMIC, &kioc->buf_paddr); spin_unlock_irqrestore(&pool->lock, flags); if (!kioc->buf_vaddr) return -ENOMEM; return 0; } /** * mraid_mm_alloc_kioc - Returns a uioc_t from free list * @adp : Adapter softstate for this module * * The kioc_semaphore is initialized with number of kioc nodes in the * free kioc pool. If the kioc pool is empty, this function blocks till * a kioc becomes free. 
*/ static uioc_t * mraid_mm_alloc_kioc(mraid_mmadp_t *adp) { uioc_t *kioc; struct list_head* head; unsigned long flags; down(&adp->kioc_semaphore); spin_lock_irqsave(&adp->kioc_pool_lock, flags); head = &adp->kioc_pool; if (list_empty(head)) { up(&adp->kioc_semaphore); spin_unlock_irqrestore(&adp->kioc_pool_lock, flags); con_log(CL_ANN, ("megaraid cmm: kioc list empty!\n")); return NULL; } kioc = list_entry(head->next, uioc_t, list); list_del_init(&kioc->list); spin_unlock_irqrestore(&adp->kioc_pool_lock, flags); memset((caddr_t)(unsigned long)kioc->cmdbuf, 0, sizeof(mbox64_t)); memset((caddr_t) kioc->pthru32, 0, sizeof(mraid_passthru_t)); kioc->buf_vaddr = NULL; kioc->buf_paddr = 0; kioc->pool_index =-1; kioc->free_buf = 0; kioc->user_data = NULL; kioc->user_data_len = 0; kioc->user_pthru = NULL; kioc->timedout = 0; return kioc; } /** * mraid_mm_dealloc_kioc - Return kioc to free pool * @adp : Adapter softstate * @kioc : uioc_t node to be returned to free pool */ static void mraid_mm_dealloc_kioc(mraid_mmadp_t *adp, uioc_t *kioc) { mm_dmapool_t *pool; unsigned long flags; if (kioc->pool_index != -1) { pool = &adp->dma_pool_list[kioc->pool_index]; /* This routine may be called in non-isr context also */ spin_lock_irqsave(&pool->lock, flags); /* * While attaching the dma buffer, if we didn't get the * required buffer from the pool, we would have allocated * it at the run time and set the free_buf flag. We must * free that buffer. Otherwise, just mark that the buffer is * not in use */ if (kioc->free_buf == 1) dma_pool_free(pool->handle, kioc->buf_vaddr, kioc->buf_paddr); else pool->in_use = 0; spin_unlock_irqrestore(&pool->lock, flags); } /* Return the kioc to the free pool */ spin_lock_irqsave(&adp->kioc_pool_lock, flags); list_add(&kioc->list, &adp->kioc_pool); spin_unlock_irqrestore(&adp->kioc_pool_lock, flags); /* increment the free kioc count */ up(&adp->kioc_semaphore); return; } /** * lld_ioctl - Routine to issue ioctl to low level drvr * @adp : The adapter handle * @kioc : The ioctl packet with kernel addresses */ static int lld_ioctl(mraid_mmadp_t *adp, uioc_t *kioc) { int rval; struct uioc_timeout timeout = { }; kioc->status = -ENODATA; rval = adp->issue_uioc(adp->drvr_data, kioc, IOCTL_ISSUE); if (rval) return rval; /* * Start the timer */ if (adp->timeout > 0) { timeout.uioc = kioc; timer_setup_on_stack(&timeout.timer, lld_timedout, 0); timeout.timer.expires = jiffies + adp->timeout * HZ; add_timer(&timeout.timer); } /* * Wait till the low level driver completes the ioctl. After this * call, the ioctl either completed successfully or timedout. */ wait_event(wait_q, (kioc->status != -ENODATA)); if (timeout.timer.function) { del_timer_sync(&timeout.timer); destroy_timer_on_stack(&timeout.timer); } /* * If the command had timedout, we mark the controller offline * before returning */ if (kioc->timedout) { adp->quiescent = 0; } return kioc->status; } /** * ioctl_done - callback from the low level driver * @kioc : completed ioctl packet */ static void ioctl_done(uioc_t *kioc) { uint32_t adapno; int iterator; mraid_mmadp_t* adapter; bool is_found; /* * When the kioc returns from driver, make sure it still doesn't * have ENODATA in status. Otherwise, driver will hang on wait_event * forever */ if (kioc->status == -ENODATA) { con_log(CL_ANN, (KERN_WARNING "megaraid cmm: lld didn't change status!\n")); kioc->status = -EINVAL; } /* * Check if this kioc was timedout before. If so, nobody is waiting * on this kioc. We don't have to wake up anybody. 
Instead, we just * have to free the kioc */ if (kioc->timedout) { iterator = 0; adapter = NULL; adapno = kioc->adapno; is_found = false; con_log(CL_ANN, ( KERN_WARNING "megaraid cmm: completed " "ioctl that was timedout before\n")); list_for_each_entry(adapter, &adapters_list_g, list) { if (iterator++ == adapno) { is_found = true; break; } } kioc->timedout = 0; if (is_found) mraid_mm_dealloc_kioc( adapter, kioc ); } else { wake_up(&wait_q); } } /** * lld_timedout - callback from the expired timer * @t : timer that timed out */ static void lld_timedout(struct timer_list *t) { struct uioc_timeout *timeout = from_timer(timeout, t, timer); uioc_t *kioc = timeout->uioc; kioc->status = -ETIME; kioc->timedout = 1; con_log(CL_ANN, (KERN_WARNING "megaraid cmm: ioctl timed out\n")); wake_up(&wait_q); } /** * kioc_to_mimd - Converter from new back to old format * @kioc : Kernel space IOCTL packet (successfully issued) * @mimd : User space MIMD packet */ static int kioc_to_mimd(uioc_t *kioc, mimd_t __user *mimd) { mimd_t kmimd; uint8_t opcode; uint8_t subopcode; mbox64_t *mbox64; mraid_passthru_t __user *upthru32; mraid_passthru_t *kpthru32; mcontroller_t cinfo; mraid_hba_info_t *hinfo; if (copy_from_user(&kmimd, mimd, sizeof(mimd_t))) return (-EFAULT); opcode = kmimd.ui.fcs.opcode; subopcode = kmimd.ui.fcs.subopcode; if (opcode == 0x82) { switch (subopcode) { case MEGAIOC_QADAPINFO: hinfo = (mraid_hba_info_t *)(unsigned long) kioc->buf_vaddr; hinfo_to_cinfo(hinfo, &cinfo); if (copy_to_user(kmimd.data, &cinfo, sizeof(cinfo))) return (-EFAULT); return 0; default: return (-EINVAL); } return 0; } mbox64 = (mbox64_t *)(unsigned long)kioc->cmdbuf; if (kioc->user_pthru) { upthru32 = kioc->user_pthru; kpthru32 = kioc->pthru32; if (copy_to_user(&upthru32->scsistatus, &kpthru32->scsistatus, sizeof(uint8_t))) { return (-EFAULT); } } if (kioc->user_data) { if (copy_to_user(kioc->user_data, kioc->buf_vaddr, kioc->user_data_len)) { return (-EFAULT); } } if (copy_to_user(&mimd->mbox[17], &mbox64->mbox32.status, sizeof(uint8_t))) { return (-EFAULT); } return 0; } /** * hinfo_to_cinfo - Convert new format hba info into old format * @hinfo : New format, more comprehensive adapter info * @cinfo : Old format adapter info to support mimd_t apps */ static void hinfo_to_cinfo(mraid_hba_info_t *hinfo, mcontroller_t *cinfo) { if (!hinfo || !cinfo) return; cinfo->base = hinfo->baseport; cinfo->irq = hinfo->irq; cinfo->numldrv = hinfo->num_ldrv; cinfo->pcibus = hinfo->pci_bus; cinfo->pcidev = hinfo->pci_slot; cinfo->pcifun = PCI_FUNC(hinfo->pci_dev_fn); cinfo->pciid = hinfo->pci_device_id; cinfo->pcivendor = hinfo->pci_vendor_id; cinfo->pcislot = hinfo->pci_slot; cinfo->uid = hinfo->unique_id; } /** * mraid_mm_register_adp - Registration routine for low level drivers * @lld_adp : Adapter object */ int mraid_mm_register_adp(mraid_mmadp_t *lld_adp) { mraid_mmadp_t *adapter; mbox64_t *mbox_list; uioc_t *kioc; uint32_t rval; int i; if (lld_adp->drvr_type != DRVRTYPE_MBOX) return (-EINVAL); adapter = kzalloc(sizeof(mraid_mmadp_t), GFP_KERNEL); if (!adapter) return -ENOMEM; adapter->unique_id = lld_adp->unique_id; adapter->drvr_type = lld_adp->drvr_type; adapter->drvr_data = lld_adp->drvr_data; adapter->pdev = lld_adp->pdev; adapter->issue_uioc = lld_adp->issue_uioc; adapter->timeout = lld_adp->timeout; adapter->max_kioc = lld_adp->max_kioc; adapter->quiescent = 1; /* * Allocate single blocks of memory for all required kiocs, * mailboxes and passthru structures. 
*/ adapter->kioc_list = kmalloc_array(lld_adp->max_kioc, sizeof(uioc_t), GFP_KERNEL); adapter->mbox_list = kmalloc_array(lld_adp->max_kioc, sizeof(mbox64_t), GFP_KERNEL); adapter->pthru_dma_pool = dma_pool_create("megaraid mm pthru pool", &adapter->pdev->dev, sizeof(mraid_passthru_t), 16, 0); if (!adapter->kioc_list || !adapter->mbox_list || !adapter->pthru_dma_pool) { con_log(CL_ANN, (KERN_WARNING "megaraid cmm: out of memory, %s %d\n", __func__, __LINE__)); rval = (-ENOMEM); goto memalloc_error; } /* * Slice kioc_list and make a kioc_pool with the individiual kiocs */ INIT_LIST_HEAD(&adapter->kioc_pool); spin_lock_init(&adapter->kioc_pool_lock); sema_init(&adapter->kioc_semaphore, lld_adp->max_kioc); mbox_list = (mbox64_t *)adapter->mbox_list; for (i = 0; i < lld_adp->max_kioc; i++) { kioc = adapter->kioc_list + i; kioc->cmdbuf = (uint64_t)(unsigned long)(mbox_list + i); kioc->pthru32 = dma_pool_alloc(adapter->pthru_dma_pool, GFP_KERNEL, &kioc->pthru32_h); if (!kioc->pthru32) { con_log(CL_ANN, (KERN_WARNING "megaraid cmm: out of memory, %s %d\n", __func__, __LINE__)); rval = (-ENOMEM); goto pthru_dma_pool_error; } list_add_tail(&kioc->list, &adapter->kioc_pool); } // Setup the dma pools for data buffers if ((rval = mraid_mm_setup_dma_pools(adapter)) != 0) { goto dma_pool_error; } list_add_tail(&adapter->list, &adapters_list_g); adapters_count_g++; return 0; dma_pool_error: /* Do nothing */ pthru_dma_pool_error: for (i = 0; i < lld_adp->max_kioc; i++) { kioc = adapter->kioc_list + i; if (kioc->pthru32) { dma_pool_free(adapter->pthru_dma_pool, kioc->pthru32, kioc->pthru32_h); } } memalloc_error: kfree(adapter->kioc_list); kfree(adapter->mbox_list); dma_pool_destroy(adapter->pthru_dma_pool); kfree(adapter); return rval; } /** * mraid_mm_adapter_app_handle - return the application handle for this adapter * @unique_id : adapter unique identifier * * For the given driver data, locate the adapter in our global list and * return the corresponding handle, which is also used by applications to * uniquely identify an adapter. * * Return adapter handle if found in the list. * Return 0 if adapter could not be located, should never happen though. */ uint32_t mraid_mm_adapter_app_handle(uint32_t unique_id) { mraid_mmadp_t *adapter; mraid_mmadp_t *tmp; int index = 0; list_for_each_entry_safe(adapter, tmp, &adapters_list_g, list) { if (adapter->unique_id == unique_id) { return MKADAP(index); } index++; } return 0; } /** * mraid_mm_setup_dma_pools - Set up dma buffer pools per adapter * @adp : Adapter softstate * * We maintain a pool of dma buffers per each adapter. Each pool has one * buffer. E.g, we may have 5 dma pools - one each for 4k, 8k ... 64k buffers. * We have just one 4k buffer in 4k pool, one 8k buffer in 8k pool etc. We * dont' want to waste too much memory by allocating more buffers per each * pool. 
*/ static int mraid_mm_setup_dma_pools(mraid_mmadp_t *adp) { mm_dmapool_t *pool; int bufsize; int i; /* * Create MAX_DMA_POOLS number of pools */ bufsize = MRAID_MM_INIT_BUFF_SIZE; for (i = 0; i < MAX_DMA_POOLS; i++){ pool = &adp->dma_pool_list[i]; pool->buf_size = bufsize; spin_lock_init(&pool->lock); pool->handle = dma_pool_create("megaraid mm data buffer", &adp->pdev->dev, bufsize, 16, 0); if (!pool->handle) { goto dma_pool_setup_error; } pool->vaddr = dma_pool_alloc(pool->handle, GFP_KERNEL, &pool->paddr); if (!pool->vaddr) goto dma_pool_setup_error; bufsize = bufsize * 2; } return 0; dma_pool_setup_error: mraid_mm_teardown_dma_pools(adp); return (-ENOMEM); } /** * mraid_mm_unregister_adp - Unregister routine for low level drivers * @unique_id : UID of the adpater * * Assumes no outstanding ioctls to llds. */ int mraid_mm_unregister_adp(uint32_t unique_id) { mraid_mmadp_t *adapter; mraid_mmadp_t *tmp; list_for_each_entry_safe(adapter, tmp, &adapters_list_g, list) { if (adapter->unique_id == unique_id) { adapters_count_g--; list_del_init(&adapter->list); mraid_mm_free_adp_resources(adapter); kfree(adapter); con_log(CL_ANN, ( "megaraid cmm: Unregistered one adapter:%#x\n", unique_id)); return 0; } } return (-ENODEV); } /** * mraid_mm_free_adp_resources - Free adapter softstate * @adp : Adapter softstate */ static void mraid_mm_free_adp_resources(mraid_mmadp_t *adp) { uioc_t *kioc; int i; mraid_mm_teardown_dma_pools(adp); for (i = 0; i < adp->max_kioc; i++) { kioc = adp->kioc_list + i; dma_pool_free(adp->pthru_dma_pool, kioc->pthru32, kioc->pthru32_h); } kfree(adp->kioc_list); kfree(adp->mbox_list); dma_pool_destroy(adp->pthru_dma_pool); return; } /** * mraid_mm_teardown_dma_pools - Free all per adapter dma buffers * @adp : Adapter softstate */ static void mraid_mm_teardown_dma_pools(mraid_mmadp_t *adp) { int i; mm_dmapool_t *pool; for (i = 0; i < MAX_DMA_POOLS; i++) { pool = &adp->dma_pool_list[i]; if (pool->handle) { if (pool->vaddr) dma_pool_free(pool->handle, pool->vaddr, pool->paddr); dma_pool_destroy(pool->handle); pool->handle = NULL; } } return; } /** * mraid_mm_init - Module entry point */ static int __init mraid_mm_init(void) { int err; // Announce the driver version con_log(CL_ANN, (KERN_INFO "megaraid cmm: %s %s\n", LSI_COMMON_MOD_VERSION, LSI_COMMON_MOD_EXT_VERSION)); err = misc_register(&megaraid_mm_dev); if (err < 0) { con_log(CL_ANN, ("megaraid cmm: cannot register misc device\n")); return err; } init_waitqueue_head(&wait_q); INIT_LIST_HEAD(&adapters_list_g); return 0; } /** * mraid_mm_exit - Module exit point */ static void __exit mraid_mm_exit(void) { con_log(CL_DLEVEL1 , ("exiting common mod\n")); misc_deregister(&megaraid_mm_dev); } module_init(mraid_mm_init); module_exit(mraid_mm_exit); /* vi: set ts=8 sw=8 tw=78: */
linux-master
drivers/scsi/megaraid/megaraid_mm.c
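Editor's note: the megaraid_mm code above hands out ioctl packets from a fixed pool guarded by a counting semaphore (kioc_semaphore) plus a spinlock-protected free list (kioc_pool), so an allocation blocks while every packet is in flight and, once the semaphore is taken, the free list is guaranteed non-empty. The following is a minimal user-space sketch of that same bounded-pool pattern using POSIX primitives in place of the kernel semaphore/spinlock; the names pkt, pkt_pool, pkt_pool_init, pkt_get and pkt_put are invented for illustration and are not part of the driver.

#include <pthread.h>
#include <semaphore.h>

struct pkt {
	struct pkt *next;	/* links free packets, like uioc_t.list */
	int payload;		/* placeholder for per-request state */
};

struct pkt_pool {
	sem_t free_count;	/* counts free packets (cf. kioc_semaphore) */
	pthread_mutex_t lock;	/* protects free_list (cf. kioc_pool_lock) */
	struct pkt *free_list;
};

/* Seed the pool with a caller-supplied array, like adapter->kioc_list. */
static int pkt_pool_init(struct pkt_pool *pool, struct pkt *pkts, unsigned int n)
{
	unsigned int i;

	pool->free_list = NULL;
	if (sem_init(&pool->free_count, 0, n))
		return -1;
	pthread_mutex_init(&pool->lock, NULL);
	for (i = 0; i < n; i++) {
		pkts[i].next = pool->free_list;
		pool->free_list = &pkts[i];
	}
	return 0;
}

static struct pkt *pkt_get(struct pkt_pool *pool)
{
	struct pkt *p;

	sem_wait(&pool->free_count);	/* block until a packet is free */
	pthread_mutex_lock(&pool->lock);
	p = pool->free_list;		/* semaphore guarantees non-NULL */
	pool->free_list = p->next;
	pthread_mutex_unlock(&pool->lock);
	return p;
}

static void pkt_put(struct pkt_pool *pool, struct pkt *p)
{
	pthread_mutex_lock(&pool->lock);
	p->next = pool->free_list;
	pool->free_list = p;
	pthread_mutex_unlock(&pool->lock);
	sem_post(&pool->free_count);	/* cf. up(&adp->kioc_semaphore) */
}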
// SPDX-License-Identifier: GPL-2.0-or-later /* * Basic HP/COMPAQ MSA 1000 support. This is only needed if your HW cannot be * upgraded. * * Copyright (C) 2006 Red Hat, Inc. All rights reserved. * Copyright (C) 2006 Mike Christie * Copyright (C) 2008 Hannes Reinecke <[email protected]> */ #include <linux/slab.h> #include <linux/module.h> #include <scsi/scsi.h> #include <scsi/scsi_dbg.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_dh.h> #define HP_SW_NAME "hp_sw" #define HP_SW_TIMEOUT (60 * HZ) #define HP_SW_RETRIES 3 #define HP_SW_PATH_UNINITIALIZED -1 #define HP_SW_PATH_ACTIVE 0 #define HP_SW_PATH_PASSIVE 1 struct hp_sw_dh_data { int path_state; int retries; int retry_cnt; struct scsi_device *sdev; }; static int hp_sw_start_stop(struct hp_sw_dh_data *); /* * tur_done - Handle TEST UNIT READY return status * @sdev: sdev the command has been sent to * @errors: blk error code * * Returns SCSI_DH_DEV_OFFLINED if the sdev is on the passive path */ static int tur_done(struct scsi_device *sdev, struct hp_sw_dh_data *h, struct scsi_sense_hdr *sshdr) { int ret = SCSI_DH_IO; switch (sshdr->sense_key) { case UNIT_ATTENTION: ret = SCSI_DH_IMM_RETRY; break; case NOT_READY: if (sshdr->asc == 0x04 && sshdr->ascq == 2) { /* * LUN not ready - Initialization command required * * This is the passive path */ h->path_state = HP_SW_PATH_PASSIVE; ret = SCSI_DH_OK; break; } fallthrough; default: sdev_printk(KERN_WARNING, sdev, "%s: sending tur failed, sense %x/%x/%x\n", HP_SW_NAME, sshdr->sense_key, sshdr->asc, sshdr->ascq); break; } return ret; } /* * hp_sw_tur - Send TEST UNIT READY * @sdev: sdev command should be sent to * * Use the TEST UNIT READY command to determine * the path state. */ static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h) { unsigned char cmd[6] = { TEST_UNIT_READY }; struct scsi_sense_hdr sshdr; int ret = SCSI_DH_OK, res; blk_opf_t opf = REQ_OP_DRV_IN | REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER; const struct scsi_exec_args exec_args = { .sshdr = &sshdr, }; retry: res = scsi_execute_cmd(sdev, cmd, opf, NULL, 0, HP_SW_TIMEOUT, HP_SW_RETRIES, &exec_args); if (res) { if (scsi_sense_valid(&sshdr)) ret = tur_done(sdev, h, &sshdr); else { sdev_printk(KERN_WARNING, sdev, "%s: sending tur failed with %x\n", HP_SW_NAME, res); ret = SCSI_DH_IO; } } else { h->path_state = HP_SW_PATH_ACTIVE; ret = SCSI_DH_OK; } if (ret == SCSI_DH_IMM_RETRY) goto retry; return ret; } /* * hp_sw_start_stop - Send START STOP UNIT command * @sdev: sdev command should be sent to * * Sending START STOP UNIT activates the SP. */ static int hp_sw_start_stop(struct hp_sw_dh_data *h) { unsigned char cmd[6] = { START_STOP, 0, 0, 0, 1, 0 }; struct scsi_sense_hdr sshdr; struct scsi_device *sdev = h->sdev; int res, rc = SCSI_DH_OK; int retry_cnt = HP_SW_RETRIES; blk_opf_t opf = REQ_OP_DRV_IN | REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER; const struct scsi_exec_args exec_args = { .sshdr = &sshdr, }; retry: res = scsi_execute_cmd(sdev, cmd, opf, NULL, 0, HP_SW_TIMEOUT, HP_SW_RETRIES, &exec_args); if (res) { if (!scsi_sense_valid(&sshdr)) { sdev_printk(KERN_WARNING, sdev, "%s: sending start_stop_unit failed, " "no sense available\n", HP_SW_NAME); return SCSI_DH_IO; } switch (sshdr.sense_key) { case NOT_READY: if (sshdr.asc == 0x04 && sshdr.ascq == 3) { /* * LUN not ready - manual intervention required * * Switch-over in progress, retry. 
*/ if (--retry_cnt) goto retry; rc = SCSI_DH_RETRY; break; } fallthrough; default: sdev_printk(KERN_WARNING, sdev, "%s: sending start_stop_unit failed, " "sense %x/%x/%x\n", HP_SW_NAME, sshdr.sense_key, sshdr.asc, sshdr.ascq); rc = SCSI_DH_IO; } } return rc; } static blk_status_t hp_sw_prep_fn(struct scsi_device *sdev, struct request *req) { struct hp_sw_dh_data *h = sdev->handler_data; if (h->path_state != HP_SW_PATH_ACTIVE) { req->rq_flags |= RQF_QUIET; return BLK_STS_IOERR; } return BLK_STS_OK; } /* * hp_sw_activate - Activate a path * @sdev: sdev on the path to be activated * * The HP Active/Passive firmware is pretty simple; * the passive path reports NOT READY with sense codes * 0x04/0x02; a START STOP UNIT command will then * activate the passive path (and deactivate the * previously active one). */ static int hp_sw_activate(struct scsi_device *sdev, activate_complete fn, void *data) { int ret = SCSI_DH_OK; struct hp_sw_dh_data *h = sdev->handler_data; ret = hp_sw_tur(sdev, h); if (ret == SCSI_DH_OK && h->path_state == HP_SW_PATH_PASSIVE) ret = hp_sw_start_stop(h); if (fn) fn(data, ret); return 0; } static int hp_sw_bus_attach(struct scsi_device *sdev) { struct hp_sw_dh_data *h; int ret; h = kzalloc(sizeof(*h), GFP_KERNEL); if (!h) return SCSI_DH_NOMEM; h->path_state = HP_SW_PATH_UNINITIALIZED; h->retries = HP_SW_RETRIES; h->sdev = sdev; ret = hp_sw_tur(sdev, h); if (ret != SCSI_DH_OK) goto failed; if (h->path_state == HP_SW_PATH_UNINITIALIZED) { ret = SCSI_DH_NOSYS; goto failed; } sdev_printk(KERN_INFO, sdev, "%s: attached to %s path\n", HP_SW_NAME, h->path_state == HP_SW_PATH_ACTIVE? "active":"passive"); sdev->handler_data = h; return SCSI_DH_OK; failed: kfree(h); return ret; } static void hp_sw_bus_detach( struct scsi_device *sdev ) { kfree(sdev->handler_data); sdev->handler_data = NULL; } static struct scsi_device_handler hp_sw_dh = { .name = HP_SW_NAME, .module = THIS_MODULE, .attach = hp_sw_bus_attach, .detach = hp_sw_bus_detach, .activate = hp_sw_activate, .prep_fn = hp_sw_prep_fn, }; static int __init hp_sw_init(void) { return scsi_register_device_handler(&hp_sw_dh); } static void __exit hp_sw_exit(void) { scsi_unregister_device_handler(&hp_sw_dh); } module_init(hp_sw_init); module_exit(hp_sw_exit); MODULE_DESCRIPTION("HP Active/Passive driver"); MODULE_AUTHOR("Mike Christie <[email protected]"); MODULE_LICENSE("GPL");
linux-master
drivers/scsi/device_handler/scsi_dh_hp_sw.c
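Editor's note: scsi_dh_hp_sw above is essentially a two-state path handler -- TEST UNIT READY decides whether the path is active or passive, and START STOP UNIT promotes a passive path, retrying a bounded number of times while the array reports that a switch-over is in progress. The stand-alone C model below mirrors only that control flow; the SCSI commands are replaced by stubs, and all names (stub_test_unit_ready, stub_start_stop_unit, path_activate, CMD_*) are invented for the example.

#include <stdio.h>

enum path_state { PATH_UNKNOWN, PATH_ACTIVE, PATH_PASSIVE };
enum cmd_result { CMD_OK, CMD_RETRY, CMD_FAILED };

/* stand-ins for hp_sw_tur() / hp_sw_start_stop() issuing real CDBs */
static enum cmd_result stub_test_unit_ready(enum path_state *state)
{
	*state = PATH_PASSIVE;		/* pretend we landed on the passive SP */
	return CMD_OK;
}

static enum cmd_result stub_start_stop_unit(void)
{
	return CMD_OK;			/* pretend the switch-over succeeded */
}

static int path_activate(void)
{
	enum path_state state = PATH_UNKNOWN;
	int retries = 3;		/* mirrors HP_SW_RETRIES */

	if (stub_test_unit_ready(&state) != CMD_OK)
		return -1;
	if (state == PATH_ACTIVE)
		return 0;		/* already on the active path */

	while (retries--) {
		enum cmd_result res = stub_start_stop_unit();

		if (res == CMD_OK)
			return 0;
		if (res != CMD_RETRY)	/* hard failure: give up */
			break;
	}
	return -1;
}

int main(void)
{
	printf("path_activate() -> %d\n", path_activate());
	return 0;
}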
// SPDX-License-Identifier: GPL-2.0-or-later /* * Target driver for EMC CLARiiON AX/CX-series hardware. * Based on code from Lars Marowsky-Bree <[email protected]> * and Ed Goggin <[email protected]>. * * Copyright (C) 2006 Red Hat, Inc. All rights reserved. * Copyright (C) 2006 Mike Christie */ #include <linux/slab.h> #include <linux/module.h> #include <scsi/scsi.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_dh.h> #include <scsi/scsi_device.h> #define CLARIION_NAME "emc" #define CLARIION_TRESPASS_PAGE 0x22 #define CLARIION_BUFFER_SIZE 0xFC #define CLARIION_TIMEOUT (60 * HZ) #define CLARIION_RETRIES 3 #define CLARIION_UNBOUND_LU -1 #define CLARIION_SP_A 0 #define CLARIION_SP_B 1 /* Flags */ #define CLARIION_SHORT_TRESPASS 1 #define CLARIION_HONOR_RESERVATIONS 2 /* LUN states */ #define CLARIION_LUN_UNINITIALIZED -1 #define CLARIION_LUN_UNBOUND 0 #define CLARIION_LUN_BOUND 1 #define CLARIION_LUN_OWNED 2 static unsigned char long_trespass[] = { 0, 0, 0, 0, 0, 0, 0, 0, CLARIION_TRESPASS_PAGE, /* Page code */ 0x09, /* Page length - 2 */ 0x01, /* Trespass code */ 0xff, 0xff, /* Trespass target */ 0, 0, 0, 0, 0, 0 /* Reserved bytes / unknown */ }; static unsigned char short_trespass[] = { 0, 0, 0, 0, CLARIION_TRESPASS_PAGE, /* Page code */ 0x02, /* Page length - 2 */ 0x01, /* Trespass code */ 0xff, /* Trespass target */ }; static const char * lun_state[] = { "not bound", "bound", "owned", }; struct clariion_dh_data { /* * Flags: * CLARIION_SHORT_TRESPASS * Use short trespass command (FC-series) or the long version * (default for AX/CX CLARiiON arrays). * * CLARIION_HONOR_RESERVATIONS * Whether or not (default) to honor SCSI reservations when * initiating a switch-over. */ unsigned flags; /* * I/O buffer for both MODE_SELECT and INQUIRY commands. */ unsigned char buffer[CLARIION_BUFFER_SIZE]; /* * LUN state */ int lun_state; /* * SP Port number */ int port; /* * which SP (A=0,B=1,UNBOUND=-1) is the default SP for this * path's mapped LUN */ int default_sp; /* * which SP (A=0,B=1,UNBOUND=-1) is the active SP for this * path's mapped LUN */ int current_sp; }; /* * Parse MODE_SELECT cmd reply. */ static int trespass_endio(struct scsi_device *sdev, struct scsi_sense_hdr *sshdr) { int err = SCSI_DH_IO; sdev_printk(KERN_ERR, sdev, "%s: Found valid sense data 0x%2x, " "0x%2x, 0x%2x while sending CLARiiON trespass " "command.\n", CLARIION_NAME, sshdr->sense_key, sshdr->asc, sshdr->ascq); if (sshdr->sense_key == 0x05 && sshdr->asc == 0x04 && sshdr->ascq == 0x00) { /* * Array based copy in progress -- do not send * mode_select or copy will be aborted mid-stream. */ sdev_printk(KERN_INFO, sdev, "%s: Array Based Copy in " "progress while sending CLARiiON trespass " "command.\n", CLARIION_NAME); err = SCSI_DH_DEV_TEMP_BUSY; } else if (sshdr->sense_key == 0x02 && sshdr->asc == 0x04 && sshdr->ascq == 0x03) { /* * LUN Not Ready - Manual Intervention Required * indicates in-progress ucode upgrade (NDU). 
*/ sdev_printk(KERN_INFO, sdev, "%s: Detected in-progress " "ucode upgrade NDU operation while sending " "CLARiiON trespass command.\n", CLARIION_NAME); err = SCSI_DH_DEV_TEMP_BUSY; } else err = SCSI_DH_DEV_FAILED; return err; } static int parse_sp_info_reply(struct scsi_device *sdev, struct clariion_dh_data *csdev) { int err = SCSI_DH_OK; /* check for in-progress ucode upgrade (NDU) */ if (csdev->buffer[48] != 0) { sdev_printk(KERN_NOTICE, sdev, "%s: Detected in-progress " "ucode upgrade NDU operation while finding " "current active SP.", CLARIION_NAME); err = SCSI_DH_DEV_TEMP_BUSY; goto out; } if (csdev->buffer[4] > 2) { /* Invalid buffer format */ sdev_printk(KERN_NOTICE, sdev, "%s: invalid VPD page 0xC0 format\n", CLARIION_NAME); err = SCSI_DH_NOSYS; goto out; } switch (csdev->buffer[28] & 0x0f) { case 6: sdev_printk(KERN_NOTICE, sdev, "%s: ALUA failover mode detected\n", CLARIION_NAME); break; case 4: /* Linux failover */ break; default: sdev_printk(KERN_WARNING, sdev, "%s: Invalid failover mode %d\n", CLARIION_NAME, csdev->buffer[28] & 0x0f); err = SCSI_DH_NOSYS; goto out; } csdev->default_sp = csdev->buffer[5]; csdev->lun_state = csdev->buffer[4]; csdev->current_sp = csdev->buffer[8]; csdev->port = csdev->buffer[7]; if (csdev->lun_state == CLARIION_LUN_OWNED) sdev->access_state = SCSI_ACCESS_STATE_OPTIMAL; else sdev->access_state = SCSI_ACCESS_STATE_STANDBY; if (csdev->default_sp == csdev->current_sp) sdev->access_state |= SCSI_ACCESS_STATE_PREFERRED; out: return err; } #define emc_default_str "FC (Legacy)" static char * parse_sp_model(struct scsi_device *sdev, unsigned char *buffer) { unsigned char len = buffer[4] + 5; char *sp_model = NULL; unsigned char sp_len, serial_len; if (len < 160) { sdev_printk(KERN_WARNING, sdev, "%s: Invalid information section length %d\n", CLARIION_NAME, len); /* Check for old FC arrays */ if (!strncmp(buffer + 8, "DGC", 3)) { /* Old FC array, not supporting extended information */ sp_model = emc_default_str; } goto out; } /* * Parse extended information for SP model number */ serial_len = buffer[160]; if (serial_len == 0 || serial_len + 161 > len) { sdev_printk(KERN_WARNING, sdev, "%s: Invalid array serial number length %d\n", CLARIION_NAME, serial_len); goto out; } sp_len = buffer[99]; if (sp_len == 0 || serial_len + sp_len + 161 > len) { sdev_printk(KERN_WARNING, sdev, "%s: Invalid model number length %d\n", CLARIION_NAME, sp_len); goto out; } sp_model = &buffer[serial_len + 161]; /* Strip whitespace at the end */ while (sp_len > 1 && sp_model[sp_len - 1] == ' ') sp_len--; sp_model[sp_len] = '\0'; out: return sp_model; } static int send_trespass_cmd(struct scsi_device *sdev, struct clariion_dh_data *csdev) { unsigned char *page22; unsigned char cdb[MAX_COMMAND_SIZE]; int err, res = SCSI_DH_OK, len; struct scsi_sense_hdr sshdr; blk_opf_t opf = REQ_OP_DRV_OUT | REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER; const struct scsi_exec_args exec_args = { .sshdr = &sshdr, }; if (csdev->flags & CLARIION_SHORT_TRESPASS) { page22 = short_trespass; if (!(csdev->flags & CLARIION_HONOR_RESERVATIONS)) /* Set Honor Reservations bit */ page22[6] |= 0x80; len = sizeof(short_trespass); cdb[0] = MODE_SELECT; cdb[1] = 0x10; cdb[4] = len; } else { page22 = long_trespass; if (!(csdev->flags & CLARIION_HONOR_RESERVATIONS)) /* Set Honor Reservations bit */ page22[10] |= 0x80; len = sizeof(long_trespass); cdb[0] = MODE_SELECT_10; cdb[8] = len; } BUG_ON((len > CLARIION_BUFFER_SIZE)); memcpy(csdev->buffer, page22, len); err = scsi_execute_cmd(sdev, cdb, opf, 
csdev->buffer, len, CLARIION_TIMEOUT * HZ, CLARIION_RETRIES, &exec_args); if (err) { if (scsi_sense_valid(&sshdr)) res = trespass_endio(sdev, &sshdr); else { sdev_printk(KERN_INFO, sdev, "%s: failed to send MODE SELECT: %x\n", CLARIION_NAME, err); res = SCSI_DH_IO; } } return res; } static enum scsi_disposition clariion_check_sense(struct scsi_device *sdev, struct scsi_sense_hdr *sense_hdr) { switch (sense_hdr->sense_key) { case NOT_READY: if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x03) /* * LUN Not Ready - Manual Intervention Required * indicates this is a passive path. * * FIXME: However, if this is seen and EVPD C0 * indicates that this is due to a NDU in * progress, we should set FAIL_PATH too. * This indicates we might have to do a SCSI * inquiry in the end_io path. Ugh. * * Can return FAILED only when we want the error * recovery process to kick in. */ return SUCCESS; break; case ILLEGAL_REQUEST: if (sense_hdr->asc == 0x25 && sense_hdr->ascq == 0x01) /* * An array based copy is in progress. Do not * fail the path, do not bypass to another PG, * do not retry. Fail the IO immediately. * (Actually this is the same conclusion as in * the default handler, but lets make sure.) * * Can return FAILED only when we want the error * recovery process to kick in. */ return SUCCESS; break; case UNIT_ATTENTION: if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00) /* * Unit Attention Code. This is the first IO * to the new path, so just retry. */ return ADD_TO_MLQUEUE; break; } return SCSI_RETURN_NOT_HANDLED; } static blk_status_t clariion_prep_fn(struct scsi_device *sdev, struct request *req) { struct clariion_dh_data *h = sdev->handler_data; if (h->lun_state != CLARIION_LUN_OWNED) { req->rq_flags |= RQF_QUIET; return BLK_STS_IOERR; } return BLK_STS_OK; } static int clariion_std_inquiry(struct scsi_device *sdev, struct clariion_dh_data *csdev) { int err = SCSI_DH_OK; char *sp_model; sp_model = parse_sp_model(sdev, sdev->inquiry); if (!sp_model) { err = SCSI_DH_DEV_UNSUPP; goto out; } /* * FC Series arrays do not support long trespass */ if (!strlen(sp_model) || !strncmp(sp_model, "FC",2)) csdev->flags |= CLARIION_SHORT_TRESPASS; sdev_printk(KERN_INFO, sdev, "%s: detected Clariion %s, flags %x\n", CLARIION_NAME, sp_model, csdev->flags); out: return err; } static int clariion_send_inquiry(struct scsi_device *sdev, struct clariion_dh_data *csdev) { int err = SCSI_DH_IO; if (!scsi_get_vpd_page(sdev, 0xC0, csdev->buffer, CLARIION_BUFFER_SIZE)) err = parse_sp_info_reply(sdev, csdev); return err; } static int clariion_activate(struct scsi_device *sdev, activate_complete fn, void *data) { struct clariion_dh_data *csdev = sdev->handler_data; int result; result = clariion_send_inquiry(sdev, csdev); if (result != SCSI_DH_OK) goto done; if (csdev->lun_state == CLARIION_LUN_OWNED) goto done; result = send_trespass_cmd(sdev, csdev); if (result != SCSI_DH_OK) goto done; sdev_printk(KERN_INFO, sdev,"%s: %s trespass command sent\n", CLARIION_NAME, csdev->flags&CLARIION_SHORT_TRESPASS?"short":"long" ); /* Update status */ result = clariion_send_inquiry(sdev, csdev); if (result != SCSI_DH_OK) goto done; done: sdev_printk(KERN_INFO, sdev, "%s: at SP %c Port %d (%s, default SP %c)\n", CLARIION_NAME, csdev->current_sp + 'A', csdev->port, lun_state[csdev->lun_state], csdev->default_sp + 'A'); if (fn) fn(data, result); return 0; } /* * params - parameters in the following format * "no_of_params\0param1\0param2\0param3\0...\0" * for example, string for 2 parameters with value 10 and 21 * is specified as 
"2\010\021\0". */ static int clariion_set_params(struct scsi_device *sdev, const char *params) { struct clariion_dh_data *csdev = sdev->handler_data; unsigned int hr = 0, st = 0, argc; const char *p = params; int result = SCSI_DH_OK; if ((sscanf(params, "%u", &argc) != 1) || (argc != 2)) return -EINVAL; while (*p++) ; if ((sscanf(p, "%u", &st) != 1) || (st > 1)) return -EINVAL; while (*p++) ; if ((sscanf(p, "%u", &hr) != 1) || (hr > 1)) return -EINVAL; if (st) csdev->flags |= CLARIION_SHORT_TRESPASS; else csdev->flags &= ~CLARIION_SHORT_TRESPASS; if (hr) csdev->flags |= CLARIION_HONOR_RESERVATIONS; else csdev->flags &= ~CLARIION_HONOR_RESERVATIONS; /* * If this path is owned, we have to send a trespass command * with the new parameters. If not, simply return. Next trespass * command would use the parameters. */ if (csdev->lun_state != CLARIION_LUN_OWNED) goto done; csdev->lun_state = CLARIION_LUN_UNINITIALIZED; result = send_trespass_cmd(sdev, csdev); if (result != SCSI_DH_OK) goto done; /* Update status */ result = clariion_send_inquiry(sdev, csdev); done: return result; } static int clariion_bus_attach(struct scsi_device *sdev) { struct clariion_dh_data *h; int err; h = kzalloc(sizeof(*h) , GFP_KERNEL); if (!h) return SCSI_DH_NOMEM; h->lun_state = CLARIION_LUN_UNINITIALIZED; h->default_sp = CLARIION_UNBOUND_LU; h->current_sp = CLARIION_UNBOUND_LU; err = clariion_std_inquiry(sdev, h); if (err != SCSI_DH_OK) goto failed; err = clariion_send_inquiry(sdev, h); if (err != SCSI_DH_OK) goto failed; sdev_printk(KERN_INFO, sdev, "%s: connected to SP %c Port %d (%s, default SP %c)\n", CLARIION_NAME, h->current_sp + 'A', h->port, lun_state[h->lun_state], h->default_sp + 'A'); sdev->handler_data = h; return SCSI_DH_OK; failed: kfree(h); return err; } static void clariion_bus_detach(struct scsi_device *sdev) { kfree(sdev->handler_data); sdev->handler_data = NULL; } static struct scsi_device_handler clariion_dh = { .name = CLARIION_NAME, .module = THIS_MODULE, .attach = clariion_bus_attach, .detach = clariion_bus_detach, .check_sense = clariion_check_sense, .activate = clariion_activate, .prep_fn = clariion_prep_fn, .set_params = clariion_set_params, }; static int __init clariion_init(void) { int r; r = scsi_register_device_handler(&clariion_dh); if (r != 0) printk(KERN_ERR "%s: Failed to register scsi device handler.", CLARIION_NAME); return r; } static void __exit clariion_exit(void) { scsi_unregister_device_handler(&clariion_dh); } module_init(clariion_init); module_exit(clariion_exit); MODULE_DESCRIPTION("EMC CX/AX/FC-family driver"); MODULE_AUTHOR("Mike Christie <[email protected]>, Chandra Seetharaman <[email protected]>"); MODULE_LICENSE("GPL");
linux-master
drivers/scsi/device_handler/scsi_dh_emc.c
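Editor's note: clariion_set_params above (like alua_set_params in the ALUA handler that follows) consumes parameters packed as "no_of_params\0param1\0param2\0...". The stand-alone C parser below walks that packed layout the same way, by skipping past each NUL terminator before scanning the next value; parse_packed and MAX_PARAMS are invented names rather than a kernel API, and the example input simply toggles the two CLARiiON flags (short trespass on, honor reservations off).

#include <stdio.h>
#include <stdlib.h>

#define MAX_PARAMS 8

/* Returns the number of values parsed into vals[], or -1 on malformed input. */
static int parse_packed(const char *p, unsigned int *vals, int max)
{
	unsigned int argc, i;

	if (sscanf(p, "%u", &argc) != 1 || argc > (unsigned int)max)
		return -1;
	for (i = 0; i < argc; i++) {
		while (*p++)		/* advance past the next '\0' */
			;
		if (sscanf(p, "%u", &vals[i]) != 1)
			return -1;
	}
	return (int)argc;
}

int main(void)
{
	/* "2" '\0' "1" '\0' "0": two parameters with values 1 and 0 */
	static const char params[] = "2\0" "1\0" "0";
	unsigned int vals[MAX_PARAMS];
	int n = parse_packed(params, vals, MAX_PARAMS);

	if (n == 2)
		printf("short_trespass=%u honor_reservations=%u\n",
		       vals[0], vals[1]);
	return n == 2 ? EXIT_SUCCESS : EXIT_FAILURE;
}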
// SPDX-License-Identifier: GPL-2.0-or-later /* * Generic SCSI-3 ALUA SCSI Device Handler * * Copyright (C) 2007-2010 Hannes Reinecke, SUSE Linux Products GmbH. * All rights reserved. */ #include <linux/slab.h> #include <linux/delay.h> #include <linux/module.h> #include <asm/unaligned.h> #include <scsi/scsi.h> #include <scsi/scsi_proto.h> #include <scsi/scsi_dbg.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_dh.h> #define ALUA_DH_NAME "alua" #define ALUA_DH_VER "2.0" #define TPGS_SUPPORT_NONE 0x00 #define TPGS_SUPPORT_OPTIMIZED 0x01 #define TPGS_SUPPORT_NONOPTIMIZED 0x02 #define TPGS_SUPPORT_STANDBY 0x04 #define TPGS_SUPPORT_UNAVAILABLE 0x08 #define TPGS_SUPPORT_LBA_DEPENDENT 0x10 #define TPGS_SUPPORT_OFFLINE 0x40 #define TPGS_SUPPORT_TRANSITION 0x80 #define TPGS_SUPPORT_ALL 0xdf #define RTPG_FMT_MASK 0x70 #define RTPG_FMT_EXT_HDR 0x10 #define TPGS_MODE_UNINITIALIZED -1 #define TPGS_MODE_NONE 0x0 #define TPGS_MODE_IMPLICIT 0x1 #define TPGS_MODE_EXPLICIT 0x2 #define ALUA_RTPG_SIZE 128 #define ALUA_FAILOVER_TIMEOUT 60 #define ALUA_FAILOVER_RETRIES 5 #define ALUA_RTPG_DELAY_MSECS 5 #define ALUA_RTPG_RETRY_DELAY 2 /* device handler flags */ #define ALUA_OPTIMIZE_STPG 0x01 #define ALUA_RTPG_EXT_HDR_UNSUPP 0x02 /* State machine flags */ #define ALUA_PG_RUN_RTPG 0x10 #define ALUA_PG_RUN_STPG 0x20 #define ALUA_PG_RUNNING 0x40 static uint optimize_stpg; module_param(optimize_stpg, uint, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(optimize_stpg, "Allow use of a non-optimized path, rather than sending a STPG, when implicit TPGS is supported (0=No,1=Yes). Default is 0."); static LIST_HEAD(port_group_list); static DEFINE_SPINLOCK(port_group_lock); static struct workqueue_struct *kaluad_wq; struct alua_port_group { struct kref kref; struct rcu_head rcu; struct list_head node; struct list_head dh_list; unsigned char device_id_str[256]; int device_id_len; int group_id; int tpgs; int state; int pref; int valid_states; unsigned flags; /* used for optimizing STPG */ unsigned char transition_tmo; unsigned long expiry; unsigned long interval; struct delayed_work rtpg_work; spinlock_t lock; struct list_head rtpg_list; struct scsi_device *rtpg_sdev; }; struct alua_dh_data { struct list_head node; struct alua_port_group __rcu *pg; int group_id; spinlock_t pg_lock; struct scsi_device *sdev; int init_error; struct mutex init_mutex; bool disabled; }; struct alua_queue_data { struct list_head entry; activate_complete callback_fn; void *callback_data; }; #define ALUA_POLICY_SWITCH_CURRENT 0 #define ALUA_POLICY_SWITCH_ALL 1 static void alua_rtpg_work(struct work_struct *work); static bool alua_rtpg_queue(struct alua_port_group *pg, struct scsi_device *sdev, struct alua_queue_data *qdata, bool force); static void alua_check(struct scsi_device *sdev, bool force); static void release_port_group(struct kref *kref) { struct alua_port_group *pg; pg = container_of(kref, struct alua_port_group, kref); if (pg->rtpg_sdev) flush_delayed_work(&pg->rtpg_work); spin_lock(&port_group_lock); list_del(&pg->node); spin_unlock(&port_group_lock); kfree_rcu(pg, rcu); } /* * submit_rtpg - Issue a REPORT TARGET GROUP STATES command * @sdev: sdev the command should be sent to */ static int submit_rtpg(struct scsi_device *sdev, unsigned char *buff, int bufflen, struct scsi_sense_hdr *sshdr, int flags) { u8 cdb[MAX_COMMAND_SIZE]; blk_opf_t opf = REQ_OP_DRV_IN | REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER; const struct scsi_exec_args exec_args = { .sshdr = sshdr, }; /* Prepare the command. 
*/ memset(cdb, 0x0, MAX_COMMAND_SIZE); cdb[0] = MAINTENANCE_IN; if (!(flags & ALUA_RTPG_EXT_HDR_UNSUPP)) cdb[1] = MI_REPORT_TARGET_PGS | MI_EXT_HDR_PARAM_FMT; else cdb[1] = MI_REPORT_TARGET_PGS; put_unaligned_be32(bufflen, &cdb[6]); return scsi_execute_cmd(sdev, cdb, opf, buff, bufflen, ALUA_FAILOVER_TIMEOUT * HZ, ALUA_FAILOVER_RETRIES, &exec_args); } /* * submit_stpg - Issue a SET TARGET PORT GROUP command * * Currently we're only setting the current target port group state * to 'active/optimized' and let the array firmware figure out * the states of the remaining groups. */ static int submit_stpg(struct scsi_device *sdev, int group_id, struct scsi_sense_hdr *sshdr) { u8 cdb[MAX_COMMAND_SIZE]; unsigned char stpg_data[8]; int stpg_len = 8; blk_opf_t opf = REQ_OP_DRV_OUT | REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER; const struct scsi_exec_args exec_args = { .sshdr = sshdr, }; /* Prepare the data buffer */ memset(stpg_data, 0, stpg_len); stpg_data[4] = SCSI_ACCESS_STATE_OPTIMAL; put_unaligned_be16(group_id, &stpg_data[6]); /* Prepare the command. */ memset(cdb, 0x0, MAX_COMMAND_SIZE); cdb[0] = MAINTENANCE_OUT; cdb[1] = MO_SET_TARGET_PGS; put_unaligned_be32(stpg_len, &cdb[6]); return scsi_execute_cmd(sdev, cdb, opf, stpg_data, stpg_len, ALUA_FAILOVER_TIMEOUT * HZ, ALUA_FAILOVER_RETRIES, &exec_args); } static struct alua_port_group *alua_find_get_pg(char *id_str, size_t id_size, int group_id) { struct alua_port_group *pg; if (!id_str || !id_size || !strlen(id_str)) return NULL; list_for_each_entry(pg, &port_group_list, node) { if (pg->group_id != group_id) continue; if (!pg->device_id_len || pg->device_id_len != id_size) continue; if (strncmp(pg->device_id_str, id_str, id_size)) continue; if (!kref_get_unless_zero(&pg->kref)) continue; return pg; } return NULL; } /* * alua_alloc_pg - Allocate a new port_group structure * @sdev: scsi device * @group_id: port group id * @tpgs: target port group settings * * Allocate a new port_group structure for a given * device. */ static struct alua_port_group *alua_alloc_pg(struct scsi_device *sdev, int group_id, int tpgs) { struct alua_port_group *pg, *tmp_pg; pg = kzalloc(sizeof(struct alua_port_group), GFP_KERNEL); if (!pg) return ERR_PTR(-ENOMEM); pg->device_id_len = scsi_vpd_lun_id(sdev, pg->device_id_str, sizeof(pg->device_id_str)); if (pg->device_id_len <= 0) { /* * TPGS supported but no device identification found. * Generate private device identification. */ sdev_printk(KERN_INFO, sdev, "%s: No device descriptors found\n", ALUA_DH_NAME); pg->device_id_str[0] = '\0'; pg->device_id_len = 0; } pg->group_id = group_id; pg->tpgs = tpgs; pg->state = SCSI_ACCESS_STATE_OPTIMAL; pg->valid_states = TPGS_SUPPORT_ALL; if (optimize_stpg) pg->flags |= ALUA_OPTIMIZE_STPG; kref_init(&pg->kref); INIT_DELAYED_WORK(&pg->rtpg_work, alua_rtpg_work); INIT_LIST_HEAD(&pg->rtpg_list); INIT_LIST_HEAD(&pg->node); INIT_LIST_HEAD(&pg->dh_list); spin_lock_init(&pg->lock); spin_lock(&port_group_lock); tmp_pg = alua_find_get_pg(pg->device_id_str, pg->device_id_len, group_id); if (tmp_pg) { spin_unlock(&port_group_lock); kfree(pg); return tmp_pg; } list_add(&pg->node, &port_group_list); spin_unlock(&port_group_lock); return pg; } /* * alua_check_tpgs - Evaluate TPGS setting * @sdev: device to be checked * * Examine the TPGS setting of the sdev to find out if ALUA * is supported. */ static int alua_check_tpgs(struct scsi_device *sdev) { int tpgs = TPGS_MODE_NONE; /* * ALUA support for non-disk devices is fraught with * difficulties, so disable it for now. 
*/ if (sdev->type != TYPE_DISK) { sdev_printk(KERN_INFO, sdev, "%s: disable for non-disk devices\n", ALUA_DH_NAME); return tpgs; } tpgs = scsi_device_tpgs(sdev); switch (tpgs) { case TPGS_MODE_EXPLICIT|TPGS_MODE_IMPLICIT: sdev_printk(KERN_INFO, sdev, "%s: supports implicit and explicit TPGS\n", ALUA_DH_NAME); break; case TPGS_MODE_EXPLICIT: sdev_printk(KERN_INFO, sdev, "%s: supports explicit TPGS\n", ALUA_DH_NAME); break; case TPGS_MODE_IMPLICIT: sdev_printk(KERN_INFO, sdev, "%s: supports implicit TPGS\n", ALUA_DH_NAME); break; case TPGS_MODE_NONE: sdev_printk(KERN_INFO, sdev, "%s: not supported\n", ALUA_DH_NAME); break; default: sdev_printk(KERN_INFO, sdev, "%s: unsupported TPGS setting %d\n", ALUA_DH_NAME, tpgs); tpgs = TPGS_MODE_NONE; break; } return tpgs; } /* * alua_check_vpd - Evaluate INQUIRY vpd page 0x83 * @sdev: device to be checked * * Extract the relative target port and the target port group * descriptor from the list of identificators. */ static int alua_check_vpd(struct scsi_device *sdev, struct alua_dh_data *h, int tpgs) { int rel_port = -1, group_id; struct alua_port_group *pg, *old_pg = NULL; bool pg_updated = false; unsigned long flags; group_id = scsi_vpd_tpg_id(sdev, &rel_port); if (group_id < 0) { /* * Internal error; TPGS supported but required * VPD identification descriptors not present. * Disable ALUA support */ sdev_printk(KERN_INFO, sdev, "%s: No target port descriptors found\n", ALUA_DH_NAME); return SCSI_DH_DEV_UNSUPP; } pg = alua_alloc_pg(sdev, group_id, tpgs); if (IS_ERR(pg)) { if (PTR_ERR(pg) == -ENOMEM) return SCSI_DH_NOMEM; return SCSI_DH_DEV_UNSUPP; } if (pg->device_id_len) sdev_printk(KERN_INFO, sdev, "%s: device %s port group %x rel port %x\n", ALUA_DH_NAME, pg->device_id_str, group_id, rel_port); else sdev_printk(KERN_INFO, sdev, "%s: port group %x rel port %x\n", ALUA_DH_NAME, group_id, rel_port); kref_get(&pg->kref); /* Check for existing port group references */ spin_lock(&h->pg_lock); old_pg = rcu_dereference_protected(h->pg, lockdep_is_held(&h->pg_lock)); if (old_pg != pg) { /* port group has changed. 
Update to new port group */ if (h->pg) { spin_lock_irqsave(&old_pg->lock, flags); list_del_rcu(&h->node); spin_unlock_irqrestore(&old_pg->lock, flags); } rcu_assign_pointer(h->pg, pg); pg_updated = true; } spin_lock_irqsave(&pg->lock, flags); if (pg_updated) list_add_rcu(&h->node, &pg->dh_list); spin_unlock_irqrestore(&pg->lock, flags); spin_unlock(&h->pg_lock); alua_rtpg_queue(pg, sdev, NULL, true); kref_put(&pg->kref, release_port_group); if (old_pg) kref_put(&old_pg->kref, release_port_group); return SCSI_DH_OK; } static char print_alua_state(unsigned char state) { switch (state) { case SCSI_ACCESS_STATE_OPTIMAL: return 'A'; case SCSI_ACCESS_STATE_ACTIVE: return 'N'; case SCSI_ACCESS_STATE_STANDBY: return 'S'; case SCSI_ACCESS_STATE_UNAVAILABLE: return 'U'; case SCSI_ACCESS_STATE_LBA: return 'L'; case SCSI_ACCESS_STATE_OFFLINE: return 'O'; case SCSI_ACCESS_STATE_TRANSITIONING: return 'T'; default: return 'X'; } } static enum scsi_disposition alua_check_sense(struct scsi_device *sdev, struct scsi_sense_hdr *sense_hdr) { struct alua_dh_data *h = sdev->handler_data; struct alua_port_group *pg; switch (sense_hdr->sense_key) { case NOT_READY: if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0a) { /* * LUN Not Accessible - ALUA state transition */ rcu_read_lock(); pg = rcu_dereference(h->pg); if (pg) pg->state = SCSI_ACCESS_STATE_TRANSITIONING; rcu_read_unlock(); alua_check(sdev, false); return NEEDS_RETRY; } break; case UNIT_ATTENTION: if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00) { /* * Power On, Reset, or Bus Device Reset. * Might have obscured a state transition, * so schedule a recheck. */ alua_check(sdev, true); return ADD_TO_MLQUEUE; } if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x04) /* * Device internal reset */ return ADD_TO_MLQUEUE; if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x01) /* * Mode Parameters Changed */ return ADD_TO_MLQUEUE; if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x06) { /* * ALUA state changed */ alua_check(sdev, true); return ADD_TO_MLQUEUE; } if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x07) { /* * Implicit ALUA state transition failed */ alua_check(sdev, true); return ADD_TO_MLQUEUE; } if (sense_hdr->asc == 0x3f && sense_hdr->ascq == 0x03) /* * Inquiry data has changed */ return ADD_TO_MLQUEUE; if (sense_hdr->asc == 0x3f && sense_hdr->ascq == 0x0e) /* * REPORTED_LUNS_DATA_HAS_CHANGED is reported * when switching controllers on targets like * Intel Multi-Flex. We can just retry. */ return ADD_TO_MLQUEUE; break; } return SCSI_RETURN_NOT_HANDLED; } /* * alua_tur - Send a TEST UNIT READY * @sdev: device to which the TEST UNIT READY command should be send * * Send a TEST UNIT READY to @sdev to figure out the device state * Returns SCSI_DH_RETRY if the sense code is NOT READY/ALUA TRANSITIONING, * SCSI_DH_OK if no error occurred, and SCSI_DH_IO otherwise. */ static int alua_tur(struct scsi_device *sdev) { struct scsi_sense_hdr sense_hdr; int retval; retval = scsi_test_unit_ready(sdev, ALUA_FAILOVER_TIMEOUT * HZ, ALUA_FAILOVER_RETRIES, &sense_hdr); if (sense_hdr.sense_key == NOT_READY && sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a) return SCSI_DH_RETRY; else if (retval) return SCSI_DH_IO; else return SCSI_DH_OK; } /* * alua_rtpg - Evaluate REPORT TARGET GROUP STATES * @sdev: the device to be evaluated. * * Evaluate the Target Port Group State. * Returns SCSI_DH_DEV_OFFLINED if the path is * found to be unusable. 
*/ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg) { struct scsi_sense_hdr sense_hdr; struct alua_port_group *tmp_pg; int len, k, off, bufflen = ALUA_RTPG_SIZE; int group_id_old, state_old, pref_old, valid_states_old; unsigned char *desc, *buff; unsigned err; int retval; unsigned int tpg_desc_tbl_off; unsigned char orig_transition_tmo; unsigned long flags; bool transitioning_sense = false; group_id_old = pg->group_id; state_old = pg->state; pref_old = pg->pref; valid_states_old = pg->valid_states; if (!pg->expiry) { unsigned long transition_tmo = ALUA_FAILOVER_TIMEOUT * HZ; if (pg->transition_tmo) transition_tmo = pg->transition_tmo * HZ; pg->expiry = round_jiffies_up(jiffies + transition_tmo); } buff = kzalloc(bufflen, GFP_KERNEL); if (!buff) return SCSI_DH_DEV_TEMP_BUSY; retry: err = 0; retval = submit_rtpg(sdev, buff, bufflen, &sense_hdr, pg->flags); if (retval) { /* * Some (broken) implementations have a habit of returning * an error during things like firmware update etc. * But if the target only supports active/optimized there's * not much we can do; it's not that we can switch paths * or anything. * So ignore any errors to avoid spurious failures during * path failover. */ if ((pg->valid_states & ~TPGS_SUPPORT_OPTIMIZED) == 0) { sdev_printk(KERN_INFO, sdev, "%s: ignoring rtpg result %d\n", ALUA_DH_NAME, retval); kfree(buff); return SCSI_DH_OK; } if (retval < 0 || !scsi_sense_valid(&sense_hdr)) { sdev_printk(KERN_INFO, sdev, "%s: rtpg failed, result %d\n", ALUA_DH_NAME, retval); kfree(buff); if (retval < 0) return SCSI_DH_DEV_TEMP_BUSY; if (host_byte(retval) == DID_NO_CONNECT) return SCSI_DH_RES_TEMP_UNAVAIL; return SCSI_DH_IO; } /* * submit_rtpg() has failed on existing arrays * when requesting extended header info, and * the array doesn't support extended headers, * even though it shouldn't according to T10. * The retry without rtpg_ext_hdr_req set * handles this. * Note: some arrays return a sense key of ILLEGAL_REQUEST * with ASC 00h if they don't support the extended header. */ if (!(pg->flags & ALUA_RTPG_EXT_HDR_UNSUPP) && sense_hdr.sense_key == ILLEGAL_REQUEST) { pg->flags |= ALUA_RTPG_EXT_HDR_UNSUPP; goto retry; } /* * If the array returns with 'ALUA state transition' * sense code here it cannot return RTPG data during * transition. So set the state to 'transitioning' directly. */ if (sense_hdr.sense_key == NOT_READY && sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a) { transitioning_sense = true; goto skip_rtpg; } /* * Retry on any other UNIT ATTENTION occurred. 
*/ if (sense_hdr.sense_key == UNIT_ATTENTION) err = SCSI_DH_RETRY; if (err == SCSI_DH_RETRY && pg->expiry != 0 && time_before(jiffies, pg->expiry)) { sdev_printk(KERN_ERR, sdev, "%s: rtpg retry\n", ALUA_DH_NAME); scsi_print_sense_hdr(sdev, ALUA_DH_NAME, &sense_hdr); kfree(buff); return err; } sdev_printk(KERN_ERR, sdev, "%s: rtpg failed\n", ALUA_DH_NAME); scsi_print_sense_hdr(sdev, ALUA_DH_NAME, &sense_hdr); kfree(buff); pg->expiry = 0; return SCSI_DH_IO; } len = get_unaligned_be32(&buff[0]) + 4; if (len > bufflen) { /* Resubmit with the correct length */ kfree(buff); bufflen = len; buff = kmalloc(bufflen, GFP_KERNEL); if (!buff) { sdev_printk(KERN_WARNING, sdev, "%s: kmalloc buffer failed\n",__func__); /* Temporary failure, bypass */ pg->expiry = 0; return SCSI_DH_DEV_TEMP_BUSY; } goto retry; } orig_transition_tmo = pg->transition_tmo; if ((buff[4] & RTPG_FMT_MASK) == RTPG_FMT_EXT_HDR && buff[5] != 0) pg->transition_tmo = buff[5]; else pg->transition_tmo = ALUA_FAILOVER_TIMEOUT; if (orig_transition_tmo != pg->transition_tmo) { sdev_printk(KERN_INFO, sdev, "%s: transition timeout set to %d seconds\n", ALUA_DH_NAME, pg->transition_tmo); pg->expiry = jiffies + pg->transition_tmo * HZ; } if ((buff[4] & RTPG_FMT_MASK) == RTPG_FMT_EXT_HDR) tpg_desc_tbl_off = 8; else tpg_desc_tbl_off = 4; for (k = tpg_desc_tbl_off, desc = buff + tpg_desc_tbl_off; k < len; k += off, desc += off) { u16 group_id = get_unaligned_be16(&desc[2]); spin_lock_irqsave(&port_group_lock, flags); tmp_pg = alua_find_get_pg(pg->device_id_str, pg->device_id_len, group_id); spin_unlock_irqrestore(&port_group_lock, flags); if (tmp_pg) { if (spin_trylock_irqsave(&tmp_pg->lock, flags)) { if ((tmp_pg == pg) || !(tmp_pg->flags & ALUA_PG_RUNNING)) { struct alua_dh_data *h; tmp_pg->state = desc[0] & 0x0f; tmp_pg->pref = desc[0] >> 7; rcu_read_lock(); list_for_each_entry_rcu(h, &tmp_pg->dh_list, node) { if (!h->sdev) continue; h->sdev->access_state = desc[0]; } rcu_read_unlock(); } if (tmp_pg == pg) tmp_pg->valid_states = desc[1]; spin_unlock_irqrestore(&tmp_pg->lock, flags); } kref_put(&tmp_pg->kref, release_port_group); } off = 8 + (desc[7] * 4); } skip_rtpg: spin_lock_irqsave(&pg->lock, flags); if (transitioning_sense) pg->state = SCSI_ACCESS_STATE_TRANSITIONING; if (group_id_old != pg->group_id || state_old != pg->state || pref_old != pg->pref || valid_states_old != pg->valid_states) sdev_printk(KERN_INFO, sdev, "%s: port group %02x state %c %s supports %c%c%c%c%c%c%c\n", ALUA_DH_NAME, pg->group_id, print_alua_state(pg->state), pg->pref ? 
"preferred" : "non-preferred", pg->valid_states&TPGS_SUPPORT_TRANSITION?'T':'t', pg->valid_states&TPGS_SUPPORT_OFFLINE?'O':'o', pg->valid_states&TPGS_SUPPORT_LBA_DEPENDENT?'L':'l', pg->valid_states&TPGS_SUPPORT_UNAVAILABLE?'U':'u', pg->valid_states&TPGS_SUPPORT_STANDBY?'S':'s', pg->valid_states&TPGS_SUPPORT_NONOPTIMIZED?'N':'n', pg->valid_states&TPGS_SUPPORT_OPTIMIZED?'A':'a'); switch (pg->state) { case SCSI_ACCESS_STATE_TRANSITIONING: if (time_before(jiffies, pg->expiry)) { /* State transition, retry */ pg->interval = ALUA_RTPG_RETRY_DELAY; err = SCSI_DH_RETRY; } else { struct alua_dh_data *h; /* Transitioning time exceeded, set port to standby */ err = SCSI_DH_IO; pg->state = SCSI_ACCESS_STATE_STANDBY; pg->expiry = 0; rcu_read_lock(); list_for_each_entry_rcu(h, &pg->dh_list, node) { if (!h->sdev) continue; h->sdev->access_state = (pg->state & SCSI_ACCESS_STATE_MASK); if (pg->pref) h->sdev->access_state |= SCSI_ACCESS_STATE_PREFERRED; } rcu_read_unlock(); } break; case SCSI_ACCESS_STATE_OFFLINE: /* Path unusable */ err = SCSI_DH_DEV_OFFLINED; pg->expiry = 0; break; default: /* Useable path if active */ err = SCSI_DH_OK; pg->expiry = 0; break; } spin_unlock_irqrestore(&pg->lock, flags); kfree(buff); return err; } /* * alua_stpg - Issue a SET TARGET PORT GROUP command * * Issue a SET TARGET PORT GROUP command and evaluate the * response. Returns SCSI_DH_RETRY per default to trigger * a re-evaluation of the target group state or SCSI_DH_OK * if no further action needs to be taken. */ static unsigned alua_stpg(struct scsi_device *sdev, struct alua_port_group *pg) { int retval; struct scsi_sense_hdr sense_hdr; if (!(pg->tpgs & TPGS_MODE_EXPLICIT)) { /* Only implicit ALUA supported, retry */ return SCSI_DH_RETRY; } switch (pg->state) { case SCSI_ACCESS_STATE_OPTIMAL: return SCSI_DH_OK; case SCSI_ACCESS_STATE_ACTIVE: if ((pg->flags & ALUA_OPTIMIZE_STPG) && !pg->pref && (pg->tpgs & TPGS_MODE_IMPLICIT)) return SCSI_DH_OK; break; case SCSI_ACCESS_STATE_STANDBY: case SCSI_ACCESS_STATE_UNAVAILABLE: break; case SCSI_ACCESS_STATE_OFFLINE: return SCSI_DH_IO; case SCSI_ACCESS_STATE_TRANSITIONING: break; default: sdev_printk(KERN_INFO, sdev, "%s: stpg failed, unhandled TPGS state %d", ALUA_DH_NAME, pg->state); return SCSI_DH_NOSYS; } retval = submit_stpg(sdev, pg->group_id, &sense_hdr); if (retval) { if (retval < 0 || !scsi_sense_valid(&sense_hdr)) { sdev_printk(KERN_INFO, sdev, "%s: stpg failed, result %d", ALUA_DH_NAME, retval); if (retval < 0) return SCSI_DH_DEV_TEMP_BUSY; } else { sdev_printk(KERN_INFO, sdev, "%s: stpg failed\n", ALUA_DH_NAME); scsi_print_sense_hdr(sdev, ALUA_DH_NAME, &sense_hdr); } } /* Retry RTPG */ return SCSI_DH_RETRY; } /* * The caller must call scsi_device_put() on the returned pointer if it is not * NULL. */ static struct scsi_device * __must_check alua_rtpg_select_sdev(struct alua_port_group *pg) { struct alua_dh_data *h; struct scsi_device *sdev = NULL, *prev_sdev; lockdep_assert_held(&pg->lock); if (WARN_ON(!pg->rtpg_sdev)) return NULL; /* * RCU protection isn't necessary for dh_list here * as we hold pg->lock, but for access to h->pg. */ rcu_read_lock(); list_for_each_entry_rcu(h, &pg->dh_list, node) { if (!h->sdev) continue; if (h->sdev == pg->rtpg_sdev) { h->disabled = true; continue; } if (rcu_dereference(h->pg) == pg && !h->disabled && !scsi_device_get(h->sdev)) { sdev = h->sdev; break; } } rcu_read_unlock(); if (!sdev) { pr_warn("%s: no device found for rtpg\n", (pg->device_id_len ? 
(char *)pg->device_id_str : "(nameless PG)")); return NULL; } sdev_printk(KERN_INFO, sdev, "rtpg retry on different device\n"); prev_sdev = pg->rtpg_sdev; pg->rtpg_sdev = sdev; return prev_sdev; } static void alua_rtpg_work(struct work_struct *work) { struct alua_port_group *pg = container_of(work, struct alua_port_group, rtpg_work.work); struct scsi_device *sdev, *prev_sdev = NULL; LIST_HEAD(qdata_list); int err = SCSI_DH_OK; struct alua_queue_data *qdata, *tmp; struct alua_dh_data *h; unsigned long flags; spin_lock_irqsave(&pg->lock, flags); sdev = pg->rtpg_sdev; if (!sdev) { WARN_ON(pg->flags & ALUA_PG_RUN_RTPG); WARN_ON(pg->flags & ALUA_PG_RUN_STPG); spin_unlock_irqrestore(&pg->lock, flags); kref_put(&pg->kref, release_port_group); return; } pg->flags |= ALUA_PG_RUNNING; if (pg->flags & ALUA_PG_RUN_RTPG) { int state = pg->state; pg->flags &= ~ALUA_PG_RUN_RTPG; spin_unlock_irqrestore(&pg->lock, flags); if (state == SCSI_ACCESS_STATE_TRANSITIONING) { if (alua_tur(sdev) == SCSI_DH_RETRY) { spin_lock_irqsave(&pg->lock, flags); pg->flags &= ~ALUA_PG_RUNNING; pg->flags |= ALUA_PG_RUN_RTPG; if (!pg->interval) pg->interval = ALUA_RTPG_RETRY_DELAY; spin_unlock_irqrestore(&pg->lock, flags); queue_delayed_work(kaluad_wq, &pg->rtpg_work, pg->interval * HZ); return; } /* Send RTPG on failure or if TUR indicates SUCCESS */ } err = alua_rtpg(sdev, pg); spin_lock_irqsave(&pg->lock, flags); /* If RTPG failed on the current device, try using another */ if (err == SCSI_DH_RES_TEMP_UNAVAIL && (prev_sdev = alua_rtpg_select_sdev(pg))) err = SCSI_DH_IMM_RETRY; if (err == SCSI_DH_RETRY || err == SCSI_DH_IMM_RETRY || pg->flags & ALUA_PG_RUN_RTPG) { pg->flags &= ~ALUA_PG_RUNNING; if (err == SCSI_DH_IMM_RETRY) pg->interval = 0; else if (!pg->interval && !(pg->flags & ALUA_PG_RUN_RTPG)) pg->interval = ALUA_RTPG_RETRY_DELAY; pg->flags |= ALUA_PG_RUN_RTPG; spin_unlock_irqrestore(&pg->lock, flags); goto queue_rtpg; } if (err != SCSI_DH_OK) pg->flags &= ~ALUA_PG_RUN_STPG; } if (pg->flags & ALUA_PG_RUN_STPG) { pg->flags &= ~ALUA_PG_RUN_STPG; spin_unlock_irqrestore(&pg->lock, flags); err = alua_stpg(sdev, pg); spin_lock_irqsave(&pg->lock, flags); if (err == SCSI_DH_RETRY || pg->flags & ALUA_PG_RUN_RTPG) { pg->flags |= ALUA_PG_RUN_RTPG; pg->interval = 0; pg->flags &= ~ALUA_PG_RUNNING; spin_unlock_irqrestore(&pg->lock, flags); goto queue_rtpg; } } list_splice_init(&pg->rtpg_list, &qdata_list); /* * We went through an RTPG, for good or bad. * Re-enable all devices for the next attempt. */ list_for_each_entry(h, &pg->dh_list, node) h->disabled = false; pg->rtpg_sdev = NULL; spin_unlock_irqrestore(&pg->lock, flags); if (prev_sdev) scsi_device_put(prev_sdev); list_for_each_entry_safe(qdata, tmp, &qdata_list, entry) { list_del(&qdata->entry); if (qdata->callback_fn) qdata->callback_fn(qdata->callback_data, err); kfree(qdata); } spin_lock_irqsave(&pg->lock, flags); pg->flags &= ~ALUA_PG_RUNNING; spin_unlock_irqrestore(&pg->lock, flags); scsi_device_put(sdev); kref_put(&pg->kref, release_port_group); return; queue_rtpg: if (prev_sdev) scsi_device_put(prev_sdev); queue_delayed_work(kaluad_wq, &pg->rtpg_work, pg->interval * HZ); } /** * alua_rtpg_queue() - cause RTPG to be submitted asynchronously * @pg: ALUA port group associated with @sdev. * @sdev: SCSI device for which to submit an RTPG. * @qdata: Information about the callback to invoke after the RTPG. * @force: Whether or not to submit an RTPG if a work item that will submit an * RTPG already has been scheduled. 
* * Returns true if and only if alua_rtpg_work() will be called asynchronously. * That function is responsible for calling @qdata->fn(). * * Context: may be called from atomic context (alua_check()) only if the caller * holds an sdev reference. */ static bool alua_rtpg_queue(struct alua_port_group *pg, struct scsi_device *sdev, struct alua_queue_data *qdata, bool force) { int start_queue = 0; unsigned long flags; if (WARN_ON_ONCE(!pg) || scsi_device_get(sdev)) return false; spin_lock_irqsave(&pg->lock, flags); if (qdata) { list_add_tail(&qdata->entry, &pg->rtpg_list); pg->flags |= ALUA_PG_RUN_STPG; force = true; } if (pg->rtpg_sdev == NULL) { struct alua_dh_data *h = sdev->handler_data; rcu_read_lock(); if (h && rcu_dereference(h->pg) == pg) { pg->interval = 0; pg->flags |= ALUA_PG_RUN_RTPG; kref_get(&pg->kref); pg->rtpg_sdev = sdev; start_queue = 1; } rcu_read_unlock(); } else if (!(pg->flags & ALUA_PG_RUN_RTPG) && force) { pg->flags |= ALUA_PG_RUN_RTPG; /* Do not queue if the worker is already running */ if (!(pg->flags & ALUA_PG_RUNNING)) { kref_get(&pg->kref); start_queue = 1; } } spin_unlock_irqrestore(&pg->lock, flags); if (start_queue) { if (queue_delayed_work(kaluad_wq, &pg->rtpg_work, msecs_to_jiffies(ALUA_RTPG_DELAY_MSECS))) sdev = NULL; else kref_put(&pg->kref, release_port_group); } if (sdev) scsi_device_put(sdev); return true; } /* * alua_initialize - Initialize ALUA state * @sdev: the device to be initialized * * For the prep_fn to work correctly we have * to initialize the ALUA state for the device. */ static int alua_initialize(struct scsi_device *sdev, struct alua_dh_data *h) { int err = SCSI_DH_DEV_UNSUPP, tpgs; mutex_lock(&h->init_mutex); h->disabled = false; tpgs = alua_check_tpgs(sdev); if (tpgs != TPGS_MODE_NONE) err = alua_check_vpd(sdev, h, tpgs); h->init_error = err; mutex_unlock(&h->init_mutex); return err; } /* * alua_set_params - set/unset the optimize flag * @sdev: device on the path to be activated * params - parameters in the following format * "no_of_params\0param1\0param2\0param3\0...\0" * For example, to set the flag pass the following parameters * from multipath.conf * hardware_handler "2 alua 1" */ static int alua_set_params(struct scsi_device *sdev, const char *params) { struct alua_dh_data *h = sdev->handler_data; struct alua_port_group *pg = NULL; unsigned int optimize = 0, argc; const char *p = params; int result = SCSI_DH_OK; unsigned long flags; if ((sscanf(params, "%u", &argc) != 1) || (argc != 1)) return -EINVAL; while (*p++) ; if ((sscanf(p, "%u", &optimize) != 1) || (optimize > 1)) return -EINVAL; rcu_read_lock(); pg = rcu_dereference(h->pg); if (!pg) { rcu_read_unlock(); return -ENXIO; } spin_lock_irqsave(&pg->lock, flags); if (optimize) pg->flags |= ALUA_OPTIMIZE_STPG; else pg->flags &= ~ALUA_OPTIMIZE_STPG; spin_unlock_irqrestore(&pg->lock, flags); rcu_read_unlock(); return result; } /* * alua_activate - activate a path * @sdev: device on the path to be activated * * We're currently switching the port group to be activated only and * let the array figure out the rest. * There may be other arrays which require us to switch all port groups * based on a certain policy. But until we actually encounter them it * should be okay. 
*/ static int alua_activate(struct scsi_device *sdev, activate_complete fn, void *data) { struct alua_dh_data *h = sdev->handler_data; int err = SCSI_DH_OK; struct alua_queue_data *qdata; struct alua_port_group *pg; qdata = kzalloc(sizeof(*qdata), GFP_KERNEL); if (!qdata) { err = SCSI_DH_RES_TEMP_UNAVAIL; goto out; } qdata->callback_fn = fn; qdata->callback_data = data; mutex_lock(&h->init_mutex); rcu_read_lock(); pg = rcu_dereference(h->pg); if (!pg || !kref_get_unless_zero(&pg->kref)) { rcu_read_unlock(); kfree(qdata); err = h->init_error; mutex_unlock(&h->init_mutex); goto out; } rcu_read_unlock(); mutex_unlock(&h->init_mutex); if (alua_rtpg_queue(pg, sdev, qdata, true)) { fn = NULL; } else { kfree(qdata); err = SCSI_DH_DEV_OFFLINED; } kref_put(&pg->kref, release_port_group); out: if (fn) fn(data, err); return 0; } /* * alua_check - check path status * @sdev: device on the path to be checked * * Check the device status */ static void alua_check(struct scsi_device *sdev, bool force) { struct alua_dh_data *h = sdev->handler_data; struct alua_port_group *pg; rcu_read_lock(); pg = rcu_dereference(h->pg); if (!pg || !kref_get_unless_zero(&pg->kref)) { rcu_read_unlock(); return; } rcu_read_unlock(); alua_rtpg_queue(pg, sdev, NULL, force); kref_put(&pg->kref, release_port_group); } /* * alua_prep_fn - request callback * * Fail I/O to all paths not in state * active/optimized or active/non-optimized. */ static blk_status_t alua_prep_fn(struct scsi_device *sdev, struct request *req) { struct alua_dh_data *h = sdev->handler_data; struct alua_port_group *pg; unsigned char state = SCSI_ACCESS_STATE_OPTIMAL; rcu_read_lock(); pg = rcu_dereference(h->pg); if (pg) state = pg->state; rcu_read_unlock(); switch (state) { case SCSI_ACCESS_STATE_OPTIMAL: case SCSI_ACCESS_STATE_ACTIVE: case SCSI_ACCESS_STATE_LBA: case SCSI_ACCESS_STATE_TRANSITIONING: return BLK_STS_OK; default: req->rq_flags |= RQF_QUIET; return BLK_STS_IOERR; } } static void alua_rescan(struct scsi_device *sdev) { struct alua_dh_data *h = sdev->handler_data; alua_initialize(sdev, h); } /* * alua_bus_attach - Attach device handler * @sdev: device to be attached to */ static int alua_bus_attach(struct scsi_device *sdev) { struct alua_dh_data *h; int err; h = kzalloc(sizeof(*h) , GFP_KERNEL); if (!h) return SCSI_DH_NOMEM; spin_lock_init(&h->pg_lock); rcu_assign_pointer(h->pg, NULL); h->init_error = SCSI_DH_OK; h->sdev = sdev; INIT_LIST_HEAD(&h->node); mutex_init(&h->init_mutex); err = alua_initialize(sdev, h); if (err != SCSI_DH_OK && err != SCSI_DH_DEV_OFFLINED) goto failed; sdev->handler_data = h; return SCSI_DH_OK; failed: kfree(h); return err; } /* * alua_bus_detach - Detach device handler * @sdev: device to be detached from */ static void alua_bus_detach(struct scsi_device *sdev) { struct alua_dh_data *h = sdev->handler_data; struct alua_port_group *pg; spin_lock(&h->pg_lock); pg = rcu_dereference_protected(h->pg, lockdep_is_held(&h->pg_lock)); rcu_assign_pointer(h->pg, NULL); spin_unlock(&h->pg_lock); if (pg) { spin_lock_irq(&pg->lock); list_del_rcu(&h->node); spin_unlock_irq(&pg->lock); kref_put(&pg->kref, release_port_group); } sdev->handler_data = NULL; synchronize_rcu(); kfree(h); } static struct scsi_device_handler alua_dh = { .name = ALUA_DH_NAME, .module = THIS_MODULE, .attach = alua_bus_attach, .detach = alua_bus_detach, .prep_fn = alua_prep_fn, .check_sense = alua_check_sense, .activate = alua_activate, .rescan = alua_rescan, .set_params = alua_set_params, }; static int __init alua_init(void) { int r; kaluad_wq = 
alloc_workqueue("kaluad", WQ_MEM_RECLAIM, 0); if (!kaluad_wq) return -ENOMEM; r = scsi_register_device_handler(&alua_dh); if (r != 0) { printk(KERN_ERR "%s: Failed to register scsi device handler", ALUA_DH_NAME); destroy_workqueue(kaluad_wq); } return r; } static void __exit alua_exit(void) { scsi_unregister_device_handler(&alua_dh); destroy_workqueue(kaluad_wq); } module_init(alua_init); module_exit(alua_exit); MODULE_DESCRIPTION("DM Multipath ALUA support"); MODULE_AUTHOR("Hannes Reinecke <[email protected]>"); MODULE_LICENSE("GPL"); MODULE_VERSION(ALUA_DH_VER);
linux-master
drivers/scsi/device_handler/scsi_dh_alua.c
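Editor's note: alua_rtpg() above parses the REPORT TARGET PORT GROUPS payload -- a 4-byte big-endian length header followed by descriptors whose access state sits in the low nibble of byte 0 (PREF in bit 7), whose group id is in bytes 2-3, and whose size is 8 + 4 * the target-port count in byte 7. The user-space sketch below walks the same layout with hand-rolled big-endian helpers instead of the kernel's get_unaligned_be*() and assumes the plain 4-byte (non-extended) header; walk_rtpg and the fabricated response bytes are illustration only.

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

static uint16_t be16(const uint8_t *p) { return (uint16_t)(p[0] << 8 | p[1]); }
static uint32_t be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | p[3];
}

static void walk_rtpg(const uint8_t *buf, size_t buflen)
{
	size_t len = be32(buf) + 4;	/* returned data length + length field */
	size_t off, k = 4;		/* assume the 4-byte, non-extended header */

	if (len > buflen)
		len = buflen;		/* truncated response: parse what we have */
	while (k + 8 <= len) {
		const uint8_t *desc = buf + k;
		unsigned int state = desc[0] & 0x0f;
		unsigned int pref  = desc[0] >> 7;
		unsigned int group = be16(&desc[2]);

		printf("port group %#x state %u%s\n",
		       group, state, pref ? " (preferred)" : "");
		off = 8 + (size_t)desc[7] * 4;	/* descriptor + port entries */
		k += off;
	}
}

int main(void)
{
	/* One fabricated descriptor: group 1, active/optimized, preferred, 1 port. */
	static const uint8_t rsp[] = {
		0x00, 0x00, 0x00, 0x0c,	/* 12 bytes of descriptor data follow */
		0x80, 0x01, 0x00, 0x01,	/* PREF=1, state=0 (AO), group id 1 */
		0x00, 0x00, 0x00, 0x01,	/* reserved/status/vendor, 1 port */
		0x00, 0x00, 0x00, 0x00,	/* relative target port id entry */
	};
	walk_rtpg(rsp, sizeof(rsp));
	return 0;
}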
/* * LSI/Engenio/NetApp E-Series RDAC SCSI Device Handler * * Copyright (C) 2005 Mike Christie. All rights reserved. * Copyright (C) Chandra Seetharaman, IBM Corp. 2007 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * */ #include <scsi/scsi.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_dh.h> #include <linux/workqueue.h> #include <linux/slab.h> #include <linux/module.h> #define RDAC_NAME "rdac" #define RDAC_RETRY_COUNT 5 /* * LSI mode page stuff * * These struct definitions and the forming of the * mode page were taken from the LSI RDAC 2.4 GPL'd * driver, and then converted to Linux conventions. */ #define RDAC_QUIESCENCE_TIME 20 /* * Page Codes */ #define RDAC_PAGE_CODE_REDUNDANT_CONTROLLER 0x2c /* * Controller modes definitions */ #define RDAC_MODE_TRANSFER_SPECIFIED_LUNS 0x02 /* * RDAC Options field */ #define RDAC_FORCED_QUIESENCE 0x02 #define RDAC_TIMEOUT (60 * HZ) #define RDAC_RETRIES 3 struct rdac_mode_6_hdr { u8 data_len; u8 medium_type; u8 device_params; u8 block_desc_len; }; struct rdac_mode_10_hdr { u16 data_len; u8 medium_type; u8 device_params; u16 reserved; u16 block_desc_len; }; struct rdac_mode_common { u8 controller_serial[16]; u8 alt_controller_serial[16]; u8 rdac_mode[2]; u8 alt_rdac_mode[2]; u8 quiescence_timeout; u8 rdac_options; }; struct rdac_pg_legacy { struct rdac_mode_6_hdr hdr; u8 page_code; u8 page_len; struct rdac_mode_common common; #define MODE6_MAX_LUN 32 u8 lun_table[MODE6_MAX_LUN]; u8 reserved2[32]; u8 reserved3; u8 reserved4; }; struct rdac_pg_expanded { struct rdac_mode_10_hdr hdr; u8 page_code; u8 subpage_code; u8 page_len[2]; struct rdac_mode_common common; u8 lun_table[256]; u8 reserved3; u8 reserved4; }; struct c9_inquiry { u8 peripheral_info; u8 page_code; /* 0xC9 */ u8 reserved1; u8 page_len; u8 page_id[4]; /* "vace" */ u8 avte_cvp; u8 path_prio; u8 reserved2[38]; }; #define SUBSYS_ID_LEN 16 #define SLOT_ID_LEN 2 #define ARRAY_LABEL_LEN 31 struct c4_inquiry { u8 peripheral_info; u8 page_code; /* 0xC4 */ u8 reserved1; u8 page_len; u8 page_id[4]; /* "subs" */ u8 subsys_id[SUBSYS_ID_LEN]; u8 revision[4]; u8 slot_id[SLOT_ID_LEN]; u8 reserved[2]; }; #define UNIQUE_ID_LEN 16 struct c8_inquiry { u8 peripheral_info; u8 page_code; /* 0xC8 */ u8 reserved1; u8 page_len; u8 page_id[4]; /* "edid" */ u8 reserved2[3]; u8 vol_uniq_id_len; u8 vol_uniq_id[16]; u8 vol_user_label_len; u8 vol_user_label[60]; u8 array_uniq_id_len; u8 array_unique_id[UNIQUE_ID_LEN]; u8 array_user_label_len; u8 array_user_label[60]; u8 lun[8]; }; struct rdac_controller { u8 array_id[UNIQUE_ID_LEN]; int use_ms10; struct kref kref; struct list_head node; /* list of all controllers */ union { struct rdac_pg_legacy legacy; struct rdac_pg_expanded expanded; } mode_select; u8 index; u8 array_name[ARRAY_LABEL_LEN]; struct Scsi_Host *host; spinlock_t ms_lock; int ms_queued; struct work_struct ms_work; struct scsi_device *ms_sdev; struct list_head 
ms_head; struct list_head dh_list; }; struct c2_inquiry { u8 peripheral_info; u8 page_code; /* 0xC2 */ u8 reserved1; u8 page_len; u8 page_id[4]; /* "swr4" */ u8 sw_version[3]; u8 sw_date[3]; u8 features_enabled; u8 max_lun_supported; u8 partitions[239]; /* Total allocation length should be 0xFF */ }; struct rdac_dh_data { struct list_head node; struct rdac_controller *ctlr; struct scsi_device *sdev; #define UNINITIALIZED_LUN (1 << 8) unsigned lun; #define RDAC_MODE 0 #define RDAC_MODE_AVT 1 #define RDAC_MODE_IOSHIP 2 unsigned char mode; #define RDAC_STATE_ACTIVE 0 #define RDAC_STATE_PASSIVE 1 unsigned char state; #define RDAC_LUN_UNOWNED 0 #define RDAC_LUN_OWNED 1 char lun_state; #define RDAC_PREFERRED 0 #define RDAC_NON_PREFERRED 1 char preferred; union { struct c2_inquiry c2; struct c4_inquiry c4; struct c8_inquiry c8; struct c9_inquiry c9; } inq; }; static const char *mode[] = { "RDAC", "AVT", "IOSHIP", }; static const char *lun_state[] = { "unowned", "owned", }; struct rdac_queue_data { struct list_head entry; struct rdac_dh_data *h; activate_complete callback_fn; void *callback_data; }; static LIST_HEAD(ctlr_list); static DEFINE_SPINLOCK(list_lock); static struct workqueue_struct *kmpath_rdacd; static void send_mode_select(struct work_struct *work); /* * module parameter to enable rdac debug logging. * 2 bits for each type of logging, only two types defined for now * Can be enhanced if required at later point */ static int rdac_logging = 1; module_param(rdac_logging, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(rdac_logging, "A bit mask of rdac logging levels, " "Default is 1 - failover logging enabled, " "set it to 0xF to enable all the logs"); #define RDAC_LOG_FAILOVER 0 #define RDAC_LOG_SENSE 2 #define RDAC_LOG_BITS 2 #define RDAC_LOG_LEVEL(SHIFT) \ ((rdac_logging >> (SHIFT)) & ((1 << (RDAC_LOG_BITS)) - 1)) #define RDAC_LOG(SHIFT, sdev, f, arg...) \ do { \ if (unlikely(RDAC_LOG_LEVEL(SHIFT))) \ sdev_printk(KERN_INFO, sdev, RDAC_NAME ": " f "\n", ## arg); \ } while (0); static unsigned int rdac_failover_get(struct rdac_controller *ctlr, struct list_head *list, unsigned char *cdb) { struct rdac_mode_common *common; unsigned data_size; struct rdac_queue_data *qdata; u8 *lun_table; if (ctlr->use_ms10) { struct rdac_pg_expanded *rdac_pg; data_size = sizeof(struct rdac_pg_expanded); rdac_pg = &ctlr->mode_select.expanded; memset(rdac_pg, 0, data_size); common = &rdac_pg->common; rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER + 0x40; rdac_pg->subpage_code = 0x1; rdac_pg->page_len[0] = 0x01; rdac_pg->page_len[1] = 0x28; lun_table = rdac_pg->lun_table; } else { struct rdac_pg_legacy *rdac_pg; data_size = sizeof(struct rdac_pg_legacy); rdac_pg = &ctlr->mode_select.legacy; memset(rdac_pg, 0, data_size); common = &rdac_pg->common; rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER; rdac_pg->page_len = 0x68; lun_table = rdac_pg->lun_table; } common->rdac_mode[1] = RDAC_MODE_TRANSFER_SPECIFIED_LUNS; common->quiescence_timeout = RDAC_QUIESCENCE_TIME; common->rdac_options = RDAC_FORCED_QUIESENCE; list_for_each_entry(qdata, list, entry) { lun_table[qdata->h->lun] = 0x81; } /* Prepare the command. 
*/ if (ctlr->use_ms10) { cdb[0] = MODE_SELECT_10; cdb[7] = data_size >> 8; cdb[8] = data_size & 0xff; } else { cdb[0] = MODE_SELECT; cdb[4] = data_size; } return data_size; } static void release_controller(struct kref *kref) { struct rdac_controller *ctlr; ctlr = container_of(kref, struct rdac_controller, kref); list_del(&ctlr->node); kfree(ctlr); } static struct rdac_controller *get_controller(int index, char *array_name, u8 *array_id, struct scsi_device *sdev) { struct rdac_controller *ctlr, *tmp; list_for_each_entry(tmp, &ctlr_list, node) { if ((memcmp(tmp->array_id, array_id, UNIQUE_ID_LEN) == 0) && (tmp->index == index) && (tmp->host == sdev->host)) { kref_get(&tmp->kref); return tmp; } } ctlr = kmalloc(sizeof(*ctlr), GFP_ATOMIC); if (!ctlr) return NULL; /* initialize fields of controller */ memcpy(ctlr->array_id, array_id, UNIQUE_ID_LEN); ctlr->index = index; ctlr->host = sdev->host; memcpy(ctlr->array_name, array_name, ARRAY_LABEL_LEN); kref_init(&ctlr->kref); ctlr->use_ms10 = -1; ctlr->ms_queued = 0; ctlr->ms_sdev = NULL; spin_lock_init(&ctlr->ms_lock); INIT_WORK(&ctlr->ms_work, send_mode_select); INIT_LIST_HEAD(&ctlr->ms_head); list_add(&ctlr->node, &ctlr_list); INIT_LIST_HEAD(&ctlr->dh_list); return ctlr; } static int get_lun_info(struct scsi_device *sdev, struct rdac_dh_data *h, char *array_name, u8 *array_id) { int err = SCSI_DH_IO, i; struct c8_inquiry *inqp = &h->inq.c8; if (!scsi_get_vpd_page(sdev, 0xC8, (unsigned char *)inqp, sizeof(struct c8_inquiry))) { if (inqp->page_code != 0xc8) return SCSI_DH_NOSYS; if (inqp->page_id[0] != 'e' || inqp->page_id[1] != 'd' || inqp->page_id[2] != 'i' || inqp->page_id[3] != 'd') return SCSI_DH_NOSYS; h->lun = inqp->lun[7]; /* Uses only the last byte */ for(i=0; i<ARRAY_LABEL_LEN-1; ++i) *(array_name+i) = inqp->array_user_label[(2*i)+1]; *(array_name+ARRAY_LABEL_LEN-1) = '\0'; memset(array_id, 0, UNIQUE_ID_LEN); memcpy(array_id, inqp->array_unique_id, inqp->array_uniq_id_len); err = SCSI_DH_OK; } return err; } static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h) { int err = SCSI_DH_IO, access_state; struct rdac_dh_data *tmp; struct c9_inquiry *inqp = &h->inq.c9; h->state = RDAC_STATE_ACTIVE; if (!scsi_get_vpd_page(sdev, 0xC9, (unsigned char *)inqp, sizeof(struct c9_inquiry))) { /* detect the operating mode */ if ((inqp->avte_cvp >> 5) & 0x1) h->mode = RDAC_MODE_IOSHIP; /* LUN in IOSHIP mode */ else if (inqp->avte_cvp >> 7) h->mode = RDAC_MODE_AVT; /* LUN in AVT mode */ else h->mode = RDAC_MODE; /* LUN in RDAC mode */ /* Update ownership */ if (inqp->avte_cvp & 0x1) { h->lun_state = RDAC_LUN_OWNED; access_state = SCSI_ACCESS_STATE_OPTIMAL; } else { h->lun_state = RDAC_LUN_UNOWNED; if (h->mode == RDAC_MODE) { h->state = RDAC_STATE_PASSIVE; access_state = SCSI_ACCESS_STATE_STANDBY; } else access_state = SCSI_ACCESS_STATE_ACTIVE; } /* Update path prio*/ if (inqp->path_prio & 0x1) { h->preferred = RDAC_PREFERRED; access_state |= SCSI_ACCESS_STATE_PREFERRED; } else h->preferred = RDAC_NON_PREFERRED; rcu_read_lock(); list_for_each_entry_rcu(tmp, &h->ctlr->dh_list, node) { /* h->sdev should always be valid */ BUG_ON(!tmp->sdev); tmp->sdev->access_state = access_state; } rcu_read_unlock(); err = SCSI_DH_OK; } return err; } static int initialize_controller(struct scsi_device *sdev, struct rdac_dh_data *h, char *array_name, u8 *array_id) { int err = SCSI_DH_IO, index; struct c4_inquiry *inqp = &h->inq.c4; if (!scsi_get_vpd_page(sdev, 0xC4, (unsigned char *)inqp, sizeof(struct c4_inquiry))) { /* get the controller index */ if 
(inqp->slot_id[1] == 0x31) index = 0; else index = 1; spin_lock(&list_lock); h->ctlr = get_controller(index, array_name, array_id, sdev); if (!h->ctlr) err = SCSI_DH_RES_TEMP_UNAVAIL; else { h->sdev = sdev; list_add_rcu(&h->node, &h->ctlr->dh_list); } spin_unlock(&list_lock); err = SCSI_DH_OK; } return err; } static int set_mode_select(struct scsi_device *sdev, struct rdac_dh_data *h) { int err = SCSI_DH_IO; struct c2_inquiry *inqp = &h->inq.c2; if (!scsi_get_vpd_page(sdev, 0xC2, (unsigned char *)inqp, sizeof(struct c2_inquiry))) { /* * If more than MODE6_MAX_LUN luns are supported, use * mode select 10 */ if (inqp->max_lun_supported >= MODE6_MAX_LUN) h->ctlr->use_ms10 = 1; else h->ctlr->use_ms10 = 0; err = SCSI_DH_OK; } return err; } static int mode_select_handle_sense(struct scsi_device *sdev, struct scsi_sense_hdr *sense_hdr) { int err = SCSI_DH_IO; struct rdac_dh_data *h = sdev->handler_data; if (!scsi_sense_valid(sense_hdr)) goto done; switch (sense_hdr->sense_key) { case NO_SENSE: case ABORTED_COMMAND: case UNIT_ATTENTION: err = SCSI_DH_RETRY; break; case NOT_READY: if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x01) /* LUN Not Ready and is in the Process of Becoming * Ready */ err = SCSI_DH_RETRY; break; case ILLEGAL_REQUEST: if (sense_hdr->asc == 0x91 && sense_hdr->ascq == 0x36) /* * Command Lock contention */ err = SCSI_DH_IMM_RETRY; break; default: break; } RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, " "MODE_SELECT returned with sense %02x/%02x/%02x", (char *) h->ctlr->array_name, h->ctlr->index, sense_hdr->sense_key, sense_hdr->asc, sense_hdr->ascq); done: return err; } static void send_mode_select(struct work_struct *work) { struct rdac_controller *ctlr = container_of(work, struct rdac_controller, ms_work); struct scsi_device *sdev = ctlr->ms_sdev; struct rdac_dh_data *h = sdev->handler_data; int err = SCSI_DH_OK, retry_cnt = RDAC_RETRY_COUNT; struct rdac_queue_data *tmp, *qdata; LIST_HEAD(list); unsigned char cdb[MAX_COMMAND_SIZE]; struct scsi_sense_hdr sshdr; unsigned int data_size; blk_opf_t opf = REQ_OP_DRV_OUT | REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER; const struct scsi_exec_args exec_args = { .sshdr = &sshdr, }; spin_lock(&ctlr->ms_lock); list_splice_init(&ctlr->ms_head, &list); ctlr->ms_queued = 0; ctlr->ms_sdev = NULL; spin_unlock(&ctlr->ms_lock); retry: memset(cdb, 0, sizeof(cdb)); data_size = rdac_failover_get(ctlr, &list, cdb); RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, " "%s MODE_SELECT command", (char *) h->ctlr->array_name, h->ctlr->index, (retry_cnt == RDAC_RETRY_COUNT) ? 
"queueing" : "retrying"); if (scsi_execute_cmd(sdev, cdb, opf, &h->ctlr->mode_select, data_size, RDAC_TIMEOUT * HZ, RDAC_RETRIES, &exec_args)) { err = mode_select_handle_sense(sdev, &sshdr); if (err == SCSI_DH_RETRY && retry_cnt--) goto retry; if (err == SCSI_DH_IMM_RETRY) goto retry; } if (err == SCSI_DH_OK) { h->state = RDAC_STATE_ACTIVE; RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, " "MODE_SELECT completed", (char *) h->ctlr->array_name, h->ctlr->index); } list_for_each_entry_safe(qdata, tmp, &list, entry) { list_del(&qdata->entry); if (err == SCSI_DH_OK) qdata->h->state = RDAC_STATE_ACTIVE; if (qdata->callback_fn) qdata->callback_fn(qdata->callback_data, err); kfree(qdata); } return; } static int queue_mode_select(struct scsi_device *sdev, activate_complete fn, void *data) { struct rdac_queue_data *qdata; struct rdac_controller *ctlr; qdata = kzalloc(sizeof(*qdata), GFP_KERNEL); if (!qdata) return SCSI_DH_RETRY; qdata->h = sdev->handler_data; qdata->callback_fn = fn; qdata->callback_data = data; ctlr = qdata->h->ctlr; spin_lock(&ctlr->ms_lock); list_add_tail(&qdata->entry, &ctlr->ms_head); if (!ctlr->ms_queued) { ctlr->ms_queued = 1; ctlr->ms_sdev = sdev; queue_work(kmpath_rdacd, &ctlr->ms_work); } spin_unlock(&ctlr->ms_lock); return SCSI_DH_OK; } static int rdac_activate(struct scsi_device *sdev, activate_complete fn, void *data) { struct rdac_dh_data *h = sdev->handler_data; int err = SCSI_DH_OK; int act = 0; err = check_ownership(sdev, h); if (err != SCSI_DH_OK) goto done; switch (h->mode) { case RDAC_MODE: if (h->lun_state == RDAC_LUN_UNOWNED) act = 1; break; case RDAC_MODE_IOSHIP: if ((h->lun_state == RDAC_LUN_UNOWNED) && (h->preferred == RDAC_PREFERRED)) act = 1; break; default: break; } if (act) { err = queue_mode_select(sdev, fn, data); if (err == SCSI_DH_OK) return 0; } done: if (fn) fn(data, err); return 0; } static blk_status_t rdac_prep_fn(struct scsi_device *sdev, struct request *req) { struct rdac_dh_data *h = sdev->handler_data; if (h->state != RDAC_STATE_ACTIVE) { req->rq_flags |= RQF_QUIET; return BLK_STS_IOERR; } return BLK_STS_OK; } static enum scsi_disposition rdac_check_sense(struct scsi_device *sdev, struct scsi_sense_hdr *sense_hdr) { struct rdac_dh_data *h = sdev->handler_data; RDAC_LOG(RDAC_LOG_SENSE, sdev, "array %s, ctlr %d, " "I/O returned with sense %02x/%02x/%02x", (char *) h->ctlr->array_name, h->ctlr->index, sense_hdr->sense_key, sense_hdr->asc, sense_hdr->ascq); switch (sense_hdr->sense_key) { case NOT_READY: if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x01) /* LUN Not Ready - Logical Unit Not Ready and is in * the process of becoming ready * Just retry. */ return ADD_TO_MLQUEUE; if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x81) /* LUN Not Ready - Storage firmware incompatible * Manual code synchonisation required. * * Nothing we can do here. Try to bypass the path. */ return SUCCESS; if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0xA1) /* LUN Not Ready - Quiescense in progress * * Just retry and wait. */ return ADD_TO_MLQUEUE; if (sense_hdr->asc == 0xA1 && sense_hdr->ascq == 0x02) /* LUN Not Ready - Quiescense in progress * or has been achieved * Just retry. */ return ADD_TO_MLQUEUE; break; case ILLEGAL_REQUEST: if (sense_hdr->asc == 0x94 && sense_hdr->ascq == 0x01) { /* Invalid Request - Current Logical Unit Ownership. * Controller is not the current owner of the LUN, * Fail the path, so that the other path be used. 
*/ h->state = RDAC_STATE_PASSIVE; return SUCCESS; } break; case UNIT_ATTENTION: if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00) /* * Power On, Reset, or Bus Device Reset, just retry. */ return ADD_TO_MLQUEUE; if (sense_hdr->asc == 0x8b && sense_hdr->ascq == 0x02) /* * Quiescence in progress , just retry. */ return ADD_TO_MLQUEUE; break; } /* success just means we do not care what scsi-ml does */ return SCSI_RETURN_NOT_HANDLED; } static int rdac_bus_attach(struct scsi_device *sdev) { struct rdac_dh_data *h; int err; char array_name[ARRAY_LABEL_LEN]; char array_id[UNIQUE_ID_LEN]; h = kzalloc(sizeof(*h) , GFP_KERNEL); if (!h) return SCSI_DH_NOMEM; h->lun = UNINITIALIZED_LUN; h->state = RDAC_STATE_ACTIVE; err = get_lun_info(sdev, h, array_name, array_id); if (err != SCSI_DH_OK) goto failed; err = initialize_controller(sdev, h, array_name, array_id); if (err != SCSI_DH_OK) goto failed; err = check_ownership(sdev, h); if (err != SCSI_DH_OK) goto clean_ctlr; err = set_mode_select(sdev, h); if (err != SCSI_DH_OK) goto clean_ctlr; sdev_printk(KERN_NOTICE, sdev, "%s: LUN %d (%s) (%s)\n", RDAC_NAME, h->lun, mode[(int)h->mode], lun_state[(int)h->lun_state]); sdev->handler_data = h; return SCSI_DH_OK; clean_ctlr: spin_lock(&list_lock); kref_put(&h->ctlr->kref, release_controller); spin_unlock(&list_lock); failed: kfree(h); return err; } static void rdac_bus_detach( struct scsi_device *sdev ) { struct rdac_dh_data *h = sdev->handler_data; if (h->ctlr && h->ctlr->ms_queued) flush_workqueue(kmpath_rdacd); spin_lock(&list_lock); if (h->ctlr) { list_del_rcu(&h->node); kref_put(&h->ctlr->kref, release_controller); } spin_unlock(&list_lock); sdev->handler_data = NULL; synchronize_rcu(); kfree(h); } static struct scsi_device_handler rdac_dh = { .name = RDAC_NAME, .module = THIS_MODULE, .prep_fn = rdac_prep_fn, .check_sense = rdac_check_sense, .attach = rdac_bus_attach, .detach = rdac_bus_detach, .activate = rdac_activate, }; static int __init rdac_init(void) { int r; r = scsi_register_device_handler(&rdac_dh); if (r != 0) { printk(KERN_ERR "Failed to register scsi device handler."); goto done; } /* * Create workqueue to handle mode selects for rdac */ kmpath_rdacd = create_singlethread_workqueue("kmpath_rdacd"); if (!kmpath_rdacd) { scsi_unregister_device_handler(&rdac_dh); printk(KERN_ERR "kmpath_rdacd creation failed.\n"); r = -EINVAL; } done: return r; } static void __exit rdac_exit(void) { destroy_workqueue(kmpath_rdacd); scsi_unregister_device_handler(&rdac_dh); } module_init(rdac_init); module_exit(rdac_exit); MODULE_DESCRIPTION("Multipath LSI/Engenio/NetApp E-Series RDAC driver"); MODULE_AUTHOR("Mike Christie, Chandra Seetharaman"); MODULE_VERSION("01.00.0000.0000"); MODULE_LICENSE("GPL");
linux-master
drivers/scsi/device_handler/scsi_dh_rdac.c
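The rdac_logging module parameter in the file above packs two independent log levels into a single integer, two bits per category: failover logging at shift 0 and sense logging at shift 2, which is why the default of 1 enables only failover messages and 0xF enables everything. The standalone sketch below mirrors the RDAC_LOG_LEVEL() macro outside the kernel so the bit layout can be checked directly; the main() driver and its sample mask values are illustrative only and not part of the driver.

/*
 * Standalone sketch of the rdac_logging bit layout used by scsi_dh_rdac.c:
 * two bits per log category, failover at shift 0, sense at shift 2.
 */
#include <stdio.h>

#define RDAC_LOG_FAILOVER 0
#define RDAC_LOG_SENSE    2
#define RDAC_LOG_BITS     2

static int rdac_logging = 1;    /* module default: failover logging only */

#define RDAC_LOG_LEVEL(SHIFT) \
        ((rdac_logging >> (SHIFT)) & ((1 << (RDAC_LOG_BITS)) - 1))

int main(void)
{
        int masks[] = { 0x0, 0x1, 0xF };
        unsigned int i;

        for (i = 0; i < sizeof(masks) / sizeof(masks[0]); i++) {
                rdac_logging = masks[i];
                printf("rdac_logging=0x%x failover=%d sense=%d\n",
                       rdac_logging,
                       RDAC_LOG_LEVEL(RDAC_LOG_FAILOVER),
                       RDAC_LOG_LEVEL(RDAC_LOG_SENSE));
        }
        return 0;
}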
// SPDX-License-Identifier: GPL-2.0-only /* * linux/drivers/acorn/scsi/cumana_2.c * * Copyright (C) 1997-2005 Russell King * * Changelog: * 30-08-1997 RMK 0.0.0 Created, READONLY version. * 22-01-1998 RMK 0.0.1 Updated to 2.1.80. * 15-04-1998 RMK 0.0.1 Only do PIO if FAS216 will allow it. * 02-05-1998 RMK 0.0.2 Updated & added DMA support. * 27-06-1998 RMK Changed asm/delay.h to linux/delay.h * 18-08-1998 RMK 0.0.3 Fixed synchronous transfer depth. * 02-04-2000 RMK 0.0.4 Updated for new error handling code. */ #include <linux/module.h> #include <linux/blkdev.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/ioport.h> #include <linux/proc_fs.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/dma-mapping.h> #include <linux/pgtable.h> #include <asm/dma.h> #include <asm/ecard.h> #include <asm/io.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_host.h> #include <scsi/scsi_tcq.h> #include "fas216.h" #include "arm_scsi.h" #include <scsi/scsicam.h> #define CUMANASCSI2_STATUS (0x0000) #define STATUS_INT (1 << 0) #define STATUS_DRQ (1 << 1) #define STATUS_LATCHED (1 << 3) #define CUMANASCSI2_ALATCH (0x0014) #define ALATCH_ENA_INT (3) #define ALATCH_DIS_INT (2) #define ALATCH_ENA_TERM (5) #define ALATCH_DIS_TERM (4) #define ALATCH_ENA_BIT32 (11) #define ALATCH_DIS_BIT32 (10) #define ALATCH_ENA_DMA (13) #define ALATCH_DIS_DMA (12) #define ALATCH_DMA_OUT (15) #define ALATCH_DMA_IN (14) #define CUMANASCSI2_PSEUDODMA (0x0200) #define CUMANASCSI2_FAS216_OFFSET (0x0300) #define CUMANASCSI2_FAS216_SHIFT 2 /* * Version */ #define VERSION "1.00 (13/11/2002 2.5.47)" /* * Use term=0,1,0,0,0 to turn terminators on/off */ static int term[MAX_ECARDS] = { 1, 1, 1, 1, 1, 1, 1, 1 }; #define NR_SG 256 struct cumanascsi2_info { FAS216_Info info; struct expansion_card *ec; void __iomem *base; unsigned int terms; /* Terminator state */ struct scatterlist sg[NR_SG]; /* Scatter DMA list */ }; #define CSTATUS_IRQ (1 << 0) #define CSTATUS_DRQ (1 << 1) /* Prototype: void cumanascsi_2_irqenable(ec, irqnr) * Purpose : Enable interrupts on Cumana SCSI 2 card * Params : ec - expansion card structure * : irqnr - interrupt number */ static void cumanascsi_2_irqenable(struct expansion_card *ec, int irqnr) { struct cumanascsi2_info *info = ec->irq_data; writeb(ALATCH_ENA_INT, info->base + CUMANASCSI2_ALATCH); } /* Prototype: void cumanascsi_2_irqdisable(ec, irqnr) * Purpose : Disable interrupts on Cumana SCSI 2 card * Params : ec - expansion card structure * : irqnr - interrupt number */ static void cumanascsi_2_irqdisable(struct expansion_card *ec, int irqnr) { struct cumanascsi2_info *info = ec->irq_data; writeb(ALATCH_DIS_INT, info->base + CUMANASCSI2_ALATCH); } static const expansioncard_ops_t cumanascsi_2_ops = { .irqenable = cumanascsi_2_irqenable, .irqdisable = cumanascsi_2_irqdisable, }; /* Prototype: void cumanascsi_2_terminator_ctl(host, on_off) * Purpose : Turn the Cumana SCSI 2 terminators on or off * Params : host - card to turn on/off * : on_off - !0 to turn on, 0 to turn off */ static void cumanascsi_2_terminator_ctl(struct Scsi_Host *host, int on_off) { struct cumanascsi2_info *info = (struct cumanascsi2_info *)host->hostdata; if (on_off) { info->terms = 1; writeb(ALATCH_ENA_TERM, info->base + CUMANASCSI2_ALATCH); } else { info->terms = 0; writeb(ALATCH_DIS_TERM, info->base + CUMANASCSI2_ALATCH); } } /* Prototype: void cumanascsi_2_intr(irq, *dev_id, *regs) * Purpose : 
handle interrupts from Cumana SCSI 2 card * Params : irq - interrupt number * dev_id - user-defined (Scsi_Host structure) */ static irqreturn_t cumanascsi_2_intr(int irq, void *dev_id) { struct cumanascsi2_info *info = dev_id; return fas216_intr(&info->info); } /* Prototype: fasdmatype_t cumanascsi_2_dma_setup(host, SCpnt, direction, min_type) * Purpose : initialises DMA/PIO * Params : host - host * SCpnt - command * direction - DMA on to/off of card * min_type - minimum DMA support that we must have for this transfer * Returns : type of transfer to be performed */ static fasdmatype_t cumanascsi_2_dma_setup(struct Scsi_Host *host, struct scsi_pointer *SCp, fasdmadir_t direction, fasdmatype_t min_type) { struct cumanascsi2_info *info = (struct cumanascsi2_info *)host->hostdata; struct device *dev = scsi_get_device(host); int dmach = info->info.scsi.dma; writeb(ALATCH_DIS_DMA, info->base + CUMANASCSI2_ALATCH); if (dmach != NO_DMA && (min_type == fasdma_real_all || SCp->this_residual >= 512)) { int bufs, map_dir, dma_dir, alatch_dir; bufs = copy_SCp_to_sg(&info->sg[0], SCp, NR_SG); if (direction == DMA_OUT) { map_dir = DMA_TO_DEVICE; dma_dir = DMA_MODE_WRITE; alatch_dir = ALATCH_DMA_OUT; } else { map_dir = DMA_FROM_DEVICE; dma_dir = DMA_MODE_READ; alatch_dir = ALATCH_DMA_IN; } dma_map_sg(dev, info->sg, bufs, map_dir); disable_dma(dmach); set_dma_sg(dmach, info->sg, bufs); writeb(alatch_dir, info->base + CUMANASCSI2_ALATCH); set_dma_mode(dmach, dma_dir); enable_dma(dmach); writeb(ALATCH_ENA_DMA, info->base + CUMANASCSI2_ALATCH); writeb(ALATCH_DIS_BIT32, info->base + CUMANASCSI2_ALATCH); return fasdma_real_all; } /* * If we're not doing DMA, * we'll do pseudo DMA */ return fasdma_pio; } /* * Prototype: void cumanascsi_2_dma_pseudo(host, SCpnt, direction, transfer) * Purpose : handles pseudo DMA * Params : host - host * SCpnt - command * direction - DMA on to/off of card * transfer - minimum number of bytes we expect to transfer */ static void cumanascsi_2_dma_pseudo(struct Scsi_Host *host, struct scsi_pointer *SCp, fasdmadir_t direction, int transfer) { struct cumanascsi2_info *info = (struct cumanascsi2_info *)host->hostdata; unsigned int length; unsigned char *addr; length = SCp->this_residual; addr = SCp->ptr; if (direction == DMA_OUT) #if 0 while (length > 1) { unsigned long word; unsigned int status = readb(info->base + CUMANASCSI2_STATUS); if (status & STATUS_INT) goto end; if (!(status & STATUS_DRQ)) continue; word = *addr | *(addr + 1) << 8; writew(word, info->base + CUMANASCSI2_PSEUDODMA); addr += 2; length -= 2; } #else printk ("PSEUDO_OUT???\n"); #endif else { if (transfer && (transfer & 255)) { while (length >= 256) { unsigned int status = readb(info->base + CUMANASCSI2_STATUS); if (status & STATUS_INT) return; if (!(status & STATUS_DRQ)) continue; readsw(info->base + CUMANASCSI2_PSEUDODMA, addr, 256 >> 1); addr += 256; length -= 256; } } while (length > 0) { unsigned long word; unsigned int status = readb(info->base + CUMANASCSI2_STATUS); if (status & STATUS_INT) return; if (!(status & STATUS_DRQ)) continue; word = readw(info->base + CUMANASCSI2_PSEUDODMA); *addr++ = word; if (--length > 0) { *addr++ = word >> 8; length --; } } } } /* Prototype: int cumanascsi_2_dma_stop(host, SCpnt) * Purpose : stops DMA/PIO * Params : host - host * SCpnt - command */ static void cumanascsi_2_dma_stop(struct Scsi_Host *host, struct scsi_pointer *SCp) { struct cumanascsi2_info *info = (struct cumanascsi2_info *)host->hostdata; if (info->info.scsi.dma != NO_DMA) { writeb(ALATCH_DIS_DMA, info->base + 
CUMANASCSI2_ALATCH); disable_dma(info->info.scsi.dma); } } /* Prototype: const char *cumanascsi_2_info(struct Scsi_Host * host) * Purpose : returns a descriptive string about this interface, * Params : host - driver host structure to return info for. * Returns : pointer to a static buffer containing null terminated string. */ const char *cumanascsi_2_info(struct Scsi_Host *host) { struct cumanascsi2_info *info = (struct cumanascsi2_info *)host->hostdata; static char string[150]; sprintf(string, "%s (%s) in slot %d v%s terminators o%s", host->hostt->name, info->info.scsi.type, info->ec->slot_no, VERSION, info->terms ? "n" : "ff"); return string; } /* Prototype: int cumanascsi_2_set_proc_info(struct Scsi_Host *host, char *buffer, int length) * Purpose : Set a driver specific function * Params : host - host to setup * : buffer - buffer containing string describing operation * : length - length of string * Returns : -EINVAL, or 0 */ static int cumanascsi_2_set_proc_info(struct Scsi_Host *host, char *buffer, int length) { int ret = length; if (length >= 11 && strncmp(buffer, "CUMANASCSI2", 11) == 0) { buffer += 11; length -= 11; if (length >= 5 && strncmp(buffer, "term=", 5) == 0) { if (buffer[5] == '1') cumanascsi_2_terminator_ctl(host, 1); else if (buffer[5] == '0') cumanascsi_2_terminator_ctl(host, 0); else ret = -EINVAL; } else { ret = -EINVAL; } } else { ret = -EINVAL; } return ret; } static int cumanascsi_2_show_info(struct seq_file *m, struct Scsi_Host *host) { struct cumanascsi2_info *info; info = (struct cumanascsi2_info *)host->hostdata; seq_printf(m, "Cumana SCSI II driver v%s\n", VERSION); fas216_print_host(&info->info, m); seq_printf(m, "Term : o%s\n", info->terms ? "n" : "ff"); fas216_print_stats(&info->info, m); fas216_print_devices(&info->info, m); return 0; } static const struct scsi_host_template cumanascsi2_template = { .module = THIS_MODULE, .show_info = cumanascsi_2_show_info, .write_info = cumanascsi_2_set_proc_info, .name = "Cumana SCSI II", .info = cumanascsi_2_info, .queuecommand = fas216_queue_command, .eh_host_reset_handler = fas216_eh_host_reset, .eh_bus_reset_handler = fas216_eh_bus_reset, .eh_device_reset_handler = fas216_eh_device_reset, .eh_abort_handler = fas216_eh_abort, .cmd_size = sizeof(struct fas216_cmd_priv), .can_queue = 1, .this_id = 7, .sg_tablesize = SG_MAX_SEGMENTS, .dma_boundary = IOMD_DMA_BOUNDARY, .proc_name = "cumanascsi2", }; static int cumanascsi2_probe(struct expansion_card *ec, const struct ecard_id *id) { struct Scsi_Host *host; struct cumanascsi2_info *info; void __iomem *base; int ret; ret = ecard_request_resources(ec); if (ret) goto out; base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0); if (!base) { ret = -ENOMEM; goto out_region; } host = scsi_host_alloc(&cumanascsi2_template, sizeof(struct cumanascsi2_info)); if (!host) { ret = -ENOMEM; goto out_region; } ecard_set_drvdata(ec, host); info = (struct cumanascsi2_info *)host->hostdata; info->ec = ec; info->base = base; cumanascsi_2_terminator_ctl(host, term[ec->slot_no]); info->info.scsi.io_base = base + CUMANASCSI2_FAS216_OFFSET; info->info.scsi.io_shift = CUMANASCSI2_FAS216_SHIFT; info->info.scsi.irq = ec->irq; info->info.scsi.dma = ec->dma; info->info.ifcfg.clockrate = 40; /* MHz */ info->info.ifcfg.select_timeout = 255; info->info.ifcfg.asyncperiod = 200; /* ns */ info->info.ifcfg.sync_max_depth = 7; info->info.ifcfg.cntl3 = CNTL3_BS8 | CNTL3_FASTSCSI | CNTL3_FASTCLK; info->info.ifcfg.disconnect_ok = 1; info->info.ifcfg.wide_max_size = 0; info->info.ifcfg.capabilities = FASCAP_PSEUDODMA; 
info->info.dma.setup = cumanascsi_2_dma_setup; info->info.dma.pseudo = cumanascsi_2_dma_pseudo; info->info.dma.stop = cumanascsi_2_dma_stop; ec->irqaddr = info->base + CUMANASCSI2_STATUS; ec->irqmask = STATUS_INT; ecard_setirq(ec, &cumanascsi_2_ops, info); ret = fas216_init(host); if (ret) goto out_free; ret = request_irq(ec->irq, cumanascsi_2_intr, 0, "cumanascsi2", info); if (ret) { printk("scsi%d: IRQ%d not free: %d\n", host->host_no, ec->irq, ret); goto out_release; } if (info->info.scsi.dma != NO_DMA) { if (request_dma(info->info.scsi.dma, "cumanascsi2")) { printk("scsi%d: DMA%d not free, using PIO\n", host->host_no, info->info.scsi.dma); info->info.scsi.dma = NO_DMA; } else { set_dma_speed(info->info.scsi.dma, 180); info->info.ifcfg.capabilities |= FASCAP_DMA; } } ret = fas216_add(host, &ec->dev); if (ret == 0) goto out; if (info->info.scsi.dma != NO_DMA) free_dma(info->info.scsi.dma); free_irq(ec->irq, info); out_release: fas216_release(host); out_free: scsi_host_put(host); out_region: ecard_release_resources(ec); out: return ret; } static void cumanascsi2_remove(struct expansion_card *ec) { struct Scsi_Host *host = ecard_get_drvdata(ec); struct cumanascsi2_info *info = (struct cumanascsi2_info *)host->hostdata; ecard_set_drvdata(ec, NULL); fas216_remove(host); if (info->info.scsi.dma != NO_DMA) free_dma(info->info.scsi.dma); free_irq(ec->irq, info); fas216_release(host); scsi_host_put(host); ecard_release_resources(ec); } static const struct ecard_id cumanascsi2_cids[] = { { MANU_CUMANA, PROD_CUMANA_SCSI_2 }, { 0xffff, 0xffff }, }; static struct ecard_driver cumanascsi2_driver = { .probe = cumanascsi2_probe, .remove = cumanascsi2_remove, .id_table = cumanascsi2_cids, .drv = { .name = "cumanascsi2", }, }; static int __init cumanascsi2_init(void) { return ecard_register_driver(&cumanascsi2_driver); } static void __exit cumanascsi2_exit(void) { ecard_remove_driver(&cumanascsi2_driver); } module_init(cumanascsi2_init); module_exit(cumanascsi2_exit); MODULE_AUTHOR("Russell King"); MODULE_DESCRIPTION("Cumana SCSI-2 driver for Acorn machines"); module_param_array(term, int, NULL, 0); MODULE_PARM_DESC(term, "SCSI bus termination"); MODULE_LICENSE("GPL");
linux-master
drivers/scsi/arm/cumana_2.c
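cumanascsi_2_set_proc_info() in the file above only accepts a write of the form "CUMANASCSI2term=0" or "CUMANASCSI2term=1" (the keyword immediately followed by the term= setting, no separating space); anything else returns -EINVAL. The sketch below lifts that parser into a standalone program so the accepted syntax is easy to verify; parse_term() and its term_on output are hypothetical stand-ins for the driver's call to cumanascsi_2_terminator_ctl().

/* Standalone sketch of the proc write syntax accepted by cumana_2.c. */
#include <stdio.h>
#include <string.h>
#include <errno.h>

static int parse_term(const char *buffer, int length, int *term_on)
{
        int ret = length;

        if (length >= 11 && strncmp(buffer, "CUMANASCSI2", 11) == 0) {
                buffer += 11;
                length -= 11;
                if (length >= 5 && strncmp(buffer, "term=", 5) == 0) {
                        if (buffer[5] == '1')
                                *term_on = 1;        /* terminators on  */
                        else if (buffer[5] == '0')
                                *term_on = 0;        /* terminators off */
                        else
                                ret = -EINVAL;
                } else {
                        ret = -EINVAL;
                }
        } else {
                ret = -EINVAL;
        }
        return ret;
}

int main(void)
{
        int on = -1;
        const char *cmd = "CUMANASCSI2term=1";

        printf("ret=%d term=%d\n",
               parse_term(cmd, (int)strlen(cmd), &on), on);
        return 0;
}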
// SPDX-License-Identifier: GPL-2.0-only /* * linux/drivers/acorn/scsi/eesox.c * * Copyright (C) 1997-2005 Russell King * * This driver is based on experimentation. Hence, it may have made * assumptions about the particular card that I have available, and * may not be reliable! * * Changelog: * 01-10-1997 RMK Created, READONLY version * 15-02-1998 RMK READ/WRITE version * added DMA support and hardware definitions * 14-03-1998 RMK Updated DMA support * Added terminator control * 15-04-1998 RMK Only do PIO if FAS216 will allow it. * 27-06-1998 RMK Changed asm/delay.h to linux/delay.h * 02-04-2000 RMK 0.0.3 Fixed NO_IRQ/NO_DMA problem, updated for new * error handling code. */ #include <linux/module.h> #include <linux/blkdev.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/ioport.h> #include <linux/proc_fs.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/dma-mapping.h> #include <linux/pgtable.h> #include <asm/io.h> #include <asm/dma.h> #include <asm/ecard.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_host.h> #include <scsi/scsi_tcq.h> #include "fas216.h" #include "arm_scsi.h" #include <scsi/scsicam.h> #define EESOX_FAS216_OFFSET 0x3000 #define EESOX_FAS216_SHIFT 5 #define EESOX_DMASTAT 0x2800 #define EESOX_STAT_INTR 0x01 #define EESOX_STAT_DMA 0x02 #define EESOX_CONTROL 0x2800 #define EESOX_INTR_ENABLE 0x04 #define EESOX_TERM_ENABLE 0x02 #define EESOX_RESET 0x01 #define EESOX_DMADATA 0x3800 #define VERSION "1.10 (17/01/2003 2.5.59)" /* * Use term=0,1,0,0,0 to turn terminators on/off */ static int term[MAX_ECARDS] = { 1, 1, 1, 1, 1, 1, 1, 1 }; #define NR_SG 256 struct eesoxscsi_info { FAS216_Info info; struct expansion_card *ec; void __iomem *base; void __iomem *ctl_port; unsigned int control; struct scatterlist sg[NR_SG]; /* Scatter DMA list */ }; /* Prototype: void eesoxscsi_irqenable(ec, irqnr) * Purpose : Enable interrupts on EESOX SCSI card * Params : ec - expansion card structure * : irqnr - interrupt number */ static void eesoxscsi_irqenable(struct expansion_card *ec, int irqnr) { struct eesoxscsi_info *info = (struct eesoxscsi_info *)ec->irq_data; info->control |= EESOX_INTR_ENABLE; writeb(info->control, info->ctl_port); } /* Prototype: void eesoxscsi_irqdisable(ec, irqnr) * Purpose : Disable interrupts on EESOX SCSI card * Params : ec - expansion card structure * : irqnr - interrupt number */ static void eesoxscsi_irqdisable(struct expansion_card *ec, int irqnr) { struct eesoxscsi_info *info = (struct eesoxscsi_info *)ec->irq_data; info->control &= ~EESOX_INTR_ENABLE; writeb(info->control, info->ctl_port); } static const expansioncard_ops_t eesoxscsi_ops = { .irqenable = eesoxscsi_irqenable, .irqdisable = eesoxscsi_irqdisable, }; /* Prototype: void eesoxscsi_terminator_ctl(*host, on_off) * Purpose : Turn the EESOX SCSI terminators on or off * Params : host - card to turn on/off * : on_off - !0 to turn on, 0 to turn off */ static void eesoxscsi_terminator_ctl(struct Scsi_Host *host, int on_off) { struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata; unsigned long flags; spin_lock_irqsave(host->host_lock, flags); if (on_off) info->control |= EESOX_TERM_ENABLE; else info->control &= ~EESOX_TERM_ENABLE; writeb(info->control, info->ctl_port); spin_unlock_irqrestore(host->host_lock, flags); } /* Prototype: void eesoxscsi_intr(irq, *dev_id, *regs) * Purpose : handle interrupts from EESOX SCSI card * Params : irq - 
interrupt number * dev_id - user-defined (Scsi_Host structure) */ static irqreturn_t eesoxscsi_intr(int irq, void *dev_id) { struct eesoxscsi_info *info = dev_id; return fas216_intr(&info->info); } /* Prototype: fasdmatype_t eesoxscsi_dma_setup(host, SCpnt, direction, min_type) * Purpose : initialises DMA/PIO * Params : host - host * SCpnt - command * direction - DMA on to/off of card * min_type - minimum DMA support that we must have for this transfer * Returns : type of transfer to be performed */ static fasdmatype_t eesoxscsi_dma_setup(struct Scsi_Host *host, struct scsi_pointer *SCp, fasdmadir_t direction, fasdmatype_t min_type) { struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata; struct device *dev = scsi_get_device(host); int dmach = info->info.scsi.dma; if (dmach != NO_DMA && (min_type == fasdma_real_all || SCp->this_residual >= 512)) { int bufs, map_dir, dma_dir; bufs = copy_SCp_to_sg(&info->sg[0], SCp, NR_SG); if (direction == DMA_OUT) { map_dir = DMA_TO_DEVICE; dma_dir = DMA_MODE_WRITE; } else { map_dir = DMA_FROM_DEVICE; dma_dir = DMA_MODE_READ; } dma_map_sg(dev, info->sg, bufs, map_dir); disable_dma(dmach); set_dma_sg(dmach, info->sg, bufs); set_dma_mode(dmach, dma_dir); enable_dma(dmach); return fasdma_real_all; } /* * We don't do DMA, we only do slow PIO * * Some day, we will do Pseudo DMA */ return fasdma_pseudo; } static void eesoxscsi_buffer_in(void *buf, int length, void __iomem *base) { const void __iomem *reg_fas = base + EESOX_FAS216_OFFSET; const void __iomem *reg_dmastat = base + EESOX_DMASTAT; const void __iomem *reg_dmadata = base + EESOX_DMADATA; register const unsigned long mask = 0xffff; do { unsigned int status; /* * Interrupt request? */ status = readb(reg_fas + (REG_STAT << EESOX_FAS216_SHIFT)); if (status & STAT_INT) break; /* * DMA request active? */ status = readb(reg_dmastat); if (!(status & EESOX_STAT_DMA)) continue; /* * Get number of bytes in FIFO */ status = readb(reg_fas + (REG_CFIS << EESOX_FAS216_SHIFT)) & CFIS_CF; if (status > 16) status = 16; if (status > length) status = length; /* * Align buffer. */ if (((u32)buf) & 2 && status >= 2) { *(u16 *)buf = readl(reg_dmadata); buf += 2; status -= 2; length -= 2; } if (status >= 8) { unsigned long l1, l2; l1 = readl(reg_dmadata) & mask; l1 |= readl(reg_dmadata) << 16; l2 = readl(reg_dmadata) & mask; l2 |= readl(reg_dmadata) << 16; *(u32 *)buf = l1; buf += 4; *(u32 *)buf = l2; buf += 4; length -= 8; continue; } if (status >= 4) { unsigned long l1; l1 = readl(reg_dmadata) & mask; l1 |= readl(reg_dmadata) << 16; *(u32 *)buf = l1; buf += 4; length -= 4; continue; } if (status >= 2) { *(u16 *)buf = readl(reg_dmadata); buf += 2; length -= 2; } } while (length); } static void eesoxscsi_buffer_out(void *buf, int length, void __iomem *base) { const void __iomem *reg_fas = base + EESOX_FAS216_OFFSET; const void __iomem *reg_dmastat = base + EESOX_DMASTAT; void __iomem *reg_dmadata = base + EESOX_DMADATA; do { unsigned int status; /* * Interrupt request? */ status = readb(reg_fas + (REG_STAT << EESOX_FAS216_SHIFT)); if (status & STAT_INT) break; /* * DMA request active? */ status = readb(reg_dmastat); if (!(status & EESOX_STAT_DMA)) continue; /* * Get number of bytes in FIFO */ status = readb(reg_fas + (REG_CFIS << EESOX_FAS216_SHIFT)) & CFIS_CF; if (status > 16) status = 16; status = 16 - status; if (status > length) status = length; status &= ~1; /* * Align buffer. 
*/ if (((u32)buf) & 2 && status >= 2) { writel(*(u16 *)buf << 16, reg_dmadata); buf += 2; status -= 2; length -= 2; } if (status >= 8) { unsigned long l1, l2; l1 = *(u32 *)buf; buf += 4; l2 = *(u32 *)buf; buf += 4; writel(l1 << 16, reg_dmadata); writel(l1, reg_dmadata); writel(l2 << 16, reg_dmadata); writel(l2, reg_dmadata); length -= 8; continue; } if (status >= 4) { unsigned long l1; l1 = *(u32 *)buf; buf += 4; writel(l1 << 16, reg_dmadata); writel(l1, reg_dmadata); length -= 4; continue; } if (status >= 2) { writel(*(u16 *)buf << 16, reg_dmadata); buf += 2; length -= 2; } } while (length); } static void eesoxscsi_dma_pseudo(struct Scsi_Host *host, struct scsi_pointer *SCp, fasdmadir_t dir, int transfer_size) { struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata; if (dir == DMA_IN) { eesoxscsi_buffer_in(SCp->ptr, SCp->this_residual, info->base); } else { eesoxscsi_buffer_out(SCp->ptr, SCp->this_residual, info->base); } } /* Prototype: int eesoxscsi_dma_stop(host, SCpnt) * Purpose : stops DMA/PIO * Params : host - host * SCpnt - command */ static void eesoxscsi_dma_stop(struct Scsi_Host *host, struct scsi_pointer *SCp) { struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata; if (info->info.scsi.dma != NO_DMA) disable_dma(info->info.scsi.dma); } /* Prototype: const char *eesoxscsi_info(struct Scsi_Host * host) * Purpose : returns a descriptive string about this interface, * Params : host - driver host structure to return info for. * Returns : pointer to a static buffer containing null terminated string. */ const char *eesoxscsi_info(struct Scsi_Host *host) { struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata; static char string[150]; sprintf(string, "%s (%s) in slot %d v%s terminators o%s", host->hostt->name, info->info.scsi.type, info->ec->slot_no, VERSION, info->control & EESOX_TERM_ENABLE ? "n" : "ff"); return string; } /* Prototype: int eesoxscsi_set_proc_info(struct Scsi_Host *host, char *buffer, int length) * Purpose : Set a driver specific function * Params : host - host to setup * : buffer - buffer containing string describing operation * : length - length of string * Returns : -EINVAL, or 0 */ static int eesoxscsi_set_proc_info(struct Scsi_Host *host, char *buffer, int length) { int ret = length; if (length >= 9 && strncmp(buffer, "EESOXSCSI", 9) == 0) { buffer += 9; length -= 9; if (length >= 5 && strncmp(buffer, "term=", 5) == 0) { if (buffer[5] == '1') eesoxscsi_terminator_ctl(host, 1); else if (buffer[5] == '0') eesoxscsi_terminator_ctl(host, 0); else ret = -EINVAL; } else ret = -EINVAL; } else ret = -EINVAL; return ret; } static int eesoxscsi_show_info(struct seq_file *m, struct Scsi_Host *host) { struct eesoxscsi_info *info; info = (struct eesoxscsi_info *)host->hostdata; seq_printf(m, "EESOX SCSI driver v%s\n", VERSION); fas216_print_host(&info->info, m); seq_printf(m, "Term : o%s\n", info->control & EESOX_TERM_ENABLE ? "n" : "ff"); fas216_print_stats(&info->info, m); fas216_print_devices(&info->info, m); return 0; } static ssize_t eesoxscsi_show_term(struct device *dev, struct device_attribute *attr, char *buf) { struct expansion_card *ec = ECARD_DEV(dev); struct Scsi_Host *host = ecard_get_drvdata(ec); struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata; return sprintf(buf, "%d\n", info->control & EESOX_TERM_ENABLE ? 
1 : 0); } static ssize_t eesoxscsi_store_term(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct expansion_card *ec = ECARD_DEV(dev); struct Scsi_Host *host = ecard_get_drvdata(ec); struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata; unsigned long flags; if (len > 1) { spin_lock_irqsave(host->host_lock, flags); if (buf[0] != '0') { info->control |= EESOX_TERM_ENABLE; } else { info->control &= ~EESOX_TERM_ENABLE; } writeb(info->control, info->ctl_port); spin_unlock_irqrestore(host->host_lock, flags); } return len; } static DEVICE_ATTR(bus_term, S_IRUGO | S_IWUSR, eesoxscsi_show_term, eesoxscsi_store_term); static const struct scsi_host_template eesox_template = { .module = THIS_MODULE, .show_info = eesoxscsi_show_info, .write_info = eesoxscsi_set_proc_info, .name = "EESOX SCSI", .info = eesoxscsi_info, .queuecommand = fas216_queue_command, .eh_host_reset_handler = fas216_eh_host_reset, .eh_bus_reset_handler = fas216_eh_bus_reset, .eh_device_reset_handler = fas216_eh_device_reset, .eh_abort_handler = fas216_eh_abort, .cmd_size = sizeof(struct fas216_cmd_priv), .can_queue = 1, .this_id = 7, .sg_tablesize = SG_MAX_SEGMENTS, .dma_boundary = IOMD_DMA_BOUNDARY, .proc_name = "eesox", }; static int eesoxscsi_probe(struct expansion_card *ec, const struct ecard_id *id) { struct Scsi_Host *host; struct eesoxscsi_info *info; void __iomem *base; int ret; ret = ecard_request_resources(ec); if (ret) goto out; base = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0); if (!base) { ret = -ENOMEM; goto out_region; } host = scsi_host_alloc(&eesox_template, sizeof(struct eesoxscsi_info)); if (!host) { ret = -ENOMEM; goto out_region; } ecard_set_drvdata(ec, host); info = (struct eesoxscsi_info *)host->hostdata; info->ec = ec; info->base = base; info->ctl_port = base + EESOX_CONTROL; info->control = term[ec->slot_no] ? 
EESOX_TERM_ENABLE : 0; writeb(info->control, info->ctl_port); info->info.scsi.io_base = base + EESOX_FAS216_OFFSET; info->info.scsi.io_shift = EESOX_FAS216_SHIFT; info->info.scsi.irq = ec->irq; info->info.scsi.dma = ec->dma; info->info.ifcfg.clockrate = 40; /* MHz */ info->info.ifcfg.select_timeout = 255; info->info.ifcfg.asyncperiod = 200; /* ns */ info->info.ifcfg.sync_max_depth = 7; info->info.ifcfg.cntl3 = CNTL3_FASTSCSI | CNTL3_FASTCLK; info->info.ifcfg.disconnect_ok = 1; info->info.ifcfg.wide_max_size = 0; info->info.ifcfg.capabilities = FASCAP_PSEUDODMA; info->info.dma.setup = eesoxscsi_dma_setup; info->info.dma.pseudo = eesoxscsi_dma_pseudo; info->info.dma.stop = eesoxscsi_dma_stop; ec->irqaddr = base + EESOX_DMASTAT; ec->irqmask = EESOX_STAT_INTR; ecard_setirq(ec, &eesoxscsi_ops, info); device_create_file(&ec->dev, &dev_attr_bus_term); ret = fas216_init(host); if (ret) goto out_free; ret = request_irq(ec->irq, eesoxscsi_intr, 0, "eesoxscsi", info); if (ret) { printk("scsi%d: IRQ%d not free: %d\n", host->host_no, ec->irq, ret); goto out_remove; } if (info->info.scsi.dma != NO_DMA) { if (request_dma(info->info.scsi.dma, "eesox")) { printk("scsi%d: DMA%d not free, DMA disabled\n", host->host_no, info->info.scsi.dma); info->info.scsi.dma = NO_DMA; } else { set_dma_speed(info->info.scsi.dma, 180); info->info.ifcfg.capabilities |= FASCAP_DMA; info->info.ifcfg.cntl3 |= CNTL3_BS8; } } ret = fas216_add(host, &ec->dev); if (ret == 0) goto out; if (info->info.scsi.dma != NO_DMA) free_dma(info->info.scsi.dma); free_irq(ec->irq, info); out_remove: fas216_remove(host); out_free: device_remove_file(&ec->dev, &dev_attr_bus_term); scsi_host_put(host); out_region: ecard_release_resources(ec); out: return ret; } static void eesoxscsi_remove(struct expansion_card *ec) { struct Scsi_Host *host = ecard_get_drvdata(ec); struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata; ecard_set_drvdata(ec, NULL); fas216_remove(host); if (info->info.scsi.dma != NO_DMA) free_dma(info->info.scsi.dma); free_irq(ec->irq, info); device_remove_file(&ec->dev, &dev_attr_bus_term); fas216_release(host); scsi_host_put(host); ecard_release_resources(ec); } static const struct ecard_id eesoxscsi_cids[] = { { MANU_EESOX, PROD_EESOX_SCSI2 }, { 0xffff, 0xffff }, }; static struct ecard_driver eesoxscsi_driver = { .probe = eesoxscsi_probe, .remove = eesoxscsi_remove, .id_table = eesoxscsi_cids, .drv = { .name = "eesoxscsi", }, }; static int __init eesox_init(void) { return ecard_register_driver(&eesoxscsi_driver); } static void __exit eesox_exit(void) { ecard_remove_driver(&eesoxscsi_driver); } module_init(eesox_init); module_exit(eesox_exit); MODULE_AUTHOR("Russell King"); MODULE_DESCRIPTION("EESOX 'Fast' SCSI driver for Acorn machines"); module_param_array(term, int, NULL, 0); MODULE_PARM_DESC(term, "SCSI bus termination"); MODULE_LICENSE("GPL");
linux-master
drivers/scsi/arm/eesox.c
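In eesoxscsi_buffer_out() above, each 32-bit buffer word l1 is pushed to the 16-bit wide data port as writel(l1 << 16) followed by writel(l1), which is consistent with the hardware sampling only bits 31..16 of every write, low half first (the read path in eesoxscsi_buffer_in() reassembles words in the same order). The small standalone sketch below reproduces that packing with a hypothetical emit16() in place of writel(), so the ordering on the data port can be seen directly.

/* Standalone sketch of the 16-bit data-port packing used by eesox.c. */
#include <stdio.h>
#include <stdint.h>

static void emit16(uint32_t value32)
{
        /* assume the port only sees bits 31..16 of each 32-bit write */
        printf("port <- 0x%04x\n", (value32 >> 16) & 0xffff);
}

int main(void)
{
        uint32_t l1 = 0xCAFEBABE;       /* one 32-bit word from the buffer */

        emit16(l1 << 16);               /* low 16 bits (0xBABE) go first   */
        emit16(l1);                     /* then the high 16 bits (0xCAFE)  */
        return 0;
}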
// SPDX-License-Identifier: GPL-2.0-only /* * linux/drivers/scsi/arm/arxescsi.c * * Copyright (C) 1997-2000 Russell King, Stefan Hanske * * This driver is based on experimentation. Hence, it may have made * assumptions about the particular card that I have available, and * may not be reliable! * * Changelog: * 30-08-1997 RMK 0.0.0 Created, READONLY version as cumana_2.c * 22-01-1998 RMK 0.0.1 Updated to 2.1.80 * 15-04-1998 RMK 0.0.1 Only do PIO if FAS216 will allow it. * 11-06-1998 SH 0.0.2 Changed to support ARXE 16-bit SCSI card * enabled writing * 01-01-2000 SH 0.1.0 Added *real* pseudo dma writing * (arxescsi_pseudo_dma_write) * 02-04-2000 RMK 0.1.1 Updated for new error handling code. * 22-10-2000 SH Updated for new registering scheme. */ #include <linux/module.h> #include <linux/blkdev.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/ioport.h> #include <linux/proc_fs.h> #include <linux/unistd.h> #include <linux/stat.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> #include <asm/dma.h> #include <asm/io.h> #include <asm/ecard.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_host.h> #include <scsi/scsi_tcq.h> #include "fas216.h" struct arxescsi_info { FAS216_Info info; struct expansion_card *ec; void __iomem *base; }; #define DMADATA_OFFSET (0x200) #define DMASTAT_OFFSET (0x600) #define DMASTAT_DRQ (1 << 0) #define CSTATUS_IRQ (1 << 0) #define VERSION "1.10 (23/01/2003 2.5.57)" /* * Function: int arxescsi_dma_setup(host, SCpnt, direction, min_type) * Purpose : initialises DMA/PIO * Params : host - host * SCpnt - command * direction - DMA on to/off of card * min_type - minimum DMA support that we must have for this transfer * Returns : 0 if we should not set CMD_WITHDMA for transfer info command */ static fasdmatype_t arxescsi_dma_setup(struct Scsi_Host *host, struct scsi_pointer *SCp, fasdmadir_t direction, fasdmatype_t min_type) { /* * We don't do real DMA */ return fasdma_pseudo; } static void arxescsi_pseudo_dma_write(unsigned char *addr, void __iomem *base) { __asm__ __volatile__( " stmdb sp!, {r0-r12}\n" " mov r3, %0\n" " mov r1, %1\n" " add r2, r1, #512\n" " mov r4, #256\n" ".loop_1: ldmia r3!, {r6, r8, r10, r12}\n" " mov r5, r6, lsl #16\n" " mov r7, r8, lsl #16\n" ".loop_2: ldrb r0, [r1, #1536]\n" " tst r0, #1\n" " beq .loop_2\n" " stmia r2, {r5-r8}\n\t" " mov r9, r10, lsl #16\n" " mov r11, r12, lsl #16\n" ".loop_3: ldrb r0, [r1, #1536]\n" " tst r0, #1\n" " beq .loop_3\n" " stmia r2, {r9-r12}\n" " subs r4, r4, #16\n" " bne .loop_1\n" " ldmia sp!, {r0-r12}\n" : : "r" (addr), "r" (base)); } /* * Function: int arxescsi_dma_pseudo(host, SCpnt, direction, transfer) * Purpose : handles pseudo DMA * Params : host - host * SCpnt - command * direction - DMA on to/off of card * transfer - minimum number of bytes we expect to transfer */ static void arxescsi_dma_pseudo(struct Scsi_Host *host, struct scsi_pointer *SCp, fasdmadir_t direction, int transfer) { struct arxescsi_info *info = (struct arxescsi_info *)host->hostdata; unsigned int length, error = 0; void __iomem *base = info->info.scsi.io_base; unsigned char *addr; length = SCp->this_residual; addr = SCp->ptr; if (direction == DMA_OUT) { unsigned int word; while (length > 256) { if (readb(base + 0x80) & STAT_INT) { error = 1; break; } arxescsi_pseudo_dma_write(addr, base); addr += 256; length -= 256; } if (!error) while (length > 0) { if (readb(base + 0x80) & STAT_INT) break; if (!(readb(base + 
DMASTAT_OFFSET) & DMASTAT_DRQ)) continue; word = *addr | *(addr + 1) << 8; writew(word, base + DMADATA_OFFSET); if (length > 1) { addr += 2; length -= 2; } else { addr += 1; length -= 1; } } } else { if (transfer && (transfer & 255)) { while (length >= 256) { if (readb(base + 0x80) & STAT_INT) { error = 1; break; } if (!(readb(base + DMASTAT_OFFSET) & DMASTAT_DRQ)) continue; readsw(base + DMADATA_OFFSET, addr, 256 >> 1); addr += 256; length -= 256; } } if (!(error)) while (length > 0) { unsigned long word; if (readb(base + 0x80) & STAT_INT) break; if (!(readb(base + DMASTAT_OFFSET) & DMASTAT_DRQ)) continue; word = readw(base + DMADATA_OFFSET); *addr++ = word; if (--length > 0) { *addr++ = word >> 8; length --; } } } } /* * Function: int arxescsi_dma_stop(host, SCpnt) * Purpose : stops DMA/PIO * Params : host - host * SCpnt - command */ static void arxescsi_dma_stop(struct Scsi_Host *host, struct scsi_pointer *SCp) { /* * no DMA to stop */ } /* * Function: const char *arxescsi_info(struct Scsi_Host * host) * Purpose : returns a descriptive string about this interface, * Params : host - driver host structure to return info for. * Returns : pointer to a static buffer containing null terminated string. */ static const char *arxescsi_info(struct Scsi_Host *host) { struct arxescsi_info *info = (struct arxescsi_info *)host->hostdata; static char string[150]; sprintf(string, "%s (%s) in slot %d v%s", host->hostt->name, info->info.scsi.type, info->ec->slot_no, VERSION); return string; } static int arxescsi_show_info(struct seq_file *m, struct Scsi_Host *host) { struct arxescsi_info *info; info = (struct arxescsi_info *)host->hostdata; seq_printf(m, "ARXE 16-bit SCSI driver v%s\n", VERSION); fas216_print_host(&info->info, m); fas216_print_stats(&info->info, m); fas216_print_devices(&info->info, m); return 0; } static const struct scsi_host_template arxescsi_template = { .show_info = arxescsi_show_info, .name = "ARXE SCSI card", .info = arxescsi_info, .queuecommand = fas216_noqueue_command, .eh_host_reset_handler = fas216_eh_host_reset, .eh_bus_reset_handler = fas216_eh_bus_reset, .eh_device_reset_handler = fas216_eh_device_reset, .eh_abort_handler = fas216_eh_abort, .cmd_size = sizeof(struct fas216_cmd_priv), .can_queue = 0, .this_id = 7, .sg_tablesize = SG_ALL, .dma_boundary = PAGE_SIZE - 1, .proc_name = "arxescsi", }; static int arxescsi_probe(struct expansion_card *ec, const struct ecard_id *id) { struct Scsi_Host *host; struct arxescsi_info *info; void __iomem *base; int ret; ret = ecard_request_resources(ec); if (ret) goto out; base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0); if (!base) { ret = -ENOMEM; goto out_region; } host = scsi_host_alloc(&arxescsi_template, sizeof(struct arxescsi_info)); if (!host) { ret = -ENOMEM; goto out_region; } info = (struct arxescsi_info *)host->hostdata; info->ec = ec; info->base = base; info->info.scsi.io_base = base + 0x2000; info->info.scsi.irq = 0; info->info.scsi.dma = NO_DMA; info->info.scsi.io_shift = 5; info->info.ifcfg.clockrate = 24; /* MHz */ info->info.ifcfg.select_timeout = 255; info->info.ifcfg.asyncperiod = 200; /* ns */ info->info.ifcfg.sync_max_depth = 0; info->info.ifcfg.cntl3 = CNTL3_FASTSCSI | CNTL3_FASTCLK; info->info.ifcfg.disconnect_ok = 0; info->info.ifcfg.wide_max_size = 0; info->info.ifcfg.capabilities = FASCAP_PSEUDODMA; info->info.dma.setup = arxescsi_dma_setup; info->info.dma.pseudo = arxescsi_dma_pseudo; info->info.dma.stop = arxescsi_dma_stop; ec->irqaddr = base; ec->irqmask = CSTATUS_IRQ; ret = fas216_init(host); if (ret) goto 
out_unregister; ret = fas216_add(host, &ec->dev); if (ret == 0) goto out; fas216_release(host); out_unregister: scsi_host_put(host); out_region: ecard_release_resources(ec); out: return ret; } static void arxescsi_remove(struct expansion_card *ec) { struct Scsi_Host *host = ecard_get_drvdata(ec); ecard_set_drvdata(ec, NULL); fas216_remove(host); fas216_release(host); scsi_host_put(host); ecard_release_resources(ec); } static const struct ecard_id arxescsi_cids[] = { { MANU_ARXE, PROD_ARXE_SCSI }, { 0xffff, 0xffff }, }; static struct ecard_driver arxescsi_driver = { .probe = arxescsi_probe, .remove = arxescsi_remove, .id_table = arxescsi_cids, .drv = { .name = "arxescsi", }, }; static int __init init_arxe_scsi_driver(void) { return ecard_register_driver(&arxescsi_driver); } static void __exit exit_arxe_scsi_driver(void) { ecard_remove_driver(&arxescsi_driver); } module_init(init_arxe_scsi_driver); module_exit(exit_arxe_scsi_driver); MODULE_AUTHOR("Stefan Hanske"); MODULE_DESCRIPTION("ARXESCSI driver for Acorn machines"); MODULE_LICENSE("GPL");
linux-master
drivers/scsi/arm/arxescsi.c
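The DMA_OUT tail loop of arxescsi_dma_pseudo() above combines two consecutive buffer bytes little-endian into one 16-bit word before each writew() to the data register. The standalone sketch below replays that packing on a small test buffer; the buffer contents and the printf() standing in for writew() are illustrative only.

/* Standalone sketch of the byte-to-word packing in arxescsi.c's DMA_OUT path. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        unsigned char buf[] = { 0x12, 0x34, 0x56, 0x78 };
        unsigned char *addr = buf;
        unsigned int length = sizeof(buf);

        while (length > 1) {
                uint16_t word = addr[0] | addr[1] << 8;  /* little-endian pair */

                printf("writew(0x%04x)\n", word);        /* 0x3412, then 0x7856 */
                addr += 2;
                length -= 2;
        }
        return 0;
}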
// SPDX-License-Identifier: GPL-2.0-only /* * linux/drivers/acorn/scsi/queue.c: queue handling primitives * * Copyright (C) 1997-2000 Russell King * * Changelog: * 15-Sep-1997 RMK Created. * 11-Oct-1997 RMK Corrected problem with queue_remove_exclude * not updating internal linked list properly * (was causing commands to go missing). * 30-Aug-2000 RMK Use Linux list handling and spinlocks */ #include <linux/module.h> #include <linux/blkdev.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/list.h> #include <linux/init.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_tcq.h> #define DEBUG typedef struct queue_entry { struct list_head list; struct scsi_cmnd *SCpnt; #ifdef DEBUG unsigned long magic; #endif } QE_t; #ifdef DEBUG #define QUEUE_MAGIC_FREE 0xf7e1c9a3 #define QUEUE_MAGIC_USED 0xf7e1cc33 #define SET_MAGIC(q,m) ((q)->magic = (m)) #define BAD_MAGIC(q,m) ((q)->magic != (m)) #else #define SET_MAGIC(q,m) do { } while (0) #define BAD_MAGIC(q,m) (0) #endif #include "queue.h" #define NR_QE 32 /* * Function: void queue_initialise (Queue_t *queue) * Purpose : initialise a queue * Params : queue - queue to initialise */ int queue_initialise (Queue_t *queue) { unsigned int nqueues = NR_QE; QE_t *q; spin_lock_init(&queue->queue_lock); INIT_LIST_HEAD(&queue->head); INIT_LIST_HEAD(&queue->free); /* * If life was easier, then SCpnt would have a * host-available list head, and we wouldn't * need to keep free lists or allocate this * memory. */ queue->alloc = q = kmalloc_array(nqueues, sizeof(QE_t), GFP_KERNEL); if (q) { for (; nqueues; q++, nqueues--) { SET_MAGIC(q, QUEUE_MAGIC_FREE); q->SCpnt = NULL; list_add(&q->list, &queue->free); } } return queue->alloc != NULL; } /* * Function: void queue_free (Queue_t *queue) * Purpose : free a queue * Params : queue - queue to free */ void queue_free (Queue_t *queue) { if (!list_empty(&queue->head)) printk(KERN_WARNING "freeing non-empty queue %p\n", queue); kfree(queue->alloc); } /* * Function: int __queue_add(Queue_t *queue, struct scsi_cmnd *SCpnt, int head) * Purpose : Add a new command onto a queue, adding REQUEST_SENSE to head. 
* Params : queue - destination queue * SCpnt - command to add * head - add command to head of queue * Returns : 0 on error, !0 on success */ int __queue_add(Queue_t *queue, struct scsi_cmnd *SCpnt, int head) { unsigned long flags; struct list_head *l; QE_t *q; int ret = 0; spin_lock_irqsave(&queue->queue_lock, flags); if (list_empty(&queue->free)) goto empty; l = queue->free.next; list_del(l); q = list_entry(l, QE_t, list); BUG_ON(BAD_MAGIC(q, QUEUE_MAGIC_FREE)); SET_MAGIC(q, QUEUE_MAGIC_USED); q->SCpnt = SCpnt; if (head) list_add(l, &queue->head); else list_add_tail(l, &queue->head); ret = 1; empty: spin_unlock_irqrestore(&queue->queue_lock, flags); return ret; } static struct scsi_cmnd *__queue_remove(Queue_t *queue, struct list_head *ent) { QE_t *q; /* * Move the entry from the "used" list onto the "free" list */ list_del(ent); q = list_entry(ent, QE_t, list); BUG_ON(BAD_MAGIC(q, QUEUE_MAGIC_USED)); SET_MAGIC(q, QUEUE_MAGIC_FREE); list_add(ent, &queue->free); return q->SCpnt; } /* * Function: struct scsi_cmnd *queue_remove_exclude (queue, exclude) * Purpose : remove a SCSI command from a queue * Params : queue - queue to remove command from * exclude - bit array of target&lun which is busy * Returns : struct scsi_cmnd if successful (and a reference), or NULL if no command available */ struct scsi_cmnd *queue_remove_exclude(Queue_t *queue, unsigned long *exclude) { unsigned long flags; struct list_head *l; struct scsi_cmnd *SCpnt = NULL; spin_lock_irqsave(&queue->queue_lock, flags); list_for_each(l, &queue->head) { QE_t *q = list_entry(l, QE_t, list); if (!test_bit(q->SCpnt->device->id * 8 + (u8)(q->SCpnt->device->lun & 0x7), exclude)) { SCpnt = __queue_remove(queue, l); break; } } spin_unlock_irqrestore(&queue->queue_lock, flags); return SCpnt; } /* * Function: struct scsi_cmnd *queue_remove (queue) * Purpose : removes first SCSI command from a queue * Params : queue - queue to remove command from * Returns : struct scsi_cmnd if successful (and a reference), or NULL if no command available */ struct scsi_cmnd *queue_remove(Queue_t *queue) { unsigned long flags; struct scsi_cmnd *SCpnt = NULL; spin_lock_irqsave(&queue->queue_lock, flags); if (!list_empty(&queue->head)) SCpnt = __queue_remove(queue, queue->head.next); spin_unlock_irqrestore(&queue->queue_lock, flags); return SCpnt; } /* * Function: struct scsi_cmnd *queue_remove_tgtluntag (queue, target, lun, tag) * Purpose : remove a SCSI command from the queue for a specified target/lun/tag * Params : queue - queue to remove command from * target - target that we want * lun - lun on device * tag - tag on device * Returns : struct scsi_cmnd if successful, or NULL if no command satisfies requirements */ struct scsi_cmnd *queue_remove_tgtluntag(Queue_t *queue, int target, int lun, int tag) { unsigned long flags; struct list_head *l; struct scsi_cmnd *SCpnt = NULL; spin_lock_irqsave(&queue->queue_lock, flags); list_for_each(l, &queue->head) { QE_t *q = list_entry(l, QE_t, list); if (q->SCpnt->device->id == target && q->SCpnt->device->lun == lun && scsi_cmd_to_rq(q->SCpnt)->tag == tag) { SCpnt = __queue_remove(queue, l); break; } } spin_unlock_irqrestore(&queue->queue_lock, flags); return SCpnt; } /* * Function: queue_remove_all_target(queue, target) * Purpose : remove all SCSI commands from the queue for a specified target * Params : queue - queue to remove command from * target - target device id * Returns : nothing */ void queue_remove_all_target(Queue_t *queue, int target) { unsigned long flags; struct list_head *l; 
spin_lock_irqsave(&queue->queue_lock, flags); list_for_each(l, &queue->head) { QE_t *q = list_entry(l, QE_t, list); if (q->SCpnt->device->id == target) __queue_remove(queue, l); } spin_unlock_irqrestore(&queue->queue_lock, flags); } /* * Function: int queue_probetgtlun (queue, target, lun) * Purpose : check to see if we have a command in the queue for the specified * target/lun. * Params : queue - queue to look in * target - target we want to probe * lun - lun on target * Returns : 0 if not found, != 0 if found */ int queue_probetgtlun (Queue_t *queue, int target, int lun) { unsigned long flags; struct list_head *l; int found = 0; spin_lock_irqsave(&queue->queue_lock, flags); list_for_each(l, &queue->head) { QE_t *q = list_entry(l, QE_t, list); if (q->SCpnt->device->id == target && q->SCpnt->device->lun == lun) { found = 1; break; } } spin_unlock_irqrestore(&queue->queue_lock, flags); return found; } /* * Function: int queue_remove_cmd(Queue_t *queue, struct scsi_cmnd *SCpnt) * Purpose : remove a specific command from the queues * Params : queue - queue to look in * SCpnt - command to find * Returns : 0 if not found */ int queue_remove_cmd(Queue_t *queue, struct scsi_cmnd *SCpnt) { unsigned long flags; struct list_head *l; int found = 0; spin_lock_irqsave(&queue->queue_lock, flags); list_for_each(l, &queue->head) { QE_t *q = list_entry(l, QE_t, list); if (q->SCpnt == SCpnt) { __queue_remove(queue, l); found = 1; break; } } spin_unlock_irqrestore(&queue->queue_lock, flags); return found; } EXPORT_SYMBOL(queue_initialise); EXPORT_SYMBOL(queue_free); EXPORT_SYMBOL(__queue_add); EXPORT_SYMBOL(queue_remove); EXPORT_SYMBOL(queue_remove_exclude); EXPORT_SYMBOL(queue_remove_tgtluntag); EXPORT_SYMBOL(queue_remove_cmd); EXPORT_SYMBOL(queue_remove_all_target); EXPORT_SYMBOL(queue_probetgtlun); MODULE_AUTHOR("Russell King"); MODULE_DESCRIPTION("SCSI command queueing"); MODULE_LICENSE("GPL");
linux-master
drivers/scsi/arm/queue.c
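A minimal usage sketch of the queue primitives above, not taken from the kernel tree: Queue_t is declared in the companion queue.h (not reproduced here), and the my_host structure and helper names below are hypothetical.

/*
 * Hedged sketch: how a host driver might embed and drive the queue API.
 * Queue_t comes from queue.h; "my_host" and the helpers are made up.
 */
#include <linux/errno.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include "queue.h"

struct my_host {
	Queue_t issue_queue;		/* commands waiting for the bus */
};

static int my_host_init(struct my_host *host)
{
	/* queue_initialise() returns non-zero on success */
	if (!queue_initialise(&host->issue_queue))
		return -ENOMEM;
	return 0;
}

static int my_host_queue(struct my_host *host, struct scsi_cmnd *SCpnt)
{
	/*
	 * Add at the tail; pass head != 0 to jump the queue, e.g. for a
	 * REQUEST SENSE, as noted above __queue_add().
	 */
	if (!__queue_add(&host->issue_queue, SCpnt, 0))
		return SCSI_MLQUEUE_HOST_BUSY;	/* all 32 entries in use */
	return 0;
}

static void my_host_run_next(struct my_host *host)
{
	/* oldest queued command, or NULL when the queue is empty */
	struct scsi_cmnd *SCpnt = queue_remove(&host->issue_queue);

	if (SCpnt) {
		/* ... start SCpnt on the hardware ... */
	}
}

static void my_host_exit(struct my_host *host)
{
	queue_free(&host->issue_queue);
}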
// SPDX-License-Identifier: GPL-2.0-only /* * linux/drivers/acorn/scsi/powertec.c * * Copyright (C) 1997-2005 Russell King */ #include <linux/module.h> #include <linux/blkdev.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/ioport.h> #include <linux/proc_fs.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/dma-mapping.h> #include <linux/pgtable.h> #include <asm/dma.h> #include <asm/ecard.h> #include <asm/io.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_host.h> #include <scsi/scsi_tcq.h> #include "fas216.h" #include "arm_scsi.h" #include <scsi/scsicam.h> #define POWERTEC_FAS216_OFFSET 0x3000 #define POWERTEC_FAS216_SHIFT 6 #define POWERTEC_INTR_STATUS 0x2000 #define POWERTEC_INTR_BIT 0x80 #define POWERTEC_RESET_CONTROL 0x1018 #define POWERTEC_RESET_BIT 1 #define POWERTEC_TERM_CONTROL 0x2018 #define POWERTEC_TERM_ENABLE 1 #define POWERTEC_INTR_CONTROL 0x101c #define POWERTEC_INTR_ENABLE 1 #define POWERTEC_INTR_DISABLE 0 #define VERSION "1.10 (19/01/2003 2.5.59)" /* * Use term=0,1,0,0,0 to turn terminators on/off. * One entry per slot. */ static int term[MAX_ECARDS] = { 1, 1, 1, 1, 1, 1, 1, 1 }; #define NR_SG 256 struct powertec_info { FAS216_Info info; struct expansion_card *ec; void __iomem *base; unsigned int term_ctl; struct scatterlist sg[NR_SG]; }; /* Prototype: void powertecscsi_irqenable(ec, irqnr) * Purpose : Enable interrupts on Powertec SCSI card * Params : ec - expansion card structure * : irqnr - interrupt number */ static void powertecscsi_irqenable(struct expansion_card *ec, int irqnr) { struct powertec_info *info = ec->irq_data; writeb(POWERTEC_INTR_ENABLE, info->base + POWERTEC_INTR_CONTROL); } /* Prototype: void powertecscsi_irqdisable(ec, irqnr) * Purpose : Disable interrupts on Powertec SCSI card * Params : ec - expansion card structure * : irqnr - interrupt number */ static void powertecscsi_irqdisable(struct expansion_card *ec, int irqnr) { struct powertec_info *info = ec->irq_data; writeb(POWERTEC_INTR_DISABLE, info->base + POWERTEC_INTR_CONTROL); } static const expansioncard_ops_t powertecscsi_ops = { .irqenable = powertecscsi_irqenable, .irqdisable = powertecscsi_irqdisable, }; /* Prototype: void powertecscsi_terminator_ctl(host, on_off) * Purpose : Turn the Powertec SCSI terminators on or off * Params : host - card to turn on/off * : on_off - !0 to turn on, 0 to turn off */ static void powertecscsi_terminator_ctl(struct Scsi_Host *host, int on_off) { struct powertec_info *info = (struct powertec_info *)host->hostdata; info->term_ctl = on_off ? 
POWERTEC_TERM_ENABLE : 0; writeb(info->term_ctl, info->base + POWERTEC_TERM_CONTROL); } /* Prototype: void powertecscsi_intr(irq, *dev_id, *regs) * Purpose : handle interrupts from Powertec SCSI card * Params : irq - interrupt number * dev_id - user-defined (Scsi_Host structure) */ static irqreturn_t powertecscsi_intr(int irq, void *dev_id) { struct powertec_info *info = dev_id; return fas216_intr(&info->info); } /* Prototype: fasdmatype_t powertecscsi_dma_setup(host, SCpnt, direction, min_type) * Purpose : initialises DMA/PIO * Params : host - host * SCpnt - command * direction - DMA on to/off of card * min_type - minimum DMA support that we must have for this transfer * Returns : type of transfer to be performed */ static fasdmatype_t powertecscsi_dma_setup(struct Scsi_Host *host, struct scsi_pointer *SCp, fasdmadir_t direction, fasdmatype_t min_type) { struct powertec_info *info = (struct powertec_info *)host->hostdata; struct device *dev = scsi_get_device(host); int dmach = info->info.scsi.dma; if (info->info.ifcfg.capabilities & FASCAP_DMA && min_type == fasdma_real_all) { int bufs, map_dir, dma_dir; bufs = copy_SCp_to_sg(&info->sg[0], SCp, NR_SG); if (direction == DMA_OUT) { map_dir = DMA_TO_DEVICE; dma_dir = DMA_MODE_WRITE; } else { map_dir = DMA_FROM_DEVICE; dma_dir = DMA_MODE_READ; } dma_map_sg(dev, info->sg, bufs, map_dir); disable_dma(dmach); set_dma_sg(dmach, info->sg, bufs); set_dma_mode(dmach, dma_dir); enable_dma(dmach); return fasdma_real_all; } /* * If we're not doing DMA, * we'll do slow PIO */ return fasdma_pio; } /* Prototype: int powertecscsi_dma_stop(host, SCpnt) * Purpose : stops DMA/PIO * Params : host - host * SCpnt - command */ static void powertecscsi_dma_stop(struct Scsi_Host *host, struct scsi_pointer *SCp) { struct powertec_info *info = (struct powertec_info *)host->hostdata; if (info->info.scsi.dma != NO_DMA) disable_dma(info->info.scsi.dma); } /* Prototype: const char *powertecscsi_info(struct Scsi_Host * host) * Purpose : returns a descriptive string about this interface, * Params : host - driver host structure to return info for. * Returns : pointer to a static buffer containing null terminated string. */ const char *powertecscsi_info(struct Scsi_Host *host) { struct powertec_info *info = (struct powertec_info *)host->hostdata; static char string[150]; sprintf(string, "%s (%s) in slot %d v%s terminators o%s", host->hostt->name, info->info.scsi.type, info->ec->slot_no, VERSION, info->term_ctl ? "n" : "ff"); return string; } /* Prototype: int powertecscsi_set_proc_info(struct Scsi_Host *host, char *buffer, int length) * Purpose : Set a driver specific function * Params : host - host to setup * : buffer - buffer containing string describing operation * : length - length of string * Returns : -EINVAL, or 0 */ static int powertecscsi_set_proc_info(struct Scsi_Host *host, char *buffer, int length) { int ret = length; if (length >= 12 && strncmp(buffer, "POWERTECSCSI", 12) == 0) { buffer += 12; length -= 12; if (length >= 5 && strncmp(buffer, "term=", 5) == 0) { if (buffer[5] == '1') powertecscsi_terminator_ctl(host, 1); else if (buffer[5] == '0') powertecscsi_terminator_ctl(host, 0); else ret = -EINVAL; } else ret = -EINVAL; } else ret = -EINVAL; return ret; } /* Prototype: int powertecscsi_proc_info(char *buffer, char **start, off_t offset, * int length, int host_no, int inout) * Purpose : Return information about the driver to a user process accessing * the /proc filesystem. 
* Params : buffer - a buffer to write information to * start - a pointer into this buffer set by this routine to the start * of the required information. * offset - offset into information that we have read up to. * length - length of buffer * inout - 0 for reading, 1 for writing. * Returns : length of data written to buffer. */ static int powertecscsi_show_info(struct seq_file *m, struct Scsi_Host *host) { struct powertec_info *info; info = (struct powertec_info *)host->hostdata; seq_printf(m, "PowerTec SCSI driver v%s\n", VERSION); fas216_print_host(&info->info, m); seq_printf(m, "Term : o%s\n", info->term_ctl ? "n" : "ff"); fas216_print_stats(&info->info, m); fas216_print_devices(&info->info, m); return 0; } static ssize_t powertecscsi_show_term(struct device *dev, struct device_attribute *attr, char *buf) { struct expansion_card *ec = ECARD_DEV(dev); struct Scsi_Host *host = ecard_get_drvdata(ec); struct powertec_info *info = (struct powertec_info *)host->hostdata; return sprintf(buf, "%d\n", info->term_ctl ? 1 : 0); } static ssize_t powertecscsi_store_term(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct expansion_card *ec = ECARD_DEV(dev); struct Scsi_Host *host = ecard_get_drvdata(ec); if (len > 1) powertecscsi_terminator_ctl(host, buf[0] != '0'); return len; } static DEVICE_ATTR(bus_term, S_IRUGO | S_IWUSR, powertecscsi_show_term, powertecscsi_store_term); static const struct scsi_host_template powertecscsi_template = { .module = THIS_MODULE, .show_info = powertecscsi_show_info, .write_info = powertecscsi_set_proc_info, .name = "PowerTec SCSI", .info = powertecscsi_info, .queuecommand = fas216_queue_command, .eh_host_reset_handler = fas216_eh_host_reset, .eh_bus_reset_handler = fas216_eh_bus_reset, .eh_device_reset_handler = fas216_eh_device_reset, .eh_abort_handler = fas216_eh_abort, .cmd_size = sizeof(struct fas216_cmd_priv), .can_queue = 8, .this_id = 7, .sg_tablesize = SG_MAX_SEGMENTS, .dma_boundary = IOMD_DMA_BOUNDARY, .cmd_per_lun = 2, .proc_name = "powertec", }; static int powertecscsi_probe(struct expansion_card *ec, const struct ecard_id *id) { struct Scsi_Host *host; struct powertec_info *info; void __iomem *base; int ret; ret = ecard_request_resources(ec); if (ret) goto out; base = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0); if (!base) { ret = -ENOMEM; goto out_region; } host = scsi_host_alloc(&powertecscsi_template, sizeof (struct powertec_info)); if (!host) { ret = -ENOMEM; goto out_region; } ecard_set_drvdata(ec, host); info = (struct powertec_info *)host->hostdata; info->base = base; powertecscsi_terminator_ctl(host, term[ec->slot_no]); info->ec = ec; info->info.scsi.io_base = base + POWERTEC_FAS216_OFFSET; info->info.scsi.io_shift = POWERTEC_FAS216_SHIFT; info->info.scsi.irq = ec->irq; info->info.scsi.dma = ec->dma; info->info.ifcfg.clockrate = 40; /* MHz */ info->info.ifcfg.select_timeout = 255; info->info.ifcfg.asyncperiod = 200; /* ns */ info->info.ifcfg.sync_max_depth = 7; info->info.ifcfg.cntl3 = CNTL3_BS8 | CNTL3_FASTSCSI | CNTL3_FASTCLK; info->info.ifcfg.disconnect_ok = 1; info->info.ifcfg.wide_max_size = 0; info->info.ifcfg.capabilities = 0; info->info.dma.setup = powertecscsi_dma_setup; info->info.dma.pseudo = NULL; info->info.dma.stop = powertecscsi_dma_stop; ec->irqaddr = base + POWERTEC_INTR_STATUS; ec->irqmask = POWERTEC_INTR_BIT; ecard_setirq(ec, &powertecscsi_ops, info); device_create_file(&ec->dev, &dev_attr_bus_term); ret = fas216_init(host); if (ret) goto out_free; ret = request_irq(ec->irq, powertecscsi_intr, 
0, "powertec", info); if (ret) { printk("scsi%d: IRQ%d not free: %d\n", host->host_no, ec->irq, ret); goto out_release; } if (info->info.scsi.dma != NO_DMA) { if (request_dma(info->info.scsi.dma, "powertec")) { printk("scsi%d: DMA%d not free, using PIO\n", host->host_no, info->info.scsi.dma); info->info.scsi.dma = NO_DMA; } else { set_dma_speed(info->info.scsi.dma, 180); info->info.ifcfg.capabilities |= FASCAP_DMA; } } ret = fas216_add(host, &ec->dev); if (ret == 0) goto out; if (info->info.scsi.dma != NO_DMA) free_dma(info->info.scsi.dma); free_irq(ec->irq, info); out_release: fas216_release(host); out_free: device_remove_file(&ec->dev, &dev_attr_bus_term); scsi_host_put(host); out_region: ecard_release_resources(ec); out: return ret; } static void powertecscsi_remove(struct expansion_card *ec) { struct Scsi_Host *host = ecard_get_drvdata(ec); struct powertec_info *info = (struct powertec_info *)host->hostdata; ecard_set_drvdata(ec, NULL); fas216_remove(host); device_remove_file(&ec->dev, &dev_attr_bus_term); if (info->info.scsi.dma != NO_DMA) free_dma(info->info.scsi.dma); free_irq(ec->irq, info); fas216_release(host); scsi_host_put(host); ecard_release_resources(ec); } static const struct ecard_id powertecscsi_cids[] = { { MANU_ALSYSTEMS, PROD_ALSYS_SCSIATAPI }, { 0xffff, 0xffff }, }; static struct ecard_driver powertecscsi_driver = { .probe = powertecscsi_probe, .remove = powertecscsi_remove, .id_table = powertecscsi_cids, .drv = { .name = "powertecscsi", }, }; static int __init powertecscsi_init(void) { return ecard_register_driver(&powertecscsi_driver); } static void __exit powertecscsi_exit(void) { ecard_remove_driver(&powertecscsi_driver); } module_init(powertecscsi_init); module_exit(powertecscsi_exit); MODULE_AUTHOR("Russell King"); MODULE_DESCRIPTION("Powertec SCSI driver"); module_param_array(term, int, NULL, 0); MODULE_PARM_DESC(term, "SCSI bus termination"); MODULE_LICENSE("GPL");
linux-master
drivers/scsi/arm/powertec.c
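A hedged user-space sketch of driving the write_info hook above. As written, the parser expects "term=" to follow the "POWERTECSCSI" keyword immediately, with no space; the /proc path assumes the usual /proc/scsi/<proc_name>/<host_no> layout (proc_name "powertec") and host number 0, so adjust for the actual host on your system.

/*
 * Hedged sketch, not kernel code: toggle the on-board terminators at run
 * time through the driver's write_info handler.  The path and host number
 * below are assumptions about the standard SCSI /proc layout.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/scsi/powertec/0", "w");

	if (!f) {
		perror("/proc/scsi/powertec/0");
		return 1;
	}
	/* "...term=1" enables the terminators, "...term=0" disables them */
	fputs("POWERTECSCSIterm=1", f);
	fclose(f);
	return 0;
}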
// SPDX-License-Identifier: GPL-2.0-only /* * Generic Generic NCR5380 driver * * Copyright 1995-2002, Russell King */ #include <linux/module.h> #include <linux/ioport.h> #include <linux/blkdev.h> #include <linux/init.h> #include <asm/ecard.h> #include <asm/io.h> #include <scsi/scsi_host.h> #define priv(host) ((struct NCR5380_hostdata *)(host)->hostdata) #define NCR5380_read(reg) cumanascsi_read(hostdata, reg) #define NCR5380_write(reg, value) cumanascsi_write(hostdata, reg, value) #define NCR5380_dma_xfer_len cumanascsi_dma_xfer_len #define NCR5380_dma_recv_setup cumanascsi_pread #define NCR5380_dma_send_setup cumanascsi_pwrite #define NCR5380_dma_residual NCR5380_dma_residual_none #define NCR5380_intr cumanascsi_intr #define NCR5380_queue_command cumanascsi_queue_command #define NCR5380_info cumanascsi_info #define NCR5380_implementation_fields \ unsigned ctrl struct NCR5380_hostdata; static u8 cumanascsi_read(struct NCR5380_hostdata *, unsigned int); static void cumanascsi_write(struct NCR5380_hostdata *, unsigned int, u8); #include "../NCR5380.h" #define CTRL 0x16fc #define STAT 0x2004 #define L(v) (((v)<<16)|((v) & 0x0000ffff)) #define H(v) (((v)>>16)|((v) & 0xffff0000)) static inline int cumanascsi_pwrite(struct NCR5380_hostdata *hostdata, unsigned char *addr, int len) { unsigned long *laddr; u8 __iomem *base = hostdata->io; u8 __iomem *dma = hostdata->pdma_io + 0x2000; if(!len) return 0; writeb(0x02, base + CTRL); laddr = (unsigned long *)addr; while(len >= 32) { unsigned int status; unsigned long v; status = readb(base + STAT); if(status & 0x80) goto end; if(!(status & 0x40)) continue; v=*laddr++; writew(L(v), dma); writew(H(v), dma); v=*laddr++; writew(L(v), dma); writew(H(v), dma); v=*laddr++; writew(L(v), dma); writew(H(v), dma); v=*laddr++; writew(L(v), dma); writew(H(v), dma); v=*laddr++; writew(L(v), dma); writew(H(v), dma); v=*laddr++; writew(L(v), dma); writew(H(v), dma); v=*laddr++; writew(L(v), dma); writew(H(v), dma); v=*laddr++; writew(L(v), dma); writew(H(v), dma); len -= 32; if(len == 0) break; } addr = (unsigned char *)laddr; writeb(0x12, base + CTRL); while(len > 0) { unsigned int status; status = readb(base + STAT); if(status & 0x80) goto end; if(status & 0x40) { writeb(*addr++, dma); if(--len == 0) break; } status = readb(base + STAT); if(status & 0x80) goto end; if(status & 0x40) { writeb(*addr++, dma); if(--len == 0) break; } } end: writeb(hostdata->ctrl | 0x40, base + CTRL); if (len) return -1; return 0; } static inline int cumanascsi_pread(struct NCR5380_hostdata *hostdata, unsigned char *addr, int len) { unsigned long *laddr; u8 __iomem *base = hostdata->io; u8 __iomem *dma = hostdata->pdma_io + 0x2000; if(!len) return 0; writeb(0x00, base + CTRL); laddr = (unsigned long *)addr; while(len >= 32) { unsigned int status; status = readb(base + STAT); if(status & 0x80) goto end; if(!(status & 0x40)) continue; *laddr++ = readw(dma) | (readw(dma) << 16); *laddr++ = readw(dma) | (readw(dma) << 16); *laddr++ = readw(dma) | (readw(dma) << 16); *laddr++ = readw(dma) | (readw(dma) << 16); *laddr++ = readw(dma) | (readw(dma) << 16); *laddr++ = readw(dma) | (readw(dma) << 16); *laddr++ = readw(dma) | (readw(dma) << 16); *laddr++ = readw(dma) | (readw(dma) << 16); len -= 32; if(len == 0) break; } addr = (unsigned char *)laddr; writeb(0x10, base + CTRL); while(len > 0) { unsigned int status; status = readb(base + STAT); if(status & 0x80) goto end; if(status & 0x40) { *addr++ = readb(dma); if(--len == 0) break; } status = readb(base + STAT); if(status & 0x80) goto end; 
if(status & 0x40) { *addr++ = readb(dma); if(--len == 0) break; } } end: writeb(hostdata->ctrl | 0x40, base + CTRL); if (len) return -1; return 0; } static int cumanascsi_dma_xfer_len(struct NCR5380_hostdata *hostdata, struct scsi_cmnd *cmd) { return cmd->transfersize; } static u8 cumanascsi_read(struct NCR5380_hostdata *hostdata, unsigned int reg) { u8 __iomem *base = hostdata->io; u8 val; writeb(0, base + CTRL); val = readb(base + 0x2100 + (reg << 2)); hostdata->ctrl = 0x40; writeb(0x40, base + CTRL); return val; } static void cumanascsi_write(struct NCR5380_hostdata *hostdata, unsigned int reg, u8 value) { u8 __iomem *base = hostdata->io; writeb(0, base + CTRL); writeb(value, base + 0x2100 + (reg << 2)); hostdata->ctrl = 0x40; writeb(0x40, base + CTRL); } #include "../NCR5380.c" static const struct scsi_host_template cumanascsi_template = { .module = THIS_MODULE, .name = "Cumana 16-bit SCSI", .info = cumanascsi_info, .queuecommand = cumanascsi_queue_command, .eh_abort_handler = NCR5380_abort, .eh_host_reset_handler = NCR5380_host_reset, .can_queue = 16, .this_id = 7, .sg_tablesize = SG_ALL, .cmd_per_lun = 2, .proc_name = "CumanaSCSI-1", .cmd_size = sizeof(struct NCR5380_cmd), .max_sectors = 128, .dma_boundary = PAGE_SIZE - 1, }; static int cumanascsi1_probe(struct expansion_card *ec, const struct ecard_id *id) { struct Scsi_Host *host; int ret; ret = ecard_request_resources(ec); if (ret) goto out; host = scsi_host_alloc(&cumanascsi_template, sizeof(struct NCR5380_hostdata)); if (!host) { ret = -ENOMEM; goto out_release; } priv(host)->io = ioremap(ecard_resource_start(ec, ECARD_RES_IOCSLOW), ecard_resource_len(ec, ECARD_RES_IOCSLOW)); priv(host)->pdma_io = ioremap(ecard_resource_start(ec, ECARD_RES_MEMC), ecard_resource_len(ec, ECARD_RES_MEMC)); if (!priv(host)->io || !priv(host)->pdma_io) { ret = -ENOMEM; goto out_unmap; } host->irq = ec->irq; ret = NCR5380_init(host, FLAG_DMA_FIXUP | FLAG_LATE_DMA_SETUP); if (ret) goto out_unmap; NCR5380_maybe_reset_bus(host); priv(host)->ctrl = 0; writeb(0, priv(host)->io + CTRL); ret = request_irq(host->irq, cumanascsi_intr, 0, "CumanaSCSI-1", host); if (ret) { printk("scsi%d: IRQ%d not free: %d\n", host->host_no, host->irq, ret); goto out_exit; } ret = scsi_add_host(host, &ec->dev); if (ret) goto out_free_irq; scsi_scan_host(host); goto out; out_free_irq: free_irq(host->irq, host); out_exit: NCR5380_exit(host); out_unmap: iounmap(priv(host)->io); iounmap(priv(host)->pdma_io); scsi_host_put(host); out_release: ecard_release_resources(ec); out: return ret; } static void cumanascsi1_remove(struct expansion_card *ec) { struct Scsi_Host *host = ecard_get_drvdata(ec); void __iomem *base = priv(host)->io; void __iomem *dma = priv(host)->pdma_io; ecard_set_drvdata(ec, NULL); scsi_remove_host(host); free_irq(host->irq, host); NCR5380_exit(host); scsi_host_put(host); iounmap(base); iounmap(dma); ecard_release_resources(ec); } static const struct ecard_id cumanascsi1_cids[] = { { MANU_CUMANA, PROD_CUMANA_SCSI_1 }, { 0xffff, 0xffff } }; static struct ecard_driver cumanascsi1_driver = { .probe = cumanascsi1_probe, .remove = cumanascsi1_remove, .id_table = cumanascsi1_cids, .drv = { .name = "cumanascsi1", }, }; static int __init cumanascsi_init(void) { return ecard_register_driver(&cumanascsi1_driver); } static void __exit cumanascsi_exit(void) { ecard_remove_driver(&cumanascsi1_driver); } module_init(cumanascsi_init); module_exit(cumanascsi_exit); MODULE_DESCRIPTION("Cumana SCSI-1 driver for Acorn machines"); MODULE_LICENSE("GPL");
linux-master
drivers/scsi/arm/cumana_1.c
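A small stand-alone check of the L()/H() halfword packing used by the pseudo-DMA loops above: since writew() transfers only the low 16 bits of its argument, the card receives the low halfword of each 32-bit word first, then the high halfword. This is illustrative arithmetic only, not driver code.

/*
 * Hedged illustration: what a 16-bit writew() of L(v) and H(v) actually
 * puts on the bus for a 32-bit word v.
 */
#include <stdio.h>
#include <stdint.h>

#define L(v) (((v) << 16) | ((v) & 0x0000ffff))
#define H(v) (((v) >> 16) | ((v) & 0xffff0000))

int main(void)
{
	uint32_t v = 0x12345678;

	/* low 16 bits of each macro, i.e. what writew() emits */
	printf("L(v) & 0xffff = %04x\n", (unsigned)(L(v) & 0xffff)); /* 5678 */
	printf("H(v) & 0xffff = %04x\n", (unsigned)(H(v) & 0xffff)); /* 1234 */
	return 0;
}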
// SPDX-License-Identifier: GPL-2.0-only /* * linux/drivers/acorn/scsi/msgqueue.c * * Copyright (C) 1997-1998 Russell King * * message queue handling */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/stddef.h> #include <linux/init.h> #include "msgqueue.h" /* * Function: struct msgqueue_entry *mqe_alloc(MsgQueue_t *msgq) * Purpose : Allocate a message queue entry * Params : msgq - message queue to claim entry for * Returns : message queue entry or NULL. */ static struct msgqueue_entry *mqe_alloc(MsgQueue_t *msgq) { struct msgqueue_entry *mq; if ((mq = msgq->free) != NULL) msgq->free = mq->next; return mq; } /* * Function: void mqe_free(MsgQueue_t *msgq, struct msgqueue_entry *mq) * Purpose : free a message queue entry * Params : msgq - message queue to free entry from * mq - message queue entry to free */ static void mqe_free(MsgQueue_t *msgq, struct msgqueue_entry *mq) { if (mq) { mq->next = msgq->free; msgq->free = mq; } } /* * Function: void msgqueue_initialise(MsgQueue_t *msgq) * Purpose : initialise a message queue * Params : msgq - queue to initialise */ void msgqueue_initialise(MsgQueue_t *msgq) { int i; msgq->qe = NULL; msgq->free = &msgq->entries[0]; for (i = 0; i < NR_MESSAGES; i++) msgq->entries[i].next = &msgq->entries[i + 1]; msgq->entries[NR_MESSAGES - 1].next = NULL; } /* * Function: void msgqueue_free(MsgQueue_t *msgq) * Purpose : free a queue * Params : msgq - queue to free */ void msgqueue_free(MsgQueue_t *msgq) { } /* * Function: int msgqueue_msglength(MsgQueue_t *msgq) * Purpose : calculate the total length of all messages on the message queue * Params : msgq - queue to examine * Returns : number of bytes of messages in queue */ int msgqueue_msglength(MsgQueue_t *msgq) { struct msgqueue_entry *mq = msgq->qe; int length = 0; for (mq = msgq->qe; mq; mq = mq->next) length += mq->msg.length; return length; } /* * Function: struct message *msgqueue_getmsg(MsgQueue_t *msgq, int msgno) * Purpose : return a message * Params : msgq - queue to obtain message from * : msgno - message number * Returns : pointer to message string, or NULL */ struct message *msgqueue_getmsg(MsgQueue_t *msgq, int msgno) { struct msgqueue_entry *mq; for (mq = msgq->qe; mq && msgno; mq = mq->next, msgno--); return mq ? &mq->msg : NULL; } /* * Function: int msgqueue_addmsg(MsgQueue_t *msgq, int length, ...) * Purpose : add a message onto a message queue * Params : msgq - queue to add message on * length - length of message * ... - message bytes * Returns : != 0 if successful */ int msgqueue_addmsg(MsgQueue_t *msgq, int length, ...) { struct msgqueue_entry *mq = mqe_alloc(msgq); va_list ap; if (mq) { struct msgqueue_entry **mqp; int i; va_start(ap, length); for (i = 0; i < length; i++) mq->msg.msg[i] = va_arg(ap, unsigned int); va_end(ap); mq->msg.length = length; mq->msg.fifo = 0; mq->next = NULL; mqp = &msgq->qe; while (*mqp) mqp = &(*mqp)->next; *mqp = mq; } return mq != NULL; } /* * Function: void msgqueue_flush(MsgQueue_t *msgq) * Purpose : flush all messages from message queue * Params : msgq - queue to flush */ void msgqueue_flush(MsgQueue_t *msgq) { struct msgqueue_entry *mq, *mqnext; for (mq = msgq->qe; mq; mq = mqnext) { mqnext = mq->next; mqe_free(msgq, mq); } msgq->qe = NULL; } EXPORT_SYMBOL(msgqueue_initialise); EXPORT_SYMBOL(msgqueue_free); EXPORT_SYMBOL(msgqueue_msglength); EXPORT_SYMBOL(msgqueue_getmsg); EXPORT_SYMBOL(msgqueue_addmsg); EXPORT_SYMBOL(msgqueue_flush); MODULE_AUTHOR("Russell King"); MODULE_DESCRIPTION("SCSI message queue handling"); MODULE_LICENSE("GPL");
linux-master
drivers/scsi/arm/msgqueue.c
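A minimal usage sketch of the message-queue API, mirroring how fas216.c (below) queues an SDTR negotiation. MsgQueue_t and struct message come from msgqueue.h (not reproduced here); the period/offset values 50 and 8 are illustrative only.

/*
 * Hedged sketch, not taken from the kernel tree: queue one extended SDTR
 * message, report its length, walk the queue, then flush it.
 */
#include <linux/printk.h>
#include <scsi/scsi.h>
#include "msgqueue.h"

static void example_sdtr_messages(void)
{
	MsgQueue_t msgs;
	struct message *msg;
	int msgnr = 0;

	msgqueue_initialise(&msgs);

	/* EXTENDED MESSAGE, length 3, SDTR, period (4ns units), offset */
	msgqueue_addmsg(&msgs, 5, EXTENDED_MESSAGE, 3, EXTENDED_SDTR, 50, 8);

	/* 5 bytes queued in total */
	pr_debug("queued %d message bytes\n", msgqueue_msglength(&msgs));

	/* messages come back in the order they were added */
	while ((msg = msgqueue_getmsg(&msgs, msgnr++)) != NULL)
		pr_debug("message 0x%02x, %d bytes\n", msg->msg[0], msg->length);

	/* drop everything, e.g. after a disconnect or bus reset */
	msgqueue_flush(&msgs);
	msgqueue_free(&msgs);
}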
// SPDX-License-Identifier: GPL-2.0-only /* * linux/drivers/acorn/scsi/fas216.c * * Copyright (C) 1997-2003 Russell King * * Based on information in qlogicfas.c by Tom Zerucha, Michael Griffith, and * other sources, including: * the AMD Am53CF94 data sheet * the AMD Am53C94 data sheet * * This is a generic driver. To use it, have a look at cumana_2.c. You * should define your own structure that overlays FAS216_Info, eg: * struct my_host_data { * FAS216_Info info; * ... my host specific data ... * }; * * Changelog: * 30-08-1997 RMK Created * 14-09-1997 RMK Started disconnect support * 08-02-1998 RMK Corrected real DMA support * 15-02-1998 RMK Started sync xfer support * 06-04-1998 RMK Tightened conditions for printing incomplete * transfers * 02-05-1998 RMK Added extra checks in fas216_reset * 24-05-1998 RMK Fixed synchronous transfers with period >= 200ns * 27-06-1998 RMK Changed asm/delay.h to linux/delay.h * 26-08-1998 RMK Improved message support wrt MESSAGE_REJECT * 02-04-2000 RMK Converted to use the new error handling, and * automatically request sense data upon check * condition status from targets. */ #include <linux/module.h> #include <linux/blkdev.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/ioport.h> #include <linux/proc_fs.h> #include <linux/delay.h> #include <linux/bitops.h> #include <linux/init.h> #include <linux/interrupt.h> #include <asm/dma.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/ecard.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_dbg.h> #include <scsi/scsi_device.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_host.h> #include <scsi/scsi_tcq.h> #include "fas216.h" #include "arm_scsi.h" /* NOTE: SCSI2 Synchronous transfers *require* DMA according to * the data sheet. This restriction is crazy, especially when * you only want to send 16 bytes! What were the guys who * designed this chip on at that time? Did they read the SCSI2 * spec at all? The following sections are taken from the SCSI2 * standard (s2r10) concerning this: * * > IMPLEMENTORS NOTES: * > (1) Re-negotiation at every selection is not recommended, since a * > significant performance impact is likely. * * > The implied synchronous agreement shall remain in effect until a BUS DEVICE * > RESET message is received, until a hard reset condition occurs, or until one * > of the two SCSI devices elects to modify the agreement. The default data * > transfer mode is asynchronous data transfer mode. The default data transfer * > mode is entered at power on, after a BUS DEVICE RESET message, or after a hard * > reset condition. * * In total, this means that once you have elected to use synchronous * transfers, you must always use DMA. 
* * I was thinking that this was a good chip until I found this restriction ;( */ #define SCSI2_SYNC #undef DEBUG_CONNECT #undef DEBUG_MESSAGES #undef CHECK_STRUCTURE #define LOG_CONNECT (1 << 0) #define LOG_BUSSERVICE (1 << 1) #define LOG_FUNCTIONDONE (1 << 2) #define LOG_MESSAGES (1 << 3) #define LOG_BUFFER (1 << 4) #define LOG_ERROR (1 << 8) static int level_mask = LOG_ERROR; module_param(level_mask, int, 0644); #ifndef MODULE static int __init fas216_log_setup(char *str) { char *s; level_mask = 0; while ((s = strsep(&str, ",")) != NULL) { switch (s[0]) { case 'a': if (strcmp(s, "all") == 0) level_mask |= -1; break; case 'b': if (strncmp(s, "bus", 3) == 0) level_mask |= LOG_BUSSERVICE; if (strncmp(s, "buf", 3) == 0) level_mask |= LOG_BUFFER; break; case 'c': level_mask |= LOG_CONNECT; break; case 'e': level_mask |= LOG_ERROR; break; case 'm': level_mask |= LOG_MESSAGES; break; case 'n': if (strcmp(s, "none") == 0) level_mask = 0; break; case 's': level_mask |= LOG_FUNCTIONDONE; break; } } return 1; } __setup("fas216_logging=", fas216_log_setup); #endif static inline unsigned char fas216_readb(FAS216_Info *info, unsigned int reg) { unsigned int off = reg << info->scsi.io_shift; return readb(info->scsi.io_base + off); } static inline void fas216_writeb(FAS216_Info *info, unsigned int reg, unsigned int val) { unsigned int off = reg << info->scsi.io_shift; writeb(val, info->scsi.io_base + off); } static void fas216_dumpstate(FAS216_Info *info) { unsigned char is, stat, inst; is = fas216_readb(info, REG_IS); stat = fas216_readb(info, REG_STAT); inst = fas216_readb(info, REG_INST); printk("FAS216: CTCL=%02X CTCM=%02X CMD=%02X STAT=%02X" " INST=%02X IS=%02X CFIS=%02X", fas216_readb(info, REG_CTCL), fas216_readb(info, REG_CTCM), fas216_readb(info, REG_CMD), stat, inst, is, fas216_readb(info, REG_CFIS)); printk(" CNTL1=%02X CNTL2=%02X CNTL3=%02X CTCH=%02X\n", fas216_readb(info, REG_CNTL1), fas216_readb(info, REG_CNTL2), fas216_readb(info, REG_CNTL3), fas216_readb(info, REG_CTCH)); } static void print_SCp(struct scsi_pointer *SCp, const char *prefix, const char *suffix) { printk("%sptr %p this_residual 0x%x buffer %p buffers_residual 0x%x%s", prefix, SCp->ptr, SCp->this_residual, SCp->buffer, SCp->buffers_residual, suffix); } #ifdef CHECK_STRUCTURE static void fas216_dumpinfo(FAS216_Info *info) { static int used = 0; int i; if (used++) return; printk("FAS216_Info=\n"); printk(" { magic_start=%lX host=%p SCpnt=%p origSCpnt=%p\n", info->magic_start, info->host, info->SCpnt, info->origSCpnt); printk(" scsi={ io_shift=%X irq=%X cfg={ %X %X %X %X }\n", info->scsi.io_shift, info->scsi.irq, info->scsi.cfg[0], info->scsi.cfg[1], info->scsi.cfg[2], info->scsi.cfg[3]); printk(" type=%p phase=%X\n", info->scsi.type, info->scsi.phase); print_SCp(&info->scsi.SCp, " SCp={ ", " }\n"); printk(" msgs async_stp=%X disconnectable=%d aborting=%d }\n", info->scsi.async_stp, info->scsi.disconnectable, info->scsi.aborting); printk(" stats={ queues=%X removes=%X fins=%X reads=%X writes=%X miscs=%X\n" " disconnects=%X aborts=%X bus_resets=%X host_resets=%X}\n", info->stats.queues, info->stats.removes, info->stats.fins, info->stats.reads, info->stats.writes, info->stats.miscs, info->stats.disconnects, info->stats.aborts, info->stats.bus_resets, info->stats.host_resets); printk(" ifcfg={ clockrate=%X select_timeout=%X asyncperiod=%X sync_max_depth=%X }\n", info->ifcfg.clockrate, info->ifcfg.select_timeout, info->ifcfg.asyncperiod, info->ifcfg.sync_max_depth); for (i = 0; i < 8; i++) { printk(" busyluns[%d]=%08lx dev[%d]={ 
disconnect_ok=%d stp=%X sof=%X sync_state=%X }\n", i, info->busyluns[i], i, info->device[i].disconnect_ok, info->device[i].stp, info->device[i].sof, info->device[i].sync_state); } printk(" dma={ transfer_type=%X setup=%p pseudo=%p stop=%p }\n", info->dma.transfer_type, info->dma.setup, info->dma.pseudo, info->dma.stop); printk(" internal_done=%X magic_end=%lX }\n", info->internal_done, info->magic_end); } static void __fas216_checkmagic(FAS216_Info *info, const char *func) { int corruption = 0; if (info->magic_start != MAGIC) { printk(KERN_CRIT "FAS216 Error: magic at start corrupted\n"); corruption++; } if (info->magic_end != MAGIC) { printk(KERN_CRIT "FAS216 Error: magic at end corrupted\n"); corruption++; } if (corruption) { fas216_dumpinfo(info); panic("scsi memory space corrupted in %s", func); } } #define fas216_checkmagic(info) __fas216_checkmagic((info), __func__) #else #define fas216_checkmagic(info) #endif static const char *fas216_bus_phase(int stat) { static const char *phases[] = { "DATA OUT", "DATA IN", "COMMAND", "STATUS", "MISC OUT", "MISC IN", "MESG OUT", "MESG IN" }; return phases[stat & STAT_BUSMASK]; } static const char *fas216_drv_phase(FAS216_Info *info) { static const char *phases[] = { [PHASE_IDLE] = "idle", [PHASE_SELECTION] = "selection", [PHASE_COMMAND] = "command", [PHASE_DATAOUT] = "data out", [PHASE_DATAIN] = "data in", [PHASE_MSGIN] = "message in", [PHASE_MSGIN_DISCONNECT]= "disconnect", [PHASE_MSGOUT_EXPECT] = "expect message out", [PHASE_MSGOUT] = "message out", [PHASE_STATUS] = "status", [PHASE_DONE] = "done", }; if (info->scsi.phase < ARRAY_SIZE(phases) && phases[info->scsi.phase]) return phases[info->scsi.phase]; return "???"; } static char fas216_target(FAS216_Info *info) { if (info->SCpnt) return '0' + info->SCpnt->device->id; else return 'H'; } static void fas216_do_log(FAS216_Info *info, char target, char *fmt, va_list ap) { static char buf[1024]; vsnprintf(buf, sizeof(buf), fmt, ap); printk("scsi%d.%c: %s", info->host->host_no, target, buf); } static void fas216_log_command(FAS216_Info *info, int level, struct scsi_cmnd *SCpnt, char *fmt, ...) { va_list args; if (level != 0 && !(level & level_mask)) return; va_start(args, fmt); fas216_do_log(info, '0' + SCpnt->device->id, fmt, args); va_end(args); scsi_print_command(SCpnt); } static void fas216_log_target(FAS216_Info *info, int level, int target, char *fmt, ...) { va_list args; if (level != 0 && !(level & level_mask)) return; if (target < 0) target = 'H'; else target += '0'; va_start(args, fmt); fas216_do_log(info, target, fmt, args); va_end(args); printk("\n"); } static void fas216_log(FAS216_Info *info, int level, char *fmt, ...) 
{ va_list args; if (level != 0 && !(level & level_mask)) return; va_start(args, fmt); fas216_do_log(info, fas216_target(info), fmt, args); va_end(args); printk("\n"); } #define PH_SIZE 32 static struct { int stat, ssr, isr, ph; } ph_list[PH_SIZE]; static int ph_ptr; static void add_debug_list(int stat, int ssr, int isr, int ph) { ph_list[ph_ptr].stat = stat; ph_list[ph_ptr].ssr = ssr; ph_list[ph_ptr].isr = isr; ph_list[ph_ptr].ph = ph; ph_ptr = (ph_ptr + 1) & (PH_SIZE-1); } static struct { int command; void *from; } cmd_list[8]; static int cmd_ptr; static void fas216_cmd(FAS216_Info *info, unsigned int command) { cmd_list[cmd_ptr].command = command; cmd_list[cmd_ptr].from = __builtin_return_address(0); cmd_ptr = (cmd_ptr + 1) & 7; fas216_writeb(info, REG_CMD, command); } static void print_debug_list(void) { int i; i = ph_ptr; printk(KERN_ERR "SCSI IRQ trail\n"); do { printk(" %02x:%02x:%02x:%1x", ph_list[i].stat, ph_list[i].ssr, ph_list[i].isr, ph_list[i].ph); i = (i + 1) & (PH_SIZE - 1); if (((i ^ ph_ptr) & 7) == 0) printk("\n"); } while (i != ph_ptr); if ((i ^ ph_ptr) & 7) printk("\n"); i = cmd_ptr; printk(KERN_ERR "FAS216 commands: "); do { printk("%02x:%p ", cmd_list[i].command, cmd_list[i].from); i = (i + 1) & 7; } while (i != cmd_ptr); printk("\n"); } static void fas216_done(FAS216_Info *info, unsigned int result); /** * fas216_get_last_msg - retrive last message from the list * @info: interface to search * @pos: current fifo position * * Retrieve a last message from the list, using position in fifo. */ static inline unsigned short fas216_get_last_msg(FAS216_Info *info, int pos) { unsigned short packed_msg = NOP; struct message *msg; int msgnr = 0; while ((msg = msgqueue_getmsg(&info->scsi.msgs, msgnr++)) != NULL) { if (pos >= msg->fifo) break; } if (msg) { if (msg->msg[0] == EXTENDED_MESSAGE) packed_msg = EXTENDED_MESSAGE | msg->msg[2] << 8; else packed_msg = msg->msg[0]; } fas216_log(info, LOG_MESSAGES, "Message: %04x found at position %02x\n", packed_msg, pos); return packed_msg; } /** * fas216_syncperiod - calculate STP register value * @info: state structure for interface connected to device * @ns: period in ns (between subsequent bytes) * * Calculate value to be loaded into the STP register for a given period * in ns. Returns a value suitable for REG_STP. */ static int fas216_syncperiod(FAS216_Info *info, int ns) { int value = (info->ifcfg.clockrate * ns) / 1000; fas216_checkmagic(info); if (value < 4) value = 4; else if (value > 35) value = 35; return value & 31; } /** * fas216_set_sync - setup FAS216 chip for specified transfer period. * @info: state structure for interface connected to device * @target: target * * Correctly setup FAS216 chip for specified transfer period. * Notes : we need to switch the chip out of FASTSCSI mode if we have * a transfer period >= 200ns - otherwise the chip will violate * the SCSI timings. */ static void fas216_set_sync(FAS216_Info *info, int target) { unsigned int cntl3; fas216_writeb(info, REG_SOF, info->device[target].sof); fas216_writeb(info, REG_STP, info->device[target].stp); cntl3 = info->scsi.cfg[2]; if (info->device[target].period >= (200 / 4)) cntl3 = cntl3 & ~CNTL3_FASTSCSI; fas216_writeb(info, REG_CNTL3, cntl3); } /* Synchronous transfer support * * Note: The SCSI II r10 spec says (5.6.12): * * (2) Due to historical problems with early host adapters that could * not accept an SDTR message, some targets may not initiate synchronous * negotiation after a power cycle as required by this standard. 
Host * adapters that support synchronous mode may avoid the ensuing failure * modes when the target is independently power cycled by initiating a * synchronous negotiation on each REQUEST SENSE and INQUIRY command. * This approach increases the SCSI bus overhead and is not recommended * for new implementations. The correct method is to respond to an * SDTR message with a MESSAGE REJECT message if the either the * initiator or target devices does not support synchronous transfers * or does not want to negotiate for synchronous transfers at the time. * Using the correct method assures compatibility with wide data * transfers and future enhancements. * * We will always initiate a synchronous transfer negotiation request on * every INQUIRY or REQUEST SENSE message, unless the target itself has * at some point performed a synchronous transfer negotiation request, or * we have synchronous transfers disabled for this device. */ /** * fas216_handlesync - Handle a synchronous transfer message * @info: state structure for interface * @msg: message from target * * Handle a synchronous transfer message from the target */ static void fas216_handlesync(FAS216_Info *info, char *msg) { struct fas216_device *dev = &info->device[info->SCpnt->device->id]; enum { sync, async, none, reject } res = none; #ifdef SCSI2_SYNC switch (msg[0]) { case MESSAGE_REJECT: /* Synchronous transfer request failed. * Note: SCSI II r10: * * SCSI devices that are capable of synchronous * data transfers shall not respond to an SDTR * message with a MESSAGE REJECT message. * * Hence, if we get this condition, we disable * negotiation for this device. */ if (dev->sync_state == neg_inprogress) { dev->sync_state = neg_invalid; res = async; } break; case EXTENDED_MESSAGE: switch (dev->sync_state) { /* We don't accept synchronous transfer requests. * Respond with a MESSAGE_REJECT to prevent a * synchronous transfer agreement from being reached. */ case neg_invalid: res = reject; break; /* We were not negotiating a synchronous transfer, * but the device sent us a negotiation request. * Honour the request by sending back a SDTR * message containing our capability, limited by * the targets capability. */ default: fas216_cmd(info, CMD_SETATN); if (msg[4] > info->ifcfg.sync_max_depth) msg[4] = info->ifcfg.sync_max_depth; if (msg[3] < 1000 / info->ifcfg.clockrate) msg[3] = 1000 / info->ifcfg.clockrate; msgqueue_flush(&info->scsi.msgs); msgqueue_addmsg(&info->scsi.msgs, 5, EXTENDED_MESSAGE, 3, EXTENDED_SDTR, msg[3], msg[4]); info->scsi.phase = PHASE_MSGOUT_EXPECT; /* This is wrong. The agreement is not in effect * until this message is accepted by the device */ dev->sync_state = neg_targcomplete; res = sync; break; /* We initiated the synchronous transfer negotiation, * and have successfully received a response from the * target. The synchronous transfer agreement has been * reached. Note: if the values returned are out of our * bounds, we must reject the message. 
*/ case neg_inprogress: res = reject; if (msg[4] <= info->ifcfg.sync_max_depth && msg[3] >= 1000 / info->ifcfg.clockrate) { dev->sync_state = neg_complete; res = sync; } break; } } #else res = reject; #endif switch (res) { case sync: dev->period = msg[3]; dev->sof = msg[4]; dev->stp = fas216_syncperiod(info, msg[3] * 4); fas216_set_sync(info, info->SCpnt->device->id); break; case reject: fas216_cmd(info, CMD_SETATN); msgqueue_flush(&info->scsi.msgs); msgqueue_addmsg(&info->scsi.msgs, 1, MESSAGE_REJECT); info->scsi.phase = PHASE_MSGOUT_EXPECT; fallthrough; case async: dev->period = info->ifcfg.asyncperiod / 4; dev->sof = 0; dev->stp = info->scsi.async_stp; fas216_set_sync(info, info->SCpnt->device->id); break; case none: break; } } /** * fas216_updateptrs - update data pointers after transfer suspended/paused * @info: interface's local pointer to update * @bytes_transferred: number of bytes transferred * * Update data pointers after transfer suspended/paused */ static void fas216_updateptrs(FAS216_Info *info, int bytes_transferred) { struct scsi_pointer *SCp = &info->scsi.SCp; fas216_checkmagic(info); BUG_ON(bytes_transferred < 0); SCp->phase -= bytes_transferred; while (bytes_transferred != 0) { if (SCp->this_residual > bytes_transferred) break; /* * We have used up this buffer. Move on to the * next buffer. */ bytes_transferred -= SCp->this_residual; if (!next_SCp(SCp) && bytes_transferred) { printk(KERN_WARNING "scsi%d.%c: out of buffers\n", info->host->host_no, '0' + info->SCpnt->device->id); return; } } SCp->this_residual -= bytes_transferred; if (SCp->this_residual) SCp->ptr += bytes_transferred; else SCp->ptr = NULL; } /** * fas216_pio - transfer data off of/on to card using programmed IO * @info: interface to transfer data to/from * @direction: direction to transfer data (DMA_OUT/DMA_IN) * * Transfer data off of/on to card using programmed IO. * Notes: this is incredibly slow. */ static void fas216_pio(FAS216_Info *info, fasdmadir_t direction) { struct scsi_pointer *SCp = &info->scsi.SCp; fas216_checkmagic(info); if (direction == DMA_OUT) fas216_writeb(info, REG_FF, get_next_SCp_byte(SCp)); else put_next_SCp_byte(SCp, fas216_readb(info, REG_FF)); if (SCp->this_residual == 0) next_SCp(SCp); } static void fas216_set_stc(FAS216_Info *info, unsigned int length) { fas216_writeb(info, REG_STCL, length); fas216_writeb(info, REG_STCM, length >> 8); fas216_writeb(info, REG_STCH, length >> 16); } static unsigned int fas216_get_ctc(FAS216_Info *info) { return fas216_readb(info, REG_CTCL) + (fas216_readb(info, REG_CTCM) << 8) + (fas216_readb(info, REG_CTCH) << 16); } /** * fas216_cleanuptransfer - clean up after a transfer has completed. * @info: interface to clean up * * Update the data pointers according to the number of bytes transferred * on the SCSI bus. */ static void fas216_cleanuptransfer(FAS216_Info *info) { unsigned long total, residual, fifo; fasdmatype_t dmatype = info->dma.transfer_type; info->dma.transfer_type = fasdma_none; /* * PIO transfers do not need to be cleaned up. */ if (dmatype == fasdma_pio || dmatype == fasdma_none) return; if (dmatype == fasdma_real_all) total = info->scsi.SCp.phase; else total = info->scsi.SCp.this_residual; residual = fas216_get_ctc(info); fifo = fas216_readb(info, REG_CFIS) & CFIS_CF; fas216_log(info, LOG_BUFFER, "cleaning up from previous " "transfer: length 0x%06x, residual 0x%x, fifo %d", total, residual, fifo); /* * If we were performing Data-Out, the transfer counter * counts down each time a byte is transferred by the * host to the FIFO. 
This means we must include the * bytes left in the FIFO from the transfer counter. */ if (info->scsi.phase == PHASE_DATAOUT) residual += fifo; fas216_updateptrs(info, total - residual); } /** * fas216_transfer - Perform a DMA/PIO transfer off of/on to card * @info: interface from which device disconnected from * * Start a DMA/PIO transfer off of/on to card */ static void fas216_transfer(FAS216_Info *info) { fasdmadir_t direction; fasdmatype_t dmatype; fas216_log(info, LOG_BUFFER, "starttransfer: buffer %p length 0x%06x reqlen 0x%06x", info->scsi.SCp.ptr, info->scsi.SCp.this_residual, info->scsi.SCp.phase); if (!info->scsi.SCp.ptr) { fas216_log(info, LOG_ERROR, "null buffer passed to " "fas216_starttransfer"); print_SCp(&info->scsi.SCp, "SCp: ", "\n"); print_SCp(arm_scsi_pointer(info->SCpnt), "Cmnd SCp: ", "\n"); return; } /* * If we have a synchronous transfer agreement in effect, we must * use DMA mode. If we are using asynchronous transfers, we may * use DMA mode or PIO mode. */ if (info->device[info->SCpnt->device->id].sof) dmatype = fasdma_real_all; else dmatype = fasdma_pio; if (info->scsi.phase == PHASE_DATAOUT) direction = DMA_OUT; else direction = DMA_IN; if (info->dma.setup) dmatype = info->dma.setup(info->host, &info->scsi.SCp, direction, dmatype); info->dma.transfer_type = dmatype; if (dmatype == fasdma_real_all) fas216_set_stc(info, info->scsi.SCp.phase); else fas216_set_stc(info, info->scsi.SCp.this_residual); switch (dmatype) { case fasdma_pio: fas216_log(info, LOG_BUFFER, "PIO transfer"); fas216_writeb(info, REG_SOF, 0); fas216_writeb(info, REG_STP, info->scsi.async_stp); fas216_cmd(info, CMD_TRANSFERINFO); fas216_pio(info, direction); break; case fasdma_pseudo: fas216_log(info, LOG_BUFFER, "pseudo transfer"); fas216_cmd(info, CMD_TRANSFERINFO | CMD_WITHDMA); info->dma.pseudo(info->host, &info->scsi.SCp, direction, info->SCpnt->transfersize); break; case fasdma_real_block: fas216_log(info, LOG_BUFFER, "block dma transfer"); fas216_cmd(info, CMD_TRANSFERINFO | CMD_WITHDMA); break; case fasdma_real_all: fas216_log(info, LOG_BUFFER, "total dma transfer"); fas216_cmd(info, CMD_TRANSFERINFO | CMD_WITHDMA); break; default: fas216_log(info, LOG_BUFFER | LOG_ERROR, "invalid FAS216 DMA type"); break; } } /** * fas216_stoptransfer - Stop a DMA transfer onto / off of the card * @info: interface from which device disconnected from * * Called when we switch away from DATA IN or DATA OUT phases. */ static void fas216_stoptransfer(FAS216_Info *info) { fas216_checkmagic(info); if (info->dma.transfer_type == fasdma_real_all || info->dma.transfer_type == fasdma_real_block) info->dma.stop(info->host, &info->scsi.SCp); fas216_cleanuptransfer(info); if (info->scsi.phase == PHASE_DATAIN) { unsigned int fifo; /* * If we were performing Data-In, then the FIFO counter * contains the number of bytes not transferred via DMA * from the on-board FIFO. Read them manually. */ fifo = fas216_readb(info, REG_CFIS) & CFIS_CF; while (fifo && info->scsi.SCp.ptr) { *info->scsi.SCp.ptr = fas216_readb(info, REG_FF); fas216_updateptrs(info, 1); fifo--; } } else { /* * After a Data-Out phase, there may be unsent * bytes left in the FIFO. Flush them out. 
*/ fas216_cmd(info, CMD_FLUSHFIFO); } } static void fas216_aborttransfer(FAS216_Info *info) { fas216_checkmagic(info); if (info->dma.transfer_type == fasdma_real_all || info->dma.transfer_type == fasdma_real_block) info->dma.stop(info->host, &info->scsi.SCp); info->dma.transfer_type = fasdma_none; fas216_cmd(info, CMD_FLUSHFIFO); } static void fas216_kick(FAS216_Info *info); /** * fas216_disconnected_intr - handle device disconnection * @info: interface from which device disconnected from * * Handle device disconnection */ static void fas216_disconnect_intr(FAS216_Info *info) { unsigned long flags; fas216_checkmagic(info); fas216_log(info, LOG_CONNECT, "disconnect phase=%02x", info->scsi.phase); msgqueue_flush(&info->scsi.msgs); switch (info->scsi.phase) { case PHASE_SELECTION: /* while selecting - no target */ case PHASE_SELSTEPS: fas216_done(info, DID_NO_CONNECT); break; case PHASE_MSGIN_DISCONNECT: /* message in - disconnecting */ info->scsi.disconnectable = 1; info->scsi.phase = PHASE_IDLE; info->stats.disconnects += 1; spin_lock_irqsave(&info->host_lock, flags); if (info->scsi.phase == PHASE_IDLE) fas216_kick(info); spin_unlock_irqrestore(&info->host_lock, flags); break; case PHASE_DONE: /* at end of command - complete */ fas216_done(info, DID_OK); break; case PHASE_MSGOUT: /* message out - possible ABORT message */ if (fas216_get_last_msg(info, info->scsi.msgin_fifo) == ABORT) { info->scsi.aborting = 0; fas216_done(info, DID_ABORT); break; } fallthrough; default: /* huh? */ printk(KERN_ERR "scsi%d.%c: unexpected disconnect in phase %s\n", info->host->host_no, fas216_target(info), fas216_drv_phase(info)); print_debug_list(); fas216_stoptransfer(info); fas216_done(info, DID_ERROR); break; } } /** * fas216_reselected_intr - start reconnection of a device * @info: interface which was reselected * * Start reconnection of a device */ static void fas216_reselected_intr(FAS216_Info *info) { unsigned int cfis, i; unsigned char msg[4]; unsigned char target, lun, tag; fas216_checkmagic(info); WARN_ON(info->scsi.phase == PHASE_SELECTION || info->scsi.phase == PHASE_SELSTEPS); cfis = fas216_readb(info, REG_CFIS); fas216_log(info, LOG_CONNECT, "reconnect phase=%02x cfis=%02x", info->scsi.phase, cfis); cfis &= CFIS_CF; if (cfis < 2 || cfis > 4) { printk(KERN_ERR "scsi%d.H: incorrect number of bytes after reselect\n", info->host->host_no); goto bad_message; } for (i = 0; i < cfis; i++) msg[i] = fas216_readb(info, REG_FF); if (!(msg[0] & (1 << info->host->this_id)) || !(msg[1] & 0x80)) goto initiator_error; target = msg[0] & ~(1 << info->host->this_id); target = ffs(target) - 1; lun = msg[1] & 7; tag = 0; if (cfis >= 3) { if (msg[2] != SIMPLE_QUEUE_TAG) goto initiator_error; tag = msg[3]; } /* set up for synchronous transfers */ fas216_writeb(info, REG_SDID, target); fas216_set_sync(info, target); msgqueue_flush(&info->scsi.msgs); fas216_log(info, LOG_CONNECT, "Reconnected: target %1x lun %1x tag %02x", target, lun, tag); if (info->scsi.disconnectable && info->SCpnt) { info->scsi.disconnectable = 0; if (info->SCpnt->device->id == target && info->SCpnt->device->lun == lun && scsi_cmd_to_rq(info->SCpnt)->tag == tag) { fas216_log(info, LOG_CONNECT, "reconnected previously executing command"); } else { queue_add_cmd_tail(&info->queues.disconnected, info->SCpnt); fas216_log(info, LOG_CONNECT, "had to move command to disconnected queue"); info->SCpnt = NULL; } } if (!info->SCpnt) { info->SCpnt = queue_remove_tgtluntag(&info->queues.disconnected, target, lun, tag); fas216_log(info, LOG_CONNECT, "had to get 
command"); } if (info->SCpnt) { /* * Restore data pointer from SAVED data pointer */ info->scsi.SCp = *arm_scsi_pointer(info->SCpnt); fas216_log(info, LOG_CONNECT, "data pointers: [%p, %X]", info->scsi.SCp.ptr, info->scsi.SCp.this_residual); info->scsi.phase = PHASE_MSGIN; } else { /* * Our command structure not found - abort the * command on the target. Since we have no * record of this command, we can't send * an INITIATOR DETECTED ERROR message. */ fas216_cmd(info, CMD_SETATN); #if 0 if (tag) msgqueue_addmsg(&info->scsi.msgs, 2, ABORT_TAG, tag); else #endif msgqueue_addmsg(&info->scsi.msgs, 1, ABORT); info->scsi.phase = PHASE_MSGOUT_EXPECT; info->scsi.aborting = 1; } fas216_cmd(info, CMD_MSGACCEPTED); return; initiator_error: printk(KERN_ERR "scsi%d.H: error during reselection: bytes", info->host->host_no); for (i = 0; i < cfis; i++) printk(" %02x", msg[i]); printk("\n"); bad_message: fas216_cmd(info, CMD_SETATN); msgqueue_flush(&info->scsi.msgs); msgqueue_addmsg(&info->scsi.msgs, 1, INITIATOR_ERROR); info->scsi.phase = PHASE_MSGOUT_EXPECT; fas216_cmd(info, CMD_MSGACCEPTED); } static void fas216_parse_message(FAS216_Info *info, unsigned char *message, int msglen) { struct scsi_pointer *scsi_pointer; int i; switch (message[0]) { case COMMAND_COMPLETE: if (msglen != 1) goto unrecognised; printk(KERN_ERR "scsi%d.%c: command complete with no " "status in MESSAGE_IN?\n", info->host->host_no, fas216_target(info)); break; case SAVE_POINTERS: if (msglen != 1) goto unrecognised; /* * Save current data pointer to SAVED data pointer * SCSI II standard says that we must not acknowledge * this until we have really saved pointers. * NOTE: we DO NOT save the command nor status pointers * as required by the SCSI II standard. These always * point to the start of their respective areas. */ scsi_pointer = arm_scsi_pointer(info->SCpnt); *scsi_pointer = info->scsi.SCp; scsi_pointer->sent_command = 0; fas216_log(info, LOG_CONNECT | LOG_MESSAGES | LOG_BUFFER, "save data pointers: [%p, %X]", info->scsi.SCp.ptr, info->scsi.SCp.this_residual); break; case RESTORE_POINTERS: if (msglen != 1) goto unrecognised; /* * Restore current data pointer from SAVED data pointer */ info->scsi.SCp = *arm_scsi_pointer(info->SCpnt); fas216_log(info, LOG_CONNECT | LOG_MESSAGES | LOG_BUFFER, "restore data pointers: [%p, 0x%x]", info->scsi.SCp.ptr, info->scsi.SCp.this_residual); break; case DISCONNECT: if (msglen != 1) goto unrecognised; info->scsi.phase = PHASE_MSGIN_DISCONNECT; break; case MESSAGE_REJECT: if (msglen != 1) goto unrecognised; switch (fas216_get_last_msg(info, info->scsi.msgin_fifo)) { case EXTENDED_MESSAGE | EXTENDED_SDTR << 8: fas216_handlesync(info, message); break; default: fas216_log(info, 0, "reject, last message 0x%04x", fas216_get_last_msg(info, info->scsi.msgin_fifo)); } break; case NOP: break; case EXTENDED_MESSAGE: if (msglen < 3) goto unrecognised; switch (message[2]) { case EXTENDED_SDTR: /* Sync transfer negotiation request/reply */ fas216_handlesync(info, message); break; default: goto unrecognised; } break; default: goto unrecognised; } return; unrecognised: fas216_log(info, 0, "unrecognised message, rejecting"); printk("scsi%d.%c: message was", info->host->host_no, fas216_target(info)); for (i = 0; i < msglen; i++) printk("%s%02X", i & 31 ? " " : "\n ", message[i]); printk("\n"); /* * Something strange seems to be happening here - * I can't use SETATN since the chip gives me an * invalid command interrupt when I do. Weird. 
*/ fas216_cmd(info, CMD_NOP); fas216_dumpstate(info); fas216_cmd(info, CMD_SETATN); msgqueue_flush(&info->scsi.msgs); msgqueue_addmsg(&info->scsi.msgs, 1, MESSAGE_REJECT); info->scsi.phase = PHASE_MSGOUT_EXPECT; fas216_dumpstate(info); } static int fas216_wait_cmd(FAS216_Info *info, int cmd) { int tout; int stat; fas216_cmd(info, cmd); for (tout = 1000; tout; tout -= 1) { stat = fas216_readb(info, REG_STAT); if (stat & (STAT_INT|STAT_PARITYERROR)) break; udelay(1); } return stat; } static int fas216_get_msg_byte(FAS216_Info *info) { unsigned int stat = fas216_wait_cmd(info, CMD_MSGACCEPTED); if ((stat & STAT_INT) == 0) goto timedout; if ((stat & STAT_BUSMASK) != STAT_MESGIN) goto unexpected_phase_change; fas216_readb(info, REG_INST); stat = fas216_wait_cmd(info, CMD_TRANSFERINFO); if ((stat & STAT_INT) == 0) goto timedout; if (stat & STAT_PARITYERROR) goto parity_error; if ((stat & STAT_BUSMASK) != STAT_MESGIN) goto unexpected_phase_change; fas216_readb(info, REG_INST); return fas216_readb(info, REG_FF); timedout: fas216_log(info, LOG_ERROR, "timed out waiting for message byte"); return -1; unexpected_phase_change: fas216_log(info, LOG_ERROR, "unexpected phase change: status = %02x", stat); return -2; parity_error: fas216_log(info, LOG_ERROR, "parity error during message in phase"); return -3; } /** * fas216_message - handle a function done interrupt from FAS216 chip * @info: interface which caused function done interrupt * * Handle a function done interrupt from FAS216 chip */ static void fas216_message(FAS216_Info *info) { unsigned char *message = info->scsi.message; unsigned int msglen = 1; int msgbyte = 0; fas216_checkmagic(info); message[0] = fas216_readb(info, REG_FF); if (message[0] == EXTENDED_MESSAGE) { msgbyte = fas216_get_msg_byte(info); if (msgbyte >= 0) { message[1] = msgbyte; for (msglen = 2; msglen < message[1] + 2; msglen++) { msgbyte = fas216_get_msg_byte(info); if (msgbyte >= 0) message[msglen] = msgbyte; else break; } } } if (msgbyte == -3) goto parity_error; #ifdef DEBUG_MESSAGES { int i; printk("scsi%d.%c: message in: ", info->host->host_no, fas216_target(info)); for (i = 0; i < msglen; i++) printk("%02X ", message[i]); printk("\n"); } #endif fas216_parse_message(info, message, msglen); fas216_cmd(info, CMD_MSGACCEPTED); return; parity_error: fas216_cmd(info, CMD_SETATN); msgqueue_flush(&info->scsi.msgs); msgqueue_addmsg(&info->scsi.msgs, 1, MSG_PARITY_ERROR); info->scsi.phase = PHASE_MSGOUT_EXPECT; fas216_cmd(info, CMD_MSGACCEPTED); return; } /** * fas216_send_command - send command after all message bytes have been sent * @info: interface which caused bus service * * Send a command to a target after all message bytes have been sent */ static void fas216_send_command(FAS216_Info *info) { int i; fas216_checkmagic(info); fas216_cmd(info, CMD_NOP|CMD_WITHDMA); fas216_cmd(info, CMD_FLUSHFIFO); /* load command */ for (i = info->scsi.SCp.sent_command; i < info->SCpnt->cmd_len; i++) fas216_writeb(info, REG_FF, info->SCpnt->cmnd[i]); fas216_cmd(info, CMD_TRANSFERINFO); info->scsi.phase = PHASE_COMMAND; } /** * fas216_send_messageout - handle bus service to send a message * @info: interface which caused bus service * * Handle bus service to send a message. * Note: We do not allow the device to change the data direction! 
*/ static void fas216_send_messageout(FAS216_Info *info, int start) { unsigned int tot_msglen = msgqueue_msglength(&info->scsi.msgs); fas216_checkmagic(info); fas216_cmd(info, CMD_FLUSHFIFO); if (tot_msglen) { struct message *msg; int msgnr = 0; while ((msg = msgqueue_getmsg(&info->scsi.msgs, msgnr++)) != NULL) { int i; for (i = start; i < msg->length; i++) fas216_writeb(info, REG_FF, msg->msg[i]); msg->fifo = tot_msglen - (fas216_readb(info, REG_CFIS) & CFIS_CF); start = 0; } } else fas216_writeb(info, REG_FF, NOP); fas216_cmd(info, CMD_TRANSFERINFO); info->scsi.phase = PHASE_MSGOUT; } /** * fas216_busservice_intr - handle bus service interrupt from FAS216 chip * @info: interface which caused bus service interrupt * @stat: Status register contents * @is: SCSI Status register contents * * Handle a bus service interrupt from FAS216 chip */ static void fas216_busservice_intr(FAS216_Info *info, unsigned int stat, unsigned int is) { fas216_checkmagic(info); fas216_log(info, LOG_BUSSERVICE, "bus service: stat=%02x is=%02x phase=%02x", stat, is, info->scsi.phase); switch (info->scsi.phase) { case PHASE_SELECTION: if ((is & IS_BITS) != IS_MSGBYTESENT) goto bad_is; break; case PHASE_SELSTEPS: switch (is & IS_BITS) { case IS_SELARB: case IS_MSGBYTESENT: goto bad_is; case IS_NOTCOMMAND: case IS_EARLYPHASE: if ((stat & STAT_BUSMASK) == STAT_MESGIN) break; goto bad_is; case IS_COMPLETE: break; } break; default: break; } fas216_cmd(info, CMD_NOP); #define STATE(st,ph) ((ph) << 3 | (st)) /* This table describes the legal SCSI state transitions, * as described by the SCSI II spec. */ switch (STATE(stat & STAT_BUSMASK, info->scsi.phase)) { case STATE(STAT_DATAIN, PHASE_SELSTEPS):/* Sel w/ steps -> Data In */ case STATE(STAT_DATAIN, PHASE_MSGOUT): /* Message Out -> Data In */ case STATE(STAT_DATAIN, PHASE_COMMAND): /* Command -> Data In */ case STATE(STAT_DATAIN, PHASE_MSGIN): /* Message In -> Data In */ info->scsi.phase = PHASE_DATAIN; fas216_transfer(info); return; case STATE(STAT_DATAIN, PHASE_DATAIN): /* Data In -> Data In */ case STATE(STAT_DATAOUT, PHASE_DATAOUT):/* Data Out -> Data Out */ fas216_cleanuptransfer(info); fas216_transfer(info); return; case STATE(STAT_DATAOUT, PHASE_SELSTEPS):/* Sel w/ steps-> Data Out */ case STATE(STAT_DATAOUT, PHASE_MSGOUT): /* Message Out -> Data Out */ case STATE(STAT_DATAOUT, PHASE_COMMAND):/* Command -> Data Out */ case STATE(STAT_DATAOUT, PHASE_MSGIN): /* Message In -> Data Out */ fas216_cmd(info, CMD_FLUSHFIFO); info->scsi.phase = PHASE_DATAOUT; fas216_transfer(info); return; case STATE(STAT_STATUS, PHASE_DATAOUT): /* Data Out -> Status */ case STATE(STAT_STATUS, PHASE_DATAIN): /* Data In -> Status */ fas216_stoptransfer(info); fallthrough; case STATE(STAT_STATUS, PHASE_SELSTEPS):/* Sel w/ steps -> Status */ case STATE(STAT_STATUS, PHASE_MSGOUT): /* Message Out -> Status */ case STATE(STAT_STATUS, PHASE_COMMAND): /* Command -> Status */ case STATE(STAT_STATUS, PHASE_MSGIN): /* Message In -> Status */ fas216_cmd(info, CMD_INITCMDCOMPLETE); info->scsi.phase = PHASE_STATUS; return; case STATE(STAT_MESGIN, PHASE_DATAOUT): /* Data Out -> Message In */ case STATE(STAT_MESGIN, PHASE_DATAIN): /* Data In -> Message In */ fas216_stoptransfer(info); fallthrough; case STATE(STAT_MESGIN, PHASE_COMMAND): /* Command -> Message In */ case STATE(STAT_MESGIN, PHASE_SELSTEPS):/* Sel w/ steps -> Message In */ case STATE(STAT_MESGIN, PHASE_MSGOUT): /* Message Out -> Message In */ info->scsi.msgin_fifo = fas216_readb(info, REG_CFIS) & CFIS_CF; fas216_cmd(info, CMD_FLUSHFIFO); 
fas216_cmd(info, CMD_TRANSFERINFO); info->scsi.phase = PHASE_MSGIN; return; case STATE(STAT_MESGIN, PHASE_MSGIN): info->scsi.msgin_fifo = fas216_readb(info, REG_CFIS) & CFIS_CF; fas216_cmd(info, CMD_TRANSFERINFO); return; case STATE(STAT_COMMAND, PHASE_MSGOUT): /* Message Out -> Command */ case STATE(STAT_COMMAND, PHASE_MSGIN): /* Message In -> Command */ fas216_send_command(info); info->scsi.phase = PHASE_COMMAND; return; /* * Selection -> Message Out */ case STATE(STAT_MESGOUT, PHASE_SELECTION): fas216_send_messageout(info, 1); return; /* * Message Out -> Message Out */ case STATE(STAT_MESGOUT, PHASE_SELSTEPS): case STATE(STAT_MESGOUT, PHASE_MSGOUT): /* * If we get another message out phase, this usually * means some parity error occurred. Resend complete * set of messages. If we have more than one byte to * send, we need to assert ATN again. */ if (info->device[info->SCpnt->device->id].parity_check) { /* * We were testing... good, the device * supports parity checking. */ info->device[info->SCpnt->device->id].parity_check = 0; info->device[info->SCpnt->device->id].parity_enabled = 1; fas216_writeb(info, REG_CNTL1, info->scsi.cfg[0]); } if (msgqueue_msglength(&info->scsi.msgs) > 1) fas216_cmd(info, CMD_SETATN); fallthrough; /* * Any -> Message Out */ case STATE(STAT_MESGOUT, PHASE_MSGOUT_EXPECT): fas216_send_messageout(info, 0); return; /* Error recovery rules. * These either attempt to abort or retry the operation. * TODO: we need more of these */ case STATE(STAT_COMMAND, PHASE_COMMAND):/* Command -> Command */ /* error - we've sent out all the command bytes * we have. * NOTE: we need SAVE DATA POINTERS/RESTORE DATA POINTERS * to include the command bytes sent for this to work * correctly. */ printk(KERN_ERR "scsi%d.%c: " "target trying to receive more command bytes\n", info->host->host_no, fas216_target(info)); fas216_cmd(info, CMD_SETATN); fas216_set_stc(info, 15); fas216_cmd(info, CMD_PADBYTES | CMD_WITHDMA); msgqueue_flush(&info->scsi.msgs); msgqueue_addmsg(&info->scsi.msgs, 1, INITIATOR_ERROR); info->scsi.phase = PHASE_MSGOUT_EXPECT; return; } if (info->scsi.phase == PHASE_MSGIN_DISCONNECT) { printk(KERN_ERR "scsi%d.%c: disconnect message received, but bus service %s?\n", info->host->host_no, fas216_target(info), fas216_bus_phase(stat)); msgqueue_flush(&info->scsi.msgs); fas216_cmd(info, CMD_SETATN); msgqueue_addmsg(&info->scsi.msgs, 1, INITIATOR_ERROR); info->scsi.phase = PHASE_MSGOUT_EXPECT; info->scsi.aborting = 1; fas216_cmd(info, CMD_TRANSFERINFO); return; } printk(KERN_ERR "scsi%d.%c: bus phase %s after %s?\n", info->host->host_no, fas216_target(info), fas216_bus_phase(stat), fas216_drv_phase(info)); print_debug_list(); return; bad_is: fas216_log(info, 0, "bus service at step %d?", is & IS_BITS); fas216_dumpstate(info); print_debug_list(); fas216_done(info, DID_ERROR); } /** * fas216_funcdone_intr - handle a function done interrupt from FAS216 chip * @info: interface which caused function done interrupt * @stat: Status register contents * @is: SCSI Status register contents * * Handle a function done interrupt from FAS216 chip */ static void fas216_funcdone_intr(FAS216_Info *info, unsigned int stat, unsigned int is) { unsigned int fifo_len = fas216_readb(info, REG_CFIS) & CFIS_CF; fas216_checkmagic(info); fas216_log(info, LOG_FUNCTIONDONE, "function done: stat=%02x is=%02x phase=%02x", stat, is, info->scsi.phase); switch (info->scsi.phase) { case PHASE_STATUS: /* status phase - read status and msg */ if (fifo_len != 2) { fas216_log(info, 0, "odd number of bytes in FIFO: %d", 
fifo_len); } /* * Read status then message byte. */ info->scsi.SCp.Status = fas216_readb(info, REG_FF); info->scsi.SCp.Message = fas216_readb(info, REG_FF); info->scsi.phase = PHASE_DONE; fas216_cmd(info, CMD_MSGACCEPTED); break; case PHASE_IDLE: case PHASE_SELECTION: case PHASE_SELSTEPS: break; case PHASE_MSGIN: /* message in phase */ if ((stat & STAT_BUSMASK) == STAT_MESGIN) { info->scsi.msgin_fifo = fifo_len; fas216_message(info); break; } fallthrough; default: fas216_log(info, 0, "internal phase %s for function done?" " What do I do with this?", fas216_target(info), fas216_drv_phase(info)); } } static void fas216_bus_reset(FAS216_Info *info) { neg_t sync_state; int i; msgqueue_flush(&info->scsi.msgs); sync_state = neg_invalid; #ifdef SCSI2_SYNC if (info->ifcfg.capabilities & (FASCAP_DMA|FASCAP_PSEUDODMA)) sync_state = neg_wait; #endif info->scsi.phase = PHASE_IDLE; info->SCpnt = NULL; /* bug! */ memset(&info->scsi.SCp, 0, sizeof(info->scsi.SCp)); for (i = 0; i < 8; i++) { info->device[i].disconnect_ok = info->ifcfg.disconnect_ok; info->device[i].sync_state = sync_state; info->device[i].period = info->ifcfg.asyncperiod / 4; info->device[i].stp = info->scsi.async_stp; info->device[i].sof = 0; info->device[i].wide_xfer = 0; } info->rst_bus_status = 1; wake_up(&info->eh_wait); } /** * fas216_intr - handle interrupts to progress a command * @info: interface to service * * Handle interrupts from the interface to progress a command */ irqreturn_t fas216_intr(FAS216_Info *info) { unsigned char inst, is, stat; int handled = IRQ_NONE; fas216_checkmagic(info); stat = fas216_readb(info, REG_STAT); is = fas216_readb(info, REG_IS); inst = fas216_readb(info, REG_INST); add_debug_list(stat, is, inst, info->scsi.phase); if (stat & STAT_INT) { if (inst & INST_BUSRESET) { fas216_log(info, 0, "bus reset detected"); fas216_bus_reset(info); scsi_report_bus_reset(info->host, 0); } else if (inst & INST_ILLEGALCMD) { fas216_log(info, LOG_ERROR, "illegal command given\n"); fas216_dumpstate(info); print_debug_list(); } else if (inst & INST_DISCONNECT) fas216_disconnect_intr(info); else if (inst & INST_RESELECTED) /* reselected */ fas216_reselected_intr(info); else if (inst & INST_BUSSERVICE) /* bus service request */ fas216_busservice_intr(info, stat, is); else if (inst & INST_FUNCDONE) /* function done */ fas216_funcdone_intr(info, stat, is); else fas216_log(info, 0, "unknown interrupt received:" " phase %s inst %02X is %02X stat %02X", fas216_drv_phase(info), inst, is, stat); handled = IRQ_HANDLED; } return handled; } static void __fas216_start_command(FAS216_Info *info, struct scsi_cmnd *SCpnt) { int tot_msglen; /* following what the ESP driver says */ fas216_set_stc(info, 0); fas216_cmd(info, CMD_NOP | CMD_WITHDMA); /* flush FIFO */ fas216_cmd(info, CMD_FLUSHFIFO); /* load bus-id and timeout */ fas216_writeb(info, REG_SDID, BUSID(SCpnt->device->id)); fas216_writeb(info, REG_STIM, info->ifcfg.select_timeout); /* synchronous transfers */ fas216_set_sync(info, SCpnt->device->id); tot_msglen = msgqueue_msglength(&info->scsi.msgs); #ifdef DEBUG_MESSAGES { struct message *msg; int msgnr = 0, i; printk("scsi%d.%c: message out: ", info->host->host_no, '0' + SCpnt->device->id); while ((msg = msgqueue_getmsg(&info->scsi.msgs, msgnr++)) != NULL) { printk("{ "); for (i = 0; i < msg->length; i++) printk("%02x ", msg->msg[i]); printk("} "); } printk("\n"); } #endif if (tot_msglen == 1 || tot_msglen == 3) { /* * We have an easy message length to send... 
*/ struct message *msg; int msgnr = 0, i; info->scsi.phase = PHASE_SELSTEPS; /* load message bytes */ while ((msg = msgqueue_getmsg(&info->scsi.msgs, msgnr++)) != NULL) { for (i = 0; i < msg->length; i++) fas216_writeb(info, REG_FF, msg->msg[i]); msg->fifo = tot_msglen - (fas216_readb(info, REG_CFIS) & CFIS_CF); } /* load command */ for (i = 0; i < SCpnt->cmd_len; i++) fas216_writeb(info, REG_FF, SCpnt->cmnd[i]); if (tot_msglen == 1) fas216_cmd(info, CMD_SELECTATN); else fas216_cmd(info, CMD_SELECTATN3); } else { /* * We have an unusual number of message bytes to send. * Load first byte into fifo, and issue SELECT with ATN and * stop steps. */ struct message *msg = msgqueue_getmsg(&info->scsi.msgs, 0); fas216_writeb(info, REG_FF, msg->msg[0]); msg->fifo = 1; fas216_cmd(info, CMD_SELECTATNSTOP); } } /* * Decide whether we need to perform a parity test on this device. * Can also be used to force parity error conditions during initial * information transfer phase (message out) for test purposes. */ static int parity_test(FAS216_Info *info, int target) { #if 0 if (target == 3) { info->device[target].parity_check = 0; return 1; } #endif return info->device[target].parity_check; } static void fas216_start_command(FAS216_Info *info, struct scsi_cmnd *SCpnt) { int disconnect_ok; /* * claim host busy */ info->scsi.phase = PHASE_SELECTION; info->scsi.SCp = *arm_scsi_pointer(SCpnt); info->SCpnt = SCpnt; info->dma.transfer_type = fasdma_none; if (parity_test(info, SCpnt->device->id)) fas216_writeb(info, REG_CNTL1, info->scsi.cfg[0] | CNTL1_PTE); else fas216_writeb(info, REG_CNTL1, info->scsi.cfg[0]); /* * Don't allow request sense commands to disconnect. */ disconnect_ok = SCpnt->cmnd[0] != REQUEST_SENSE && info->device[SCpnt->device->id].disconnect_ok; /* * build outgoing message bytes */ msgqueue_flush(&info->scsi.msgs); msgqueue_addmsg(&info->scsi.msgs, 1, IDENTIFY(disconnect_ok, SCpnt->device->lun)); /* * add tag message if required */ if (SCpnt->device->simple_tags) msgqueue_addmsg(&info->scsi.msgs, 2, SIMPLE_QUEUE_TAG, scsi_cmd_to_rq(SCpnt)->tag); do { #ifdef SCSI2_SYNC if ((info->device[SCpnt->device->id].sync_state == neg_wait || info->device[SCpnt->device->id].sync_state == neg_complete) && (SCpnt->cmnd[0] == REQUEST_SENSE || SCpnt->cmnd[0] == INQUIRY)) { info->device[SCpnt->device->id].sync_state = neg_inprogress; msgqueue_addmsg(&info->scsi.msgs, 5, EXTENDED_MESSAGE, 3, EXTENDED_SDTR, 1000 / info->ifcfg.clockrate, info->ifcfg.sync_max_depth); break; } #endif } while (0); __fas216_start_command(info, SCpnt); } static void fas216_allocate_tag(FAS216_Info *info, struct scsi_cmnd *SCpnt) { set_bit(SCpnt->device->id * 8 + (u8)(SCpnt->device->lun & 0x7), info->busyluns); info->stats.removes += 1; switch (SCpnt->cmnd[0]) { case WRITE_6: case WRITE_10: case WRITE_12: info->stats.writes += 1; break; case READ_6: case READ_10: case READ_12: info->stats.reads += 1; break; default: info->stats.miscs += 1; break; } } static void fas216_do_bus_device_reset(FAS216_Info *info, struct scsi_cmnd *SCpnt) { struct message *msg; /* * claim host busy */ info->scsi.phase = PHASE_SELECTION; info->scsi.SCp = *arm_scsi_pointer(SCpnt); info->SCpnt = SCpnt; info->dma.transfer_type = fasdma_none; fas216_log(info, LOG_ERROR, "sending bus device reset"); msgqueue_flush(&info->scsi.msgs); msgqueue_addmsg(&info->scsi.msgs, 1, BUS_DEVICE_RESET); /* following what the ESP driver says */ fas216_set_stc(info, 0); fas216_cmd(info, CMD_NOP | CMD_WITHDMA); /* flush FIFO */ fas216_cmd(info, CMD_FLUSHFIFO); /* load bus-id and 
timeout */ fas216_writeb(info, REG_SDID, BUSID(SCpnt->device->id)); fas216_writeb(info, REG_STIM, info->ifcfg.select_timeout); /* synchronous transfers */ fas216_set_sync(info, SCpnt->device->id); msg = msgqueue_getmsg(&info->scsi.msgs, 0); fas216_writeb(info, REG_FF, BUS_DEVICE_RESET); msg->fifo = 1; fas216_cmd(info, CMD_SELECTATNSTOP); } /** * fas216_kick - kick a command to the interface * @info: our host interface to kick * * Kick a command to the interface, interface should be idle. * Notes: Interrupts are always disabled! */ static void fas216_kick(FAS216_Info *info) { struct scsi_cmnd *SCpnt = NULL; #define TYPE_OTHER 0 #define TYPE_RESET 1 #define TYPE_QUEUE 2 int where_from = TYPE_OTHER; fas216_checkmagic(info); /* * Obtain the next command to process. */ do { if (info->rstSCpnt) { SCpnt = info->rstSCpnt; /* don't remove it */ where_from = TYPE_RESET; break; } if (info->reqSCpnt) { SCpnt = info->reqSCpnt; info->reqSCpnt = NULL; break; } if (info->origSCpnt) { SCpnt = info->origSCpnt; info->origSCpnt = NULL; break; } /* retrieve next command */ if (!SCpnt) { SCpnt = queue_remove_exclude(&info->queues.issue, info->busyluns); where_from = TYPE_QUEUE; break; } } while (0); if (!SCpnt) { /* * no command pending, so enable reselection. */ fas216_cmd(info, CMD_ENABLESEL); return; } /* * We're going to start a command, so disable reselection */ fas216_cmd(info, CMD_DISABLESEL); if (info->scsi.disconnectable && info->SCpnt) { fas216_log(info, LOG_CONNECT, "moved command for %d to disconnected queue", info->SCpnt->device->id); queue_add_cmd_tail(&info->queues.disconnected, info->SCpnt); info->scsi.disconnectable = 0; info->SCpnt = NULL; } fas216_log_command(info, LOG_CONNECT | LOG_MESSAGES, SCpnt, "starting"); switch (where_from) { case TYPE_QUEUE: fas216_allocate_tag(info, SCpnt); fallthrough; case TYPE_OTHER: fas216_start_command(info, SCpnt); break; case TYPE_RESET: fas216_do_bus_device_reset(info, SCpnt); break; } fas216_log(info, LOG_CONNECT, "select: data pointers [%p, %X]", info->scsi.SCp.ptr, info->scsi.SCp.this_residual); /* * should now get either DISCONNECT or * (FUNCTION DONE with BUS SERVICE) interrupt */ } /* * Clean up from issuing a BUS DEVICE RESET message to a device. */ static void fas216_devicereset_done(FAS216_Info *info, struct scsi_cmnd *SCpnt, unsigned int result) { fas216_log(info, LOG_ERROR, "fas216 device reset complete"); info->rstSCpnt = NULL; info->rst_dev_status = 1; wake_up(&info->eh_wait); } /** * fas216_rq_sns_done - Finish processing automatic request sense command * @info: interface that completed * @SCpnt: command that completed * @result: driver byte of result * * Finish processing automatic request sense command */ static void fas216_rq_sns_done(FAS216_Info *info, struct scsi_cmnd *SCpnt, unsigned int result) { struct scsi_pointer *scsi_pointer = arm_scsi_pointer(SCpnt); fas216_log_target(info, LOG_CONNECT, SCpnt->device->id, "request sense complete, result=0x%04x%02x%02x", result, scsi_pointer->Message, scsi_pointer->Status); if (result != DID_OK || scsi_pointer->Status != SAM_STAT_GOOD) /* * Something went wrong. Make sure that we don't * have valid data in the sense buffer that could * confuse the higher levels. 
*/ memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); //printk("scsi%d.%c: sense buffer: ", info->host->host_no, '0' + SCpnt->device->id); //{ int i; for (i = 0; i < 32; i++) printk("%02x ", SCpnt->sense_buffer[i]); printk("\n"); } /* * Note that we don't set SCpnt->result, since that should * reflect the status of the command that we were asked by * the upper layers to process. This would have been set * correctly by fas216_std_done. */ scsi_eh_restore_cmnd(SCpnt, &info->ses); fas216_cmd_priv(SCpnt)->scsi_done(SCpnt); } /** * fas216_std_done - finish processing of standard command * @info: interface that completed * @SCpnt: command that completed * @result: driver byte of result * * Finish processing of standard command */ static void fas216_std_done(FAS216_Info *info, struct scsi_cmnd *SCpnt, unsigned int result) { struct scsi_pointer *scsi_pointer = arm_scsi_pointer(SCpnt); info->stats.fins += 1; set_host_byte(SCpnt, result); if (result == DID_OK) scsi_msg_to_host_byte(SCpnt, info->scsi.SCp.Message); set_status_byte(SCpnt, info->scsi.SCp.Status); fas216_log_command(info, LOG_CONNECT, SCpnt, "command complete, result=0x%08x", SCpnt->result); /* * If the driver detected an error, we're all done. */ if (get_host_byte(SCpnt) != DID_OK) goto done; /* * If the command returned CHECK_CONDITION or COMMAND_TERMINATED * status, request the sense information. */ if (get_status_byte(SCpnt) == SAM_STAT_CHECK_CONDITION || get_status_byte(SCpnt) == SAM_STAT_COMMAND_TERMINATED) goto request_sense; /* * If the command did not complete with GOOD status, * we are all done here. */ if (get_status_byte(SCpnt) != SAM_STAT_GOOD) goto done; /* * We have successfully completed a command. Make sure that * we do not have any buffers left to transfer. The world * is not perfect, and we seem to occasionally hit this. * It can be indicative of a buggy driver, target or the upper * levels of the SCSI code. */ if (info->scsi.SCp.ptr) { switch (SCpnt->cmnd[0]) { case INQUIRY: case START_STOP: case MODE_SENSE: break; default: scmd_printk(KERN_ERR, SCpnt, "incomplete data transfer detected: res=%08X ptr=%p len=%X\n", SCpnt->result, info->scsi.SCp.ptr, info->scsi.SCp.this_residual); scsi_print_command(SCpnt); set_host_byte(SCpnt, DID_ERROR); goto request_sense; } } done: if (fas216_cmd_priv(SCpnt)->scsi_done) { fas216_cmd_priv(SCpnt)->scsi_done(SCpnt); return; } panic("scsi%d.H: null scsi_done function in fas216_done", info->host->host_no); request_sense: if (SCpnt->cmnd[0] == REQUEST_SENSE) goto done; scsi_eh_prep_cmnd(SCpnt, &info->ses, NULL, 0, ~0); fas216_log_target(info, LOG_CONNECT, SCpnt->device->id, "requesting sense"); init_SCp(SCpnt); scsi_pointer->Message = 0; scsi_pointer->Status = 0; SCpnt->host_scribble = (void *)fas216_rq_sns_done; /* * Place this command into the high priority "request * sense" slot. This will be the very next command * executed, unless a target connects to us. 
*/ if (info->reqSCpnt) printk(KERN_WARNING "scsi%d.%c: losing request command\n", info->host->host_no, '0' + SCpnt->device->id); info->reqSCpnt = SCpnt; } /** * fas216_done - complete processing for current command * @info: interface that completed * @result: driver byte of result * * Complete processing for current command */ static void fas216_done(FAS216_Info *info, unsigned int result) { void (*fn)(FAS216_Info *, struct scsi_cmnd *, unsigned int); struct scsi_cmnd *SCpnt; unsigned long flags; fas216_checkmagic(info); if (!info->SCpnt) goto no_command; SCpnt = info->SCpnt; info->SCpnt = NULL; info->scsi.phase = PHASE_IDLE; if (info->scsi.aborting) { fas216_log(info, 0, "uncaught abort - returning DID_ABORT"); result = DID_ABORT; info->scsi.aborting = 0; } /* * Sanity check the completion - if we have zero bytes left * to transfer, we should not have a valid pointer. */ if (info->scsi.SCp.ptr && info->scsi.SCp.this_residual == 0) { scmd_printk(KERN_INFO, SCpnt, "zero bytes left to transfer, but buffer pointer still valid: ptr=%p len=%08x\n", info->scsi.SCp.ptr, info->scsi.SCp.this_residual); info->scsi.SCp.ptr = NULL; scsi_print_command(SCpnt); } /* * Clear down this command as completed. If we need to request * the sense information, fas216_kick will re-assert the busy * status. */ info->device[SCpnt->device->id].parity_check = 0; clear_bit(SCpnt->device->id * 8 + (u8)(SCpnt->device->lun & 0x7), info->busyluns); fn = (void (*)(FAS216_Info *, struct scsi_cmnd *, unsigned int))SCpnt->host_scribble; fn(info, SCpnt, result); if (info->scsi.irq) { spin_lock_irqsave(&info->host_lock, flags); if (info->scsi.phase == PHASE_IDLE) fas216_kick(info); spin_unlock_irqrestore(&info->host_lock, flags); } return; no_command: panic("scsi%d.H: null command in fas216_done", info->host->host_no); } /** * fas216_queue_command_internal - queue a command for the adapter to process * @SCpnt: Command to queue * @done: done function to call once command is complete * * Queue a command for adapter to process. * Returns: 0 on success, else error. * Notes: io_request_lock is held, interrupts are disabled. */ static int fas216_queue_command_internal(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) { FAS216_Info *info = (FAS216_Info *)SCpnt->device->host->hostdata; int result; fas216_checkmagic(info); fas216_log_command(info, LOG_CONNECT, SCpnt, "received command (%p)", SCpnt); fas216_cmd_priv(SCpnt)->scsi_done = done; SCpnt->host_scribble = (void *)fas216_std_done; SCpnt->result = 0; init_SCp(SCpnt); info->stats.queues += 1; spin_lock(&info->host_lock); /* * Add command into execute queue and let it complete under * whatever scheme we're using. */ result = !queue_add_cmd_ordered(&info->queues.issue, SCpnt); /* * If we successfully added the command, * kick the interface to get it moving. */ if (result == 0 && info->scsi.phase == PHASE_IDLE) fas216_kick(info); spin_unlock(&info->host_lock); fas216_log_target(info, LOG_CONNECT, -1, "queue %s", result ? 
"failure" : "success"); return result; } static int fas216_queue_command_lck(struct scsi_cmnd *SCpnt) { return fas216_queue_command_internal(SCpnt, scsi_done); } DEF_SCSI_QCMD(fas216_queue_command) /** * fas216_internal_done - trigger restart of a waiting thread in fas216_noqueue_command * @SCpnt: Command to wake * * Trigger restart of a waiting thread in fas216_command */ static void fas216_internal_done(struct scsi_cmnd *SCpnt) { FAS216_Info *info = (FAS216_Info *)SCpnt->device->host->hostdata; fas216_checkmagic(info); info->internal_done = 1; } /** * fas216_noqueue_command - process a command for the adapter. * @SCpnt: Command to queue * * Queue a command for adapter to process. * Returns: scsi result code. * Notes: io_request_lock is held, interrupts are disabled. */ static int fas216_noqueue_command_lck(struct scsi_cmnd *SCpnt) { FAS216_Info *info = (FAS216_Info *)SCpnt->device->host->hostdata; fas216_checkmagic(info); /* * We should only be using this if we don't have an interrupt. * Provide some "incentive" to use the queueing code. */ BUG_ON(info->scsi.irq); info->internal_done = 0; fas216_queue_command_internal(SCpnt, fas216_internal_done); /* * This wastes time, since we can't return until the command is * complete. We can't sleep either since we may get re-entered! * However, we must re-enable interrupts, or else we'll be * waiting forever. */ spin_unlock_irq(info->host->host_lock); while (!info->internal_done) { /* * If we don't have an IRQ, then we must poll the card for * it's interrupt, and use that to call this driver's * interrupt routine. That way, we keep the command * progressing. Maybe we can add some intelligence here * and go to sleep if we know that the device is going * to be some time (eg, disconnected). */ if (fas216_readb(info, REG_STAT) & STAT_INT) { spin_lock_irq(info->host->host_lock); fas216_intr(info); spin_unlock_irq(info->host->host_lock); } } spin_lock_irq(info->host->host_lock); scsi_done(SCpnt); return 0; } DEF_SCSI_QCMD(fas216_noqueue_command) /* * Error handler timeout function. Indicate that we timed out, * and wake up any error handler process so it can continue. */ static void fas216_eh_timer(struct timer_list *t) { FAS216_Info *info = from_timer(info, t, eh_timer); fas216_log(info, LOG_ERROR, "error handling timed out\n"); del_timer(&info->eh_timer); if (info->rst_bus_status == 0) info->rst_bus_status = -1; if (info->rst_dev_status == 0) info->rst_dev_status = -1; wake_up(&info->eh_wait); } enum res_find { res_failed, /* not found */ res_success, /* command on issue queue */ res_hw_abort /* command on disconnected dev */ }; /** * fas216_do_abort - decide how to abort a command * @SCpnt: command to abort * * Decide how to abort a command. * Returns: abort status */ static enum res_find fas216_find_command(FAS216_Info *info, struct scsi_cmnd *SCpnt) { enum res_find res = res_failed; if (queue_remove_cmd(&info->queues.issue, SCpnt)) { /* * The command was on the issue queue, and has not been * issued yet. We can remove the command from the queue, * and acknowledge the abort. Neither the device nor the * interface know about the command. */ printk("on issue queue "); res = res_success; } else if (queue_remove_cmd(&info->queues.disconnected, SCpnt)) { /* * The command was on the disconnected queue. We must * reconnect with the device if possible, and send it * an abort message. 
*/ printk("on disconnected queue "); res = res_hw_abort; } else if (info->SCpnt == SCpnt) { printk("executing "); switch (info->scsi.phase) { /* * If the interface is idle, and the command is 'disconnectable', * then it is the same as on the disconnected queue. */ case PHASE_IDLE: if (info->scsi.disconnectable) { info->scsi.disconnectable = 0; info->SCpnt = NULL; res = res_hw_abort; } break; default: break; } } else if (info->origSCpnt == SCpnt) { /* * The command will be executed next, but a command * is currently using the interface. This is similar to * being on the issue queue, except the busylun bit has * been set. */ info->origSCpnt = NULL; clear_bit(SCpnt->device->id * 8 + (u8)(SCpnt->device->lun & 0x7), info->busyluns); printk("waiting for execution "); res = res_success; } else printk("unknown "); return res; } /** * fas216_eh_abort - abort this command * @SCpnt: command to abort * * Abort this command. * Returns: FAILED if unable to abort * Notes: io_request_lock is taken, and irqs are disabled */ int fas216_eh_abort(struct scsi_cmnd *SCpnt) { FAS216_Info *info = (FAS216_Info *)SCpnt->device->host->hostdata; int result = FAILED; fas216_checkmagic(info); info->stats.aborts += 1; scmd_printk(KERN_WARNING, SCpnt, "abort command\n"); print_debug_list(); fas216_dumpstate(info); switch (fas216_find_command(info, SCpnt)) { /* * We found the command, and cleared it out. Either * the command is still known to be executing on the * target, or the busylun bit is not set. */ case res_success: scmd_printk(KERN_WARNING, SCpnt, "abort %p success\n", SCpnt); result = SUCCESS; break; /* * We need to reconnect to the target and send it an * ABORT or ABORT_TAG message. We can only do this * if the bus is free. */ case res_hw_abort: /* * We are unable to abort the command for some reason. */ default: case res_failed: scmd_printk(KERN_WARNING, SCpnt, "abort %p failed\n", SCpnt); break; } return result; } /** * fas216_eh_device_reset - Reset the device associated with this command * @SCpnt: command specifing device to reset * * Reset the device associated with this command. * Returns: FAILED if unable to reset. * Notes: We won't be re-entered, so we'll only have one device * reset on the go at one time. */ int fas216_eh_device_reset(struct scsi_cmnd *SCpnt) { FAS216_Info *info = (FAS216_Info *)SCpnt->device->host->hostdata; unsigned long flags; int i, res = FAILED, target = SCpnt->device->id; fas216_log(info, LOG_ERROR, "device reset for target %d", target); spin_lock_irqsave(&info->host_lock, flags); do { /* * If we are currently connected to a device, and * it is the device we want to reset, there is * nothing we can do here. Chances are it is stuck, * and we need a bus reset. */ if (info->SCpnt && !info->scsi.disconnectable && info->SCpnt->device->id == SCpnt->device->id) break; /* * We're going to be resetting this device. Remove * all pending commands from the driver. By doing * so, we guarantee that we won't touch the command * structures except to process the reset request. */ queue_remove_all_target(&info->queues.issue, target); queue_remove_all_target(&info->queues.disconnected, target); if (info->origSCpnt && info->origSCpnt->device->id == target) info->origSCpnt = NULL; if (info->reqSCpnt && info->reqSCpnt->device->id == target) info->reqSCpnt = NULL; for (i = 0; i < 8; i++) clear_bit(target * 8 + i, info->busyluns); /* * Hijack this SCSI command structure to send * a bus device reset message to this device. 
*/ SCpnt->host_scribble = (void *)fas216_devicereset_done; info->rst_dev_status = 0; info->rstSCpnt = SCpnt; if (info->scsi.phase == PHASE_IDLE) fas216_kick(info); mod_timer(&info->eh_timer, jiffies + 30 * HZ); spin_unlock_irqrestore(&info->host_lock, flags); /* * Wait up to 30 seconds for the reset to complete. */ wait_event(info->eh_wait, info->rst_dev_status); del_timer_sync(&info->eh_timer); spin_lock_irqsave(&info->host_lock, flags); info->rstSCpnt = NULL; if (info->rst_dev_status == 1) res = SUCCESS; } while (0); SCpnt->host_scribble = NULL; spin_unlock_irqrestore(&info->host_lock, flags); fas216_log(info, LOG_ERROR, "device reset complete: %s\n", res == SUCCESS ? "success" : "failed"); return res; } /** * fas216_eh_bus_reset - Reset the bus associated with the command * @SCpnt: command specifing bus to reset * * Reset the bus associated with the command. * Returns: FAILED if unable to reset. * Notes: Further commands are blocked. */ int fas216_eh_bus_reset(struct scsi_cmnd *SCpnt) { FAS216_Info *info = (FAS216_Info *)SCpnt->device->host->hostdata; unsigned long flags; struct scsi_device *SDpnt; fas216_checkmagic(info); fas216_log(info, LOG_ERROR, "resetting bus"); info->stats.bus_resets += 1; spin_lock_irqsave(&info->host_lock, flags); /* * Stop all activity on this interface. */ fas216_aborttransfer(info); fas216_writeb(info, REG_CNTL3, info->scsi.cfg[2]); /* * Clear any pending interrupts. */ while (fas216_readb(info, REG_STAT) & STAT_INT) fas216_readb(info, REG_INST); info->rst_bus_status = 0; /* * For each attached hard-reset device, clear out * all command structures. Leave the running * command in place. */ shost_for_each_device(SDpnt, info->host) { int i; if (SDpnt->soft_reset) continue; queue_remove_all_target(&info->queues.issue, SDpnt->id); queue_remove_all_target(&info->queues.disconnected, SDpnt->id); if (info->origSCpnt && info->origSCpnt->device->id == SDpnt->id) info->origSCpnt = NULL; if (info->reqSCpnt && info->reqSCpnt->device->id == SDpnt->id) info->reqSCpnt = NULL; info->SCpnt = NULL; for (i = 0; i < 8; i++) clear_bit(SDpnt->id * 8 + i, info->busyluns); } info->scsi.phase = PHASE_IDLE; /* * Reset the SCSI bus. Device cleanup happens in * the interrupt handler. */ fas216_cmd(info, CMD_RESETSCSI); mod_timer(&info->eh_timer, jiffies + HZ); spin_unlock_irqrestore(&info->host_lock, flags); /* * Wait one second for the interrupt. */ wait_event(info->eh_wait, info->rst_bus_status); del_timer_sync(&info->eh_timer); fas216_log(info, LOG_ERROR, "bus reset complete: %s\n", info->rst_bus_status == 1 ? "success" : "failed"); return info->rst_bus_status == 1 ? SUCCESS : FAILED; } /** * fas216_init_chip - Initialise FAS216 state after reset * @info: state structure for interface * * Initialise FAS216 state after reset */ static void fas216_init_chip(FAS216_Info *info) { unsigned int clock = ((info->ifcfg.clockrate - 1) / 5 + 1) & 7; fas216_writeb(info, REG_CLKF, clock); fas216_writeb(info, REG_CNTL1, info->scsi.cfg[0]); fas216_writeb(info, REG_CNTL2, info->scsi.cfg[1]); fas216_writeb(info, REG_CNTL3, info->scsi.cfg[2]); fas216_writeb(info, REG_STIM, info->ifcfg.select_timeout); fas216_writeb(info, REG_SOF, 0); fas216_writeb(info, REG_STP, info->scsi.async_stp); fas216_writeb(info, REG_CNTL1, info->scsi.cfg[0]); } /** * fas216_eh_host_reset - Reset the host associated with this command * @SCpnt: command specifing host to reset * * Reset the host associated with this command. * Returns: FAILED if unable to reset. 
* Notes: io_request_lock is taken, and irqs are disabled */ int fas216_eh_host_reset(struct scsi_cmnd *SCpnt) { FAS216_Info *info = (FAS216_Info *)SCpnt->device->host->hostdata; spin_lock_irq(info->host->host_lock); fas216_checkmagic(info); fas216_log(info, LOG_ERROR, "resetting host"); /* * Reset the SCSI chip. */ fas216_cmd(info, CMD_RESETCHIP); /* * Ugly ugly ugly! * We need to release the host_lock and enable * IRQs if we sleep, but we must relock and disable * IRQs after the sleep. */ spin_unlock_irq(info->host->host_lock); msleep(50 * 1000/100); spin_lock_irq(info->host->host_lock); /* * Release the SCSI reset. */ fas216_cmd(info, CMD_NOP); fas216_init_chip(info); spin_unlock_irq(info->host->host_lock); return SUCCESS; } #define TYPE_UNKNOWN 0 #define TYPE_NCR53C90 1 #define TYPE_NCR53C90A 2 #define TYPE_NCR53C9x 3 #define TYPE_Am53CF94 4 #define TYPE_EmFAS216 5 #define TYPE_QLFAS216 6 static char *chip_types[] = { "unknown", "NS NCR53C90", "NS NCR53C90A", "NS NCR53C9x", "AMD Am53CF94", "Emulex FAS216", "QLogic FAS216" }; static int fas216_detect_type(FAS216_Info *info) { int family, rev; /* * Reset the chip. */ fas216_writeb(info, REG_CMD, CMD_RESETCHIP); udelay(50); fas216_writeb(info, REG_CMD, CMD_NOP); /* * Check to see if control reg 2 is present. */ fas216_writeb(info, REG_CNTL3, 0); fas216_writeb(info, REG_CNTL2, CNTL2_S2FE); /* * If we are unable to read back control reg 2 * correctly, it is not present, and we have a * NCR53C90. */ if ((fas216_readb(info, REG_CNTL2) & (~0xe0)) != CNTL2_S2FE) return TYPE_NCR53C90; /* * Now, check control register 3 */ fas216_writeb(info, REG_CNTL2, 0); fas216_writeb(info, REG_CNTL3, 0); fas216_writeb(info, REG_CNTL3, 5); /* * If we are unable to read the register back * correctly, we have a NCR53C90A */ if (fas216_readb(info, REG_CNTL3) != 5) return TYPE_NCR53C90A; /* * Now read the ID from the chip. */ fas216_writeb(info, REG_CNTL3, 0); fas216_writeb(info, REG_CNTL3, CNTL3_ADIDCHK); fas216_writeb(info, REG_CNTL3, 0); fas216_writeb(info, REG_CMD, CMD_RESETCHIP); udelay(50); fas216_writeb(info, REG_CMD, CMD_WITHDMA | CMD_NOP); fas216_writeb(info, REG_CNTL2, CNTL2_ENF); fas216_writeb(info, REG_CMD, CMD_RESETCHIP); udelay(50); fas216_writeb(info, REG_CMD, CMD_NOP); rev = fas216_readb(info, REG_ID); family = rev >> 3; rev &= 7; switch (family) { case 0x01: if (rev == 4) return TYPE_Am53CF94; break; case 0x02: switch (rev) { case 2: return TYPE_EmFAS216; case 3: return TYPE_QLFAS216; } break; default: break; } printk("family %x rev %x\n", family, rev); return TYPE_NCR53C9x; } /** * fas216_reset_state - Initialise driver internal state * @info: state to initialise * * Initialise driver internal state */ static void fas216_reset_state(FAS216_Info *info) { int i; fas216_checkmagic(info); fas216_bus_reset(info); /* * Clear out all stale info in our state structure */ memset(info->busyluns, 0, sizeof(info->busyluns)); info->scsi.disconnectable = 0; info->scsi.aborting = 0; for (i = 0; i < 8; i++) { info->device[i].parity_enabled = 0; info->device[i].parity_check = 1; } /* * Drain all commands on disconnected queue */ while (queue_remove(&info->queues.disconnected) != NULL); /* * Remove executing commands. */ info->SCpnt = NULL; info->reqSCpnt = NULL; info->rstSCpnt = NULL; info->origSCpnt = NULL; } /** * fas216_init - initialise FAS/NCR/AMD SCSI structures. * @host: a driver-specific filled-out structure * * Initialise FAS/NCR/AMD SCSI structures. 
* Returns: 0 on success */ int fas216_init(struct Scsi_Host *host) { FAS216_Info *info = (FAS216_Info *)host->hostdata; info->magic_start = MAGIC; info->magic_end = MAGIC; info->host = host; info->scsi.cfg[0] = host->this_id | CNTL1_PERE; info->scsi.cfg[1] = CNTL2_ENF | CNTL2_S2FE; info->scsi.cfg[2] = info->ifcfg.cntl3 | CNTL3_ADIDCHK | CNTL3_QTAG | CNTL3_G2CB | CNTL3_LBTM; info->scsi.async_stp = fas216_syncperiod(info, info->ifcfg.asyncperiod); info->rst_dev_status = -1; info->rst_bus_status = -1; init_waitqueue_head(&info->eh_wait); timer_setup(&info->eh_timer, fas216_eh_timer, 0); spin_lock_init(&info->host_lock); memset(&info->stats, 0, sizeof(info->stats)); msgqueue_initialise(&info->scsi.msgs); if (!queue_initialise(&info->queues.issue)) return -ENOMEM; if (!queue_initialise(&info->queues.disconnected)) { queue_free(&info->queues.issue); return -ENOMEM; } return 0; } /** * fas216_add - initialise FAS/NCR/AMD SCSI ic. * @host: a driver-specific filled-out structure * @dev: parent device * * Initialise FAS/NCR/AMD SCSI ic. * Returns: 0 on success */ int fas216_add(struct Scsi_Host *host, struct device *dev) { FAS216_Info *info = (FAS216_Info *)host->hostdata; int type, ret; if (info->ifcfg.clockrate <= 10 || info->ifcfg.clockrate > 40) { printk(KERN_CRIT "fas216: invalid clock rate %u MHz\n", info->ifcfg.clockrate); return -EINVAL; } fas216_reset_state(info); type = fas216_detect_type(info); info->scsi.type = chip_types[type]; udelay(300); /* * Initialise the chip correctly. */ fas216_init_chip(info); /* * Reset the SCSI bus. We don't want to see * the resulting reset interrupt, so mask it * out. */ fas216_writeb(info, REG_CNTL1, info->scsi.cfg[0] | CNTL1_DISR); fas216_writeb(info, REG_CMD, CMD_RESETSCSI); /* * scsi standard says wait 250ms */ spin_unlock_irq(info->host->host_lock); msleep(100*1000/100); spin_lock_irq(info->host->host_lock); fas216_writeb(info, REG_CNTL1, info->scsi.cfg[0]); fas216_readb(info, REG_INST); fas216_checkmagic(info); ret = scsi_add_host(host, dev); if (ret) fas216_writeb(info, REG_CMD, CMD_RESETCHIP); else scsi_scan_host(host); return ret; } void fas216_remove(struct Scsi_Host *host) { FAS216_Info *info = (FAS216_Info *)host->hostdata; fas216_checkmagic(info); scsi_remove_host(host); fas216_writeb(info, REG_CMD, CMD_RESETCHIP); scsi_host_put(host); } /** * fas216_release - release all resources for FAS/NCR/AMD SCSI ic. * @host: a driver-specific filled-out structure * * release all resources and put everything to bed for FAS/NCR/AMD SCSI ic. 
*/ void fas216_release(struct Scsi_Host *host) { FAS216_Info *info = (FAS216_Info *)host->hostdata; queue_free(&info->queues.disconnected); queue_free(&info->queues.issue); } void fas216_print_host(FAS216_Info *info, struct seq_file *m) { seq_printf(m, "\n" "Chip : %s\n" " Address: 0x%p\n" " IRQ : %d\n" " DMA : %d\n", info->scsi.type, info->scsi.io_base, info->scsi.irq, info->scsi.dma); } void fas216_print_stats(FAS216_Info *info, struct seq_file *m) { seq_printf(m, "\n" "Command Statistics:\n" " Queued : %u\n" " Issued : %u\n" " Completed : %u\n" " Reads : %u\n" " Writes : %u\n" " Others : %u\n" " Disconnects: %u\n" " Aborts : %u\n" " Bus resets : %u\n" " Host resets: %u\n", info->stats.queues, info->stats.removes, info->stats.fins, info->stats.reads, info->stats.writes, info->stats.miscs, info->stats.disconnects, info->stats.aborts, info->stats.bus_resets, info->stats.host_resets); } void fas216_print_devices(FAS216_Info *info, struct seq_file *m) { struct fas216_device *dev; struct scsi_device *scd; seq_puts(m, "Device/Lun TaggedQ Parity Sync\n"); shost_for_each_device(scd, info->host) { dev = &info->device[scd->id]; seq_printf(m, " %d/%llu ", scd->id, scd->lun); if (scd->tagged_supported) seq_printf(m, "%3sabled ", scd->simple_tags ? "en" : "dis"); else seq_puts(m, "unsupported "); seq_printf(m, "%3sabled ", dev->parity_enabled ? "en" : "dis"); if (dev->sof) seq_printf(m, "offset %d, %d ns\n", dev->sof, dev->period * 4); else seq_puts(m, "async\n"); } } EXPORT_SYMBOL(fas216_init); EXPORT_SYMBOL(fas216_add); EXPORT_SYMBOL(fas216_queue_command); EXPORT_SYMBOL(fas216_noqueue_command); EXPORT_SYMBOL(fas216_intr); EXPORT_SYMBOL(fas216_remove); EXPORT_SYMBOL(fas216_release); EXPORT_SYMBOL(fas216_eh_abort); EXPORT_SYMBOL(fas216_eh_device_reset); EXPORT_SYMBOL(fas216_eh_bus_reset); EXPORT_SYMBOL(fas216_eh_host_reset); EXPORT_SYMBOL(fas216_print_host); EXPORT_SYMBOL(fas216_print_stats); EXPORT_SYMBOL(fas216_print_devices); MODULE_AUTHOR("Russell King"); MODULE_DESCRIPTION("Generic FAS216/NCR53C9x driver core"); MODULE_LICENSE("GPL");
linux-master
drivers/scsi/arm/fas216.c
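The fas216_busservice_intr() handler above dispatches on a single switch key built by the STATE(st, ph) macro, which packs the 3-bit SCSI bus phase reported by the chip together with the driver's current phase so that every legal phase transition becomes one case label. The following is a minimal standalone sketch of that packing technique, not the kernel code itself; the enum values, the classify() helper and main() are illustrative stand-ins for the driver's STAT_*/PHASE_* constants.

/*
 * Minimal standalone sketch of the STATE(st, ph) dispatch used in
 * fas216_busservice_intr(): the 3-bit SCSI bus phase from the chip and
 * the driver's own phase are packed into one switch key so every legal
 * phase transition becomes a single case label.
 *
 * The enum values below are illustrative only; the real driver uses the
 * STAT_* register bits and PHASE_* driver states from fas216.h.
 */
#include <stdio.h>

enum bus_phase { BUS_DATAOUT = 0, BUS_DATAIN = 1, BUS_COMMAND = 2,
		 BUS_STATUS = 3, BUS_MESGOUT = 6, BUS_MESGIN = 7 };
enum drv_phase { DRV_SELECTION = 0, DRV_COMMAND = 1, DRV_DATAIN = 2,
		 DRV_DATAOUT = 3, DRV_STATUS = 4, DRV_MSGIN = 5 };

/* Same packing as the driver: driver phase in the high bits,
 * 3-bit bus phase in the low bits. */
#define STATE(bus, drv)	(((drv) << 3) | (bus))

static const char *classify(enum bus_phase bus, enum drv_phase drv)
{
	switch (STATE(bus, drv)) {
	case STATE(BUS_DATAIN,  DRV_COMMAND):	/* Command -> Data In  */
	case STATE(BUS_DATAOUT, DRV_COMMAND):	/* Command -> Data Out */
		return "start data transfer";
	case STATE(BUS_STATUS,  DRV_DATAIN):	/* Data In -> Status   */
	case STATE(BUS_STATUS,  DRV_DATAOUT):	/* Data Out -> Status  */
		return "finish transfer, read status";
	default:
		return "unexpected transition";
	}
}

int main(void)
{
	printf("%s\n", classify(BUS_DATAIN, DRV_COMMAND));   /* start data transfer  */
	printf("%s\n", classify(BUS_MESGIN, DRV_SELECTION)); /* unexpected transition */
	return 0;
}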
// SPDX-License-Identifier: GPL-2.0-only /* * Oak Generic NCR5380 driver * * Copyright 1995-2002, Russell King */ #include <linux/module.h> #include <linux/ioport.h> #include <linux/blkdev.h> #include <linux/init.h> #include <asm/ecard.h> #include <asm/io.h> #include <scsi/scsi_host.h> #define priv(host) ((struct NCR5380_hostdata *)(host)->hostdata) #define NCR5380_read(reg) readb(hostdata->io + ((reg) << 2)) #define NCR5380_write(reg, value) writeb(value, hostdata->io + ((reg) << 2)) #define NCR5380_dma_xfer_len NCR5380_dma_xfer_none #define NCR5380_dma_recv_setup oakscsi_pread #define NCR5380_dma_send_setup oakscsi_pwrite #define NCR5380_dma_residual NCR5380_dma_residual_none #define NCR5380_queue_command oakscsi_queue_command #define NCR5380_info oakscsi_info #define NCR5380_implementation_fields /* none */ #include "../NCR5380.h" #undef START_DMA_INITIATOR_RECEIVE_REG #define START_DMA_INITIATOR_RECEIVE_REG (128 + 7) #define STAT ((128 + 16) << 2) #define DATA ((128 + 8) << 2) static inline int oakscsi_pwrite(struct NCR5380_hostdata *hostdata, unsigned char *addr, int len) { u8 __iomem *base = hostdata->io; printk("writing %p len %d\n",addr, len); while(1) { int status; while (((status = readw(base + STAT)) & 0x100)==0); } return 0; } static inline int oakscsi_pread(struct NCR5380_hostdata *hostdata, unsigned char *addr, int len) { u8 __iomem *base = hostdata->io; printk("reading %p len %d\n", addr, len); while(len > 0) { unsigned int status, timeout; unsigned long b; timeout = 0x01FFFFFF; while (((status = readw(base + STAT)) & 0x100)==0) { timeout--; if(status & 0x200 || !timeout) { printk("status = %08X\n", status); return -1; } } if(len >= 128) { readsw(base + DATA, addr, 128); addr += 128; len -= 128; } else { b = (unsigned long) readw(base + DATA); *addr ++ = b; len -= 1; if(len) *addr ++ = b>>8; len -= 1; } } return 0; } #undef STAT #undef DATA #include "../NCR5380.c" static const struct scsi_host_template oakscsi_template = { .module = THIS_MODULE, .name = "Oak 16-bit SCSI", .info = oakscsi_info, .queuecommand = oakscsi_queue_command, .eh_abort_handler = NCR5380_abort, .eh_host_reset_handler = NCR5380_host_reset, .can_queue = 16, .this_id = 7, .sg_tablesize = SG_ALL, .cmd_per_lun = 2, .dma_boundary = PAGE_SIZE - 1, .proc_name = "oakscsi", .cmd_size = sizeof(struct NCR5380_cmd), .max_sectors = 128, }; static int oakscsi_probe(struct expansion_card *ec, const struct ecard_id *id) { struct Scsi_Host *host; int ret; ret = ecard_request_resources(ec); if (ret) goto out; host = scsi_host_alloc(&oakscsi_template, sizeof(struct NCR5380_hostdata)); if (!host) { ret = -ENOMEM; goto release; } priv(host)->io = ioremap(ecard_resource_start(ec, ECARD_RES_MEMC), ecard_resource_len(ec, ECARD_RES_MEMC)); if (!priv(host)->io) { ret = -ENOMEM; goto unreg; } host->irq = NO_IRQ; ret = NCR5380_init(host, FLAG_DMA_FIXUP | FLAG_LATE_DMA_SETUP); if (ret) goto out_unmap; NCR5380_maybe_reset_bus(host); ret = scsi_add_host(host, &ec->dev); if (ret) goto out_exit; scsi_scan_host(host); goto out; out_exit: NCR5380_exit(host); out_unmap: iounmap(priv(host)->io); unreg: scsi_host_put(host); release: ecard_release_resources(ec); out: return ret; } static void oakscsi_remove(struct expansion_card *ec) { struct Scsi_Host *host = ecard_get_drvdata(ec); void __iomem *base = priv(host)->io; ecard_set_drvdata(ec, NULL); scsi_remove_host(host); NCR5380_exit(host); scsi_host_put(host); iounmap(base); ecard_release_resources(ec); } static const struct ecard_id oakscsi_cids[] = { { MANU_OAK, PROD_OAK_SCSI }, { 0xffff, 
0xffff } }; static struct ecard_driver oakscsi_driver = { .probe = oakscsi_probe, .remove = oakscsi_remove, .id_table = oakscsi_cids, .drv = { .name = "oakscsi", }, }; static int __init oakscsi_init(void) { return ecard_register_driver(&oakscsi_driver); } static void __exit oakscsi_exit(void) { ecard_remove_driver(&oakscsi_driver); } module_init(oakscsi_init); module_exit(oakscsi_exit); MODULE_AUTHOR("Russell King"); MODULE_DESCRIPTION("Oak SCSI driver"); MODULE_LICENSE("GPL");
linux-master
drivers/scsi/arm/oak.c
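oakscsi_pread() above implements the Oak card's transfers as a polled PIO loop: it spins on the card's status word until bit 0x100 signals data ready, gives up if bit 0x200 (error) appears or a countdown expires, then drains the data port as 16-bit words (128 at a time via readsw in the real driver). Below is a hedged, self-contained sketch of that pattern under stated assumptions: read_status()/read_data() are simulated stand-ins for the card's MMIO registers, and the 128-word burst is simplified to one word per iteration.

/*
 * Hedged sketch of the polled PIO read pattern used by oakscsi_pread():
 * spin on a status register until a "data ready" bit appears, give up
 * on an error bit or when a countdown expires, then drain the data
 * port as 16-bit words.  read_status()/read_data() below are simulated
 * stand-ins for the card's MMIO registers, not a real kernel API.
 */
#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

#define STAT_DATA_READY	0x100		/* FIFO has data available */
#define STAT_ERROR	0x200		/* card reports a fault    */

/* Simulated card so the sketch runs on its own: always ready,
 * returns incrementing 16-bit words. */
static uint16_t sim_word;
static uint16_t read_status(void) { return STAT_DATA_READY; }
static uint16_t read_data(void)   { return sim_word++; }

static int pio_read(uint8_t *buf, size_t len)
{
	while (len > 0) {
		unsigned long timeout = 0x01FFFFFF;
		uint16_t status, w;

		/* Wait for the ready bit, bailing out on error or timeout. */
		while (((status = read_status()) & STAT_DATA_READY) == 0)
			if ((status & STAT_ERROR) || --timeout == 0)
				return -1;

		/* One 16-bit word per iteration; handle an odd tail byte. */
		w = read_data();
		*buf++ = (uint8_t)w;
		if (--len == 0)
			break;
		*buf++ = (uint8_t)(w >> 8);
		len--;
	}
	return 0;
}

int main(void)
{
	uint8_t buf[5];

	if (pio_read(buf, sizeof(buf)) == 0)
		printf("read %02x %02x %02x %02x %02x\n",
		       buf[0], buf[1], buf[2], buf[3], buf[4]);
	return 0;
}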
// SPDX-License-Identifier: GPL-2.0-only /* * linux/drivers/acorn/scsi/acornscsi.c * * Acorn SCSI 3 driver * By R.M.King. * * Abandoned using the Select and Transfer command since there were * some nasty races between our software and the target devices that * were not easy to solve, and the device errata had a lot of entries * for this command, some of them quite nasty... * * Changelog: * 26-Sep-1997 RMK Re-jigged to use the queue module. * Re-coded state machine to be based on driver * state not scsi state. Should be easier to debug. * Added acornscsi_release to clean up properly. * Updated proc/scsi reporting. * 05-Oct-1997 RMK Implemented writing to SCSI devices. * 06-Oct-1997 RMK Corrected small (non-serious) bug with the connect/ * reconnect race condition causing a warning message. * 12-Oct-1997 RMK Added catch for re-entering interrupt routine. * 15-Oct-1997 RMK Improved handling of commands. * 27-Jun-1998 RMK Changed asm/delay.h to linux/delay.h. * 13-Dec-1998 RMK Better abort code and command handling. Extra state * transitions added to allow dodgy devices to work. */ #define DEBUG_NO_WRITE 1 #define DEBUG_QUEUES 2 #define DEBUG_DMA 4 #define DEBUG_ABORT 8 #define DEBUG_DISCON 16 #define DEBUG_CONNECT 32 #define DEBUG_PHASES 64 #define DEBUG_WRITE 128 #define DEBUG_LINK 256 #define DEBUG_MESSAGES 512 #define DEBUG_RESET 1024 #define DEBUG_ALL (DEBUG_RESET|DEBUG_MESSAGES|DEBUG_LINK|DEBUG_WRITE|\ DEBUG_PHASES|DEBUG_CONNECT|DEBUG_DISCON|DEBUG_ABORT|\ DEBUG_DMA|DEBUG_QUEUES) /* DRIVER CONFIGURATION * * SCSI-II Tagged queue support. * * I don't have any SCSI devices that support it, so it is totally untested * (except to make sure that it doesn't interfere with any non-tagging * devices). It is not fully implemented either - what happens when a * tagging device reconnects??? * * You can tell if you have a device that supports tagged queueing my * cating (eg) /proc/scsi/acornscsi/0 and see if the SCSI revision is reported * as '2 TAG'. */ /* * SCSI-II Synchronous transfer support. * * Tried and tested... * * SDTR_SIZE - maximum number of un-acknowledged bytes (0 = off, 12 = max) * SDTR_PERIOD - period of REQ signal (min=125, max=1020) * DEFAULT_PERIOD - default REQ period. */ #define SDTR_SIZE 12 #define SDTR_PERIOD 125 #define DEFAULT_PERIOD 500 /* * Debugging information * * DEBUG - bit mask from list above * DEBUG_TARGET - is defined to the target number if you want to debug * a specific target. [only recon/write/dma]. */ #define DEBUG (DEBUG_RESET|DEBUG_WRITE|DEBUG_NO_WRITE) /* only allow writing to SCSI device 0 */ #define NO_WRITE 0xFE /*#define DEBUG_TARGET 2*/ /* * Select timeout time (in 10ms units) * * This is the timeout used between the start of selection and the WD33C93 * chip deciding that the device isn't responding. */ #define TIMEOUT_TIME 10 /* * Define this if you want to have verbose explanation of SCSI * status/messages. */ #undef CONFIG_ACORNSCSI_CONSTANTS /* * Define this if you want to use the on board DMAC [don't remove this option] * If not set, then use PIO mode (not currently supported). */ #define USE_DMAC /* * ==================================================================================== */ #ifdef DEBUG_TARGET #define DBG(cmd,xxx...) \ if (cmd->device->id == DEBUG_TARGET) { \ xxx; \ } #else #define DBG(cmd,xxx...) 
xxx #endif #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/signal.h> #include <linux/errno.h> #include <linux/proc_fs.h> #include <linux/ioport.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/bitops.h> #include <linux/stringify.h> #include <linux/io.h> #include <asm/ecard.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_dbg.h> #include <scsi/scsi_device.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_host.h> #include <scsi/scsi_tcq.h> #include <scsi/scsi_transport_spi.h> #include "acornscsi.h" #include "msgqueue.h" #include "arm_scsi.h" #include <scsi/scsicam.h> #define VER_MAJOR 2 #define VER_MINOR 0 #define VER_PATCH 6 #ifdef USE_DMAC /* * DMAC setup parameters */ #define INIT_DEVCON0 (DEVCON0_RQL|DEVCON0_EXW|DEVCON0_CMP) #define INIT_DEVCON1 (DEVCON1_BHLD) #define DMAC_READ (MODECON_READ) #define DMAC_WRITE (MODECON_WRITE) #define INIT_SBICDMA (CTRL_DMABURST) #define scsi_xferred have_data_in /* * Size of on-board DMA buffer */ #define DMAC_BUFFER_SIZE 65536 #endif #define STATUS_BUFFER_TO_PRINT 24 unsigned int sdtr_period = SDTR_PERIOD; unsigned int sdtr_size = SDTR_SIZE; static void acornscsi_done(AS_Host *host, struct scsi_cmnd **SCpntp, unsigned int result); static int acornscsi_reconnect_finish(AS_Host *host); static void acornscsi_dma_cleanup(AS_Host *host); static void acornscsi_abortcmd(AS_Host *host); /* ==================================================================================== * Miscellaneous */ /* Offsets from MEMC base */ #define SBIC_REGIDX 0x2000 #define SBIC_REGVAL 0x2004 #define DMAC_OFFSET 0x3000 /* Offsets from FAST IOC base */ #define INT_REG 0x2000 #define PAGE_REG 0x3000 static inline void sbic_arm_write(AS_Host *host, unsigned int reg, unsigned int value) { writeb(reg, host->base + SBIC_REGIDX); writeb(value, host->base + SBIC_REGVAL); } static inline int sbic_arm_read(AS_Host *host, unsigned int reg) { if(reg == SBIC_ASR) return readl(host->base + SBIC_REGIDX) & 255; writeb(reg, host->base + SBIC_REGIDX); return readl(host->base + SBIC_REGVAL) & 255; } #define sbic_arm_writenext(host, val) writeb((val), (host)->base + SBIC_REGVAL) #define sbic_arm_readnext(host) readb((host)->base + SBIC_REGVAL) #ifdef USE_DMAC #define dmac_read(host,reg) \ readb((host)->base + DMAC_OFFSET + ((reg) << 2)) #define dmac_write(host,reg,value) \ ({ writeb((value), (host)->base + DMAC_OFFSET + ((reg) << 2)); }) #define dmac_clearintr(host) writeb(0, (host)->fast + INT_REG) static inline unsigned int dmac_address(AS_Host *host) { return dmac_read(host, DMAC_TXADRHI) << 16 | dmac_read(host, DMAC_TXADRMD) << 8 | dmac_read(host, DMAC_TXADRLO); } static void acornscsi_dumpdma(AS_Host *host, char *where) { unsigned int mode, addr, len; mode = dmac_read(host, DMAC_MODECON); addr = dmac_address(host); len = dmac_read(host, DMAC_TXCNTHI) << 8 | dmac_read(host, DMAC_TXCNTLO); printk("scsi%d: %s: DMAC %02x @%06x+%04x msk %02x, ", host->host->host_no, where, mode, addr, (len + 1) & 0xffff, dmac_read(host, DMAC_MASKREG)); printk("DMA @%06x, ", host->dma.start_addr); printk("BH @%p +%04x, ", host->scsi.SCp.ptr, host->scsi.SCp.this_residual); printk("DT @+%04x ST @+%04x", host->dma.transferred, host->scsi.SCp.scsi_xferred); printk("\n"); } #endif static unsigned long acornscsi_sbic_xfcount(AS_Host *host) { unsigned long length; length = sbic_arm_read(host, SBIC_TRANSCNTH) << 16; length |= sbic_arm_readnext(host) << 8; length |= 
sbic_arm_readnext(host); return length; } static int acornscsi_sbic_wait(AS_Host *host, int stat_mask, int stat, int timeout, char *msg) { int asr; do { asr = sbic_arm_read(host, SBIC_ASR); if ((asr & stat_mask) == stat) return 0; udelay(1); } while (--timeout); printk("scsi%d: timeout while %s\n", host->host->host_no, msg); return -1; } static int acornscsi_sbic_issuecmd(AS_Host *host, int command) { if (acornscsi_sbic_wait(host, ASR_CIP, 0, 1000, "issuing command")) return -1; sbic_arm_write(host, SBIC_CMND, command); return 0; } static void acornscsi_csdelay(unsigned int cs) { unsigned long target_jiffies, flags; target_jiffies = jiffies + 1 + cs * HZ / 100; local_save_flags(flags); local_irq_enable(); while (time_before(jiffies, target_jiffies)) barrier(); local_irq_restore(flags); } static void acornscsi_resetcard(AS_Host *host) { unsigned int i, timeout; /* assert reset line */ host->card.page_reg = 0x80; writeb(host->card.page_reg, host->fast + PAGE_REG); /* wait 3 cs. SCSI standard says 25ms. */ acornscsi_csdelay(3); host->card.page_reg = 0; writeb(host->card.page_reg, host->fast + PAGE_REG); /* * Should get a reset from the card */ timeout = 1000; do { if (readb(host->fast + INT_REG) & 8) break; udelay(1); } while (--timeout); if (timeout == 0) printk("scsi%d: timeout while resetting card\n", host->host->host_no); sbic_arm_read(host, SBIC_ASR); sbic_arm_read(host, SBIC_SSR); /* setup sbic - WD33C93A */ sbic_arm_write(host, SBIC_OWNID, OWNID_EAF | host->host->this_id); sbic_arm_write(host, SBIC_CMND, CMND_RESET); /* * Command should cause a reset interrupt */ timeout = 1000; do { if (readb(host->fast + INT_REG) & 8) break; udelay(1); } while (--timeout); if (timeout == 0) printk("scsi%d: timeout while resetting card\n", host->host->host_no); sbic_arm_read(host, SBIC_ASR); if (sbic_arm_read(host, SBIC_SSR) != 0x01) printk(KERN_CRIT "scsi%d: WD33C93A didn't give enhanced reset interrupt\n", host->host->host_no); sbic_arm_write(host, SBIC_CTRL, INIT_SBICDMA | CTRL_IDI); sbic_arm_write(host, SBIC_TIMEOUT, TIMEOUT_TIME); sbic_arm_write(host, SBIC_SYNCHTRANSFER, SYNCHTRANSFER_2DBA); sbic_arm_write(host, SBIC_SOURCEID, SOURCEID_ER | SOURCEID_DSP); host->card.page_reg = 0x40; writeb(host->card.page_reg, host->fast + PAGE_REG); /* setup dmac - uPC71071 */ dmac_write(host, DMAC_INIT, 0); #ifdef USE_DMAC dmac_write(host, DMAC_INIT, INIT_8BIT); dmac_write(host, DMAC_CHANNEL, CHANNEL_0); dmac_write(host, DMAC_DEVCON0, INIT_DEVCON0); dmac_write(host, DMAC_DEVCON1, INIT_DEVCON1); #endif host->SCpnt = NULL; host->scsi.phase = PHASE_IDLE; host->scsi.disconnectable = 0; memset(host->busyluns, 0, sizeof(host->busyluns)); for (i = 0; i < 8; i++) { host->device[i].sync_state = SYNC_NEGOCIATE; host->device[i].disconnect_ok = 1; } /* wait 25 cs. SCSI standard says 250ms. */ acornscsi_csdelay(25); } /*============================================================================================= * Utility routines (eg. 
debug) */ #ifdef CONFIG_ACORNSCSI_CONSTANTS static char *acornscsi_interrupttype[] = { "rst", "suc", "p/a", "3", "term", "5", "6", "7", "serv", "9", "a", "b", "c", "d", "e", "f" }; static signed char acornscsi_map[] = { 0, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 2, -1, -1, -1, -1, 3, -1, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, -1, -1, -1, -1, -1, 4, 5, 6, 7, 8, 9, 10, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 15, 16, 17, 18, 19, -1, -1, 20, 4, 5, 6, 7, 8, 9, 10, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 21, 22, -1, -1, -1, 23, -1, -1, 4, 5, 6, 7, 8, 9, 10, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }; static char *acornscsi_interruptcode[] = { /* 0 */ "reset - normal mode", /* 00 */ "reset - advanced mode", /* 01 */ /* 2 */ "sel", /* 11 */ "sel+xfer", /* 16 */ "data-out", /* 18 */ "data-in", /* 19 */ "cmd", /* 1A */ "stat", /* 1B */ "??-out", /* 1C */ "??-in", /* 1D */ "msg-out", /* 1E */ "msg-in", /* 1F */ /* 12 */ "/ACK asserted", /* 20 */ "save-data-ptr", /* 21 */ "{re}sel", /* 22 */ /* 15 */ "inv cmd", /* 40 */ "unexpected disconnect", /* 41 */ "sel timeout", /* 42 */ "P err", /* 43 */ "P err+ATN", /* 44 */ "bad status byte", /* 47 */ /* 21 */ "resel, no id", /* 80 */ "resel", /* 81 */ "discon", /* 85 */ }; static void print_scsi_status(unsigned int ssr) { if (acornscsi_map[ssr] != -1) printk("%s:%s", acornscsi_interrupttype[(ssr >> 4)], acornscsi_interruptcode[acornscsi_map[ssr]]); else printk("%X:%X", ssr >> 4, ssr & 0x0f); } #endif static void print_sbic_status(int asr, int ssr, int cmdphase) { #ifdef CONFIG_ACORNSCSI_CONSTANTS printk("sbic: %c%c%c%c%c%c ", asr & ASR_INT ? 'I' : 'i', asr & ASR_LCI ? 'L' : 'l', asr & ASR_BSY ? 'B' : 'b', asr & ASR_CIP ? 'C' : 'c', asr & ASR_PE ? 'P' : 'p', asr & ASR_DBR ? 'D' : 'd'); printk("scsi: "); print_scsi_status(ssr); printk(" ph %02X\n", cmdphase); #else printk("sbic: %02X scsi: %X:%X ph: %02X\n", asr, (ssr & 0xf0)>>4, ssr & 0x0f, cmdphase); #endif } static void acornscsi_dumplogline(AS_Host *host, int target, int line) { unsigned long prev; signed int ptr; ptr = host->status_ptr[target] - STATUS_BUFFER_TO_PRINT; if (ptr < 0) ptr += STATUS_BUFFER_SIZE; printk("%c: %3s:", target == 8 ? 'H' : '0' + target, line == 0 ? "ph" : line == 1 ? "ssr" : "int"); prev = host->status[target][ptr].when; for (; ptr != host->status_ptr[target]; ptr = (ptr + 1) & (STATUS_BUFFER_SIZE - 1)) { unsigned long time_diff; if (!host->status[target][ptr].when) continue; switch (line) { case 0: printk("%c%02X", host->status[target][ptr].irq ? 
'-' : ' ', host->status[target][ptr].ph); break; case 1: printk(" %02X", host->status[target][ptr].ssr); break; case 2: time_diff = host->status[target][ptr].when - prev; prev = host->status[target][ptr].when; if (time_diff == 0) printk("==^"); else if (time_diff >= 100) printk(" "); else printk(" %02ld", time_diff); break; } } printk("\n"); } static void acornscsi_dumplog(AS_Host *host, int target) { do { acornscsi_dumplogline(host, target, 0); acornscsi_dumplogline(host, target, 1); acornscsi_dumplogline(host, target, 2); if (target == 8) break; target = 8; } while (1); } static char acornscsi_target(AS_Host *host) { if (host->SCpnt) return '0' + host->SCpnt->device->id; return 'H'; } /* * Prototype: cmdtype_t acornscsi_cmdtype(int command) * Purpose : differentiate READ from WRITE from other commands * Params : command - command to interpret * Returns : CMD_READ - command reads data, * CMD_WRITE - command writes data, * CMD_MISC - everything else */ static inline cmdtype_t acornscsi_cmdtype(int command) { switch (command) { case WRITE_6: case WRITE_10: case WRITE_12: return CMD_WRITE; case READ_6: case READ_10: case READ_12: return CMD_READ; default: return CMD_MISC; } } /* * Prototype: int acornscsi_datadirection(int command) * Purpose : differentiate between commands that have a DATA IN phase * and a DATA OUT phase * Params : command - command to interpret * Returns : DATADIR_OUT - data out phase expected * DATADIR_IN - data in phase expected */ static datadir_t acornscsi_datadirection(int command) { switch (command) { case CHANGE_DEFINITION: case COMPARE: case COPY: case COPY_VERIFY: case LOG_SELECT: case MODE_SELECT: case MODE_SELECT_10: case SEND_DIAGNOSTIC: case WRITE_BUFFER: case FORMAT_UNIT: case REASSIGN_BLOCKS: case RESERVE: case SEARCH_EQUAL: case SEARCH_HIGH: case SEARCH_LOW: case WRITE_6: case WRITE_10: case WRITE_VERIFY: case UPDATE_BLOCK: case WRITE_LONG: case WRITE_SAME: case SEARCH_HIGH_12: case SEARCH_EQUAL_12: case SEARCH_LOW_12: case WRITE_12: case WRITE_VERIFY_12: case SET_WINDOW: case MEDIUM_SCAN: case SEND_VOLUME_TAG: case 0xea: return DATADIR_OUT; default: return DATADIR_IN; } } /* * Purpose : provide values for synchronous transfers with 33C93. * Copyright: Copyright (c) 1996 John Shifflett, GeoLog Consulting * Modified by Russell King for 8MHz WD33C93A */ static struct sync_xfer_tbl { unsigned int period_ns; unsigned char reg_value; } sync_xfer_table[] = { { 1, 0x20 }, { 249, 0x20 }, { 374, 0x30 }, { 499, 0x40 }, { 624, 0x50 }, { 749, 0x60 }, { 874, 0x70 }, { 999, 0x00 }, { 0, 0 } }; /* * Prototype: int acornscsi_getperiod(unsigned char syncxfer) * Purpose : period for the synchronous transfer setting * Params : syncxfer SYNCXFER register value * Returns : period in ns. 
*/ static int acornscsi_getperiod(unsigned char syncxfer) { int i; syncxfer &= 0xf0; if (syncxfer == 0x10) syncxfer = 0; for (i = 1; sync_xfer_table[i].period_ns; i++) if (syncxfer == sync_xfer_table[i].reg_value) return sync_xfer_table[i].period_ns; return 0; } /* * Prototype: int round_period(unsigned int period) * Purpose : return index into above table for a required REQ period * Params : period - time (ns) for REQ * Returns : table index * Copyright: Copyright (c) 1996 John Shifflett, GeoLog Consulting */ static inline int round_period(unsigned int period) { int i; for (i = 1; sync_xfer_table[i].period_ns; i++) { if ((period <= sync_xfer_table[i].period_ns) && (period > sync_xfer_table[i - 1].period_ns)) return i; } return 7; } /* * Prototype: unsigned char calc_sync_xfer(unsigned int period, unsigned int offset) * Purpose : calculate value for 33c93s SYNC register * Params : period - time (ns) for REQ * offset - offset in bytes between REQ/ACK * Returns : value for SYNC register * Copyright: Copyright (c) 1996 John Shifflett, GeoLog Consulting */ static unsigned char __maybe_unused calc_sync_xfer(unsigned int period, unsigned int offset) { return sync_xfer_table[round_period(period)].reg_value | ((offset < SDTR_SIZE) ? offset : SDTR_SIZE); } /* ==================================================================================== * Command functions */ /* * Function: acornscsi_kick(AS_Host *host) * Purpose : kick next command to interface * Params : host - host to send command to * Returns : INTR_IDLE if idle, otherwise INTR_PROCESSING * Notes : interrupts are always disabled! */ static intr_ret_t acornscsi_kick(AS_Host *host) { int from_queue = 0; struct scsi_cmnd *SCpnt; /* first check to see if a command is waiting to be executed */ SCpnt = host->origSCpnt; host->origSCpnt = NULL; /* retrieve next command */ if (!SCpnt) { SCpnt = queue_remove_exclude(&host->queues.issue, host->busyluns); if (!SCpnt) return INTR_IDLE; from_queue = 1; } if (host->scsi.disconnectable && host->SCpnt) { queue_add_cmd_tail(&host->queues.disconnected, host->SCpnt); host->scsi.disconnectable = 0; #if (DEBUG & (DEBUG_QUEUES|DEBUG_DISCON)) DBG(host->SCpnt, printk("scsi%d.%c: moved command to disconnected queue\n", host->host->host_no, acornscsi_target(host))); #endif host->SCpnt = NULL; } /* * If we have an interrupt pending, then we may have been reselected. * In this case, we don't want to write to the registers */ if (!(sbic_arm_read(host, SBIC_ASR) & (ASR_INT|ASR_BSY|ASR_CIP))) { sbic_arm_write(host, SBIC_DESTID, SCpnt->device->id); sbic_arm_write(host, SBIC_CMND, CMND_SELWITHATN); } /* * claim host busy - all of these must happen atomically wrt * our interrupt routine. Failure means command loss. 
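 *
 * The interrupt path keys off host->scsi.phase and host->SCpnt, so the
 * whole group below (phase, SCpnt, the saved scsi_pointer and the DMA
 * flags) is written as one unit while interrupts are off - see the
 * "interrupts are always disabled" note in this function's header.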
*/ host->scsi.phase = PHASE_CONNECTING; host->SCpnt = SCpnt; host->scsi.SCp = *arm_scsi_pointer(SCpnt); host->dma.xfer_setup = 0; host->dma.xfer_required = 0; host->dma.xfer_done = 0; #if (DEBUG & (DEBUG_ABORT|DEBUG_CONNECT)) DBG(SCpnt,printk("scsi%d.%c: starting cmd %02X\n", host->host->host_no, '0' + SCpnt->device->id, SCpnt->cmnd[0])); #endif if (from_queue) { set_bit(SCpnt->device->id * 8 + (u8)(SCpnt->device->lun & 0x07), host->busyluns); host->stats.removes += 1; switch (acornscsi_cmdtype(SCpnt->cmnd[0])) { case CMD_WRITE: host->stats.writes += 1; break; case CMD_READ: host->stats.reads += 1; break; case CMD_MISC: host->stats.miscs += 1; break; } } return INTR_PROCESSING; } /* * Function: void acornscsi_done(AS_Host *host, struct scsi_cmnd **SCpntp, unsigned int result) * Purpose : complete processing for command * Params : host - interface that completed * result - driver byte of result */ static void acornscsi_done(AS_Host *host, struct scsi_cmnd **SCpntp, unsigned int result) { struct scsi_cmnd *SCpnt = *SCpntp; /* clean up */ sbic_arm_write(host, SBIC_SOURCEID, SOURCEID_ER | SOURCEID_DSP); host->stats.fins += 1; if (SCpnt) { *SCpntp = NULL; acornscsi_dma_cleanup(host); set_host_byte(SCpnt, result); if (result == DID_OK) scsi_msg_to_host_byte(SCpnt, host->scsi.SCp.Message); set_status_byte(SCpnt, host->scsi.SCp.Status); /* * In theory, this should not happen. In practice, it seems to. * Only trigger an error if the device attempts to report all happy * but with untransferred buffers... If we don't do something, then * data loss will occur. Should we check SCpnt->underflow here? * It doesn't appear to be set to something meaningful by the higher * levels all the time. */ if (result == DID_OK) { int xfer_warn = 0; if (SCpnt->underflow == 0) { if (host->scsi.SCp.ptr && acornscsi_cmdtype(SCpnt->cmnd[0]) != CMD_MISC) xfer_warn = 1; } else { if (host->scsi.SCp.scsi_xferred < SCpnt->underflow || host->scsi.SCp.scsi_xferred != host->dma.transferred) xfer_warn = 1; } /* ANSI standard says: (SCSI-2 Rev 10c Sect 5.6.6) * Targets which break data transfers into multiple * connections shall end each successful connection * (except possibly the last) with a SAVE DATA * POINTER - DISCONNECT message sequence. * * This makes it difficult to ensure that a transfer has * completed. If we reach the end of a transfer during * the command, then we can only have finished the transfer. * therefore, if we seem to have some data remaining, this * is not a problem. 
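 *
 * Summing up: a warning is only considered when the command reports
 * DID_OK.  Without an underflow value, any READ/WRITE-type command that
 * was given a data pointer is treated as suspect; with one, the bytes
 * seen on the SCSI bus are compared against both SCpnt->underflow and
 * the DMA byte count.  In either case host->dma.xfer_done (tested just
 * below) clears the warning, since reaching the end of the transfer
 * means all the data was moved.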
*/ if (host->dma.xfer_done) xfer_warn = 0; if (xfer_warn) { switch (get_status_byte(SCpnt)) { case SAM_STAT_CHECK_CONDITION: case SAM_STAT_COMMAND_TERMINATED: case SAM_STAT_BUSY: case SAM_STAT_TASK_SET_FULL: case SAM_STAT_RESERVATION_CONFLICT: break; default: scmd_printk(KERN_ERR, SCpnt, "incomplete data transfer detected: " "result=%08X", SCpnt->result); scsi_print_command(SCpnt); acornscsi_dumpdma(host, "done"); acornscsi_dumplog(host, SCpnt->device->id); set_host_byte(SCpnt, DID_ERROR); } } } clear_bit(SCpnt->device->id * 8 + (u8)(SCpnt->device->lun & 0x7), host->busyluns); scsi_done(SCpnt); } else printk("scsi%d: null command in acornscsi_done", host->host->host_no); host->scsi.phase = PHASE_IDLE; } /* ==================================================================================== * DMA routines */ /* * Purpose : update SCSI Data Pointer * Notes : this will only be one SG entry or less */ static void acornscsi_data_updateptr(AS_Host *host, struct scsi_pointer *SCp, unsigned int length) { SCp->ptr += length; SCp->this_residual -= length; if (SCp->this_residual == 0 && next_SCp(SCp) == 0) host->dma.xfer_done = 1; } /* * Prototype: void acornscsi_data_read(AS_Host *host, char *ptr, * unsigned int start_addr, unsigned int length) * Purpose : read data from DMA RAM * Params : host - host to transfer from * ptr - DRAM address * start_addr - host mem address * length - number of bytes to transfer * Notes : this will only be one SG entry or less */ static void acornscsi_data_read(AS_Host *host, char *ptr, unsigned int start_addr, unsigned int length) { extern void __acornscsi_in(void __iomem *, char *buf, int len); unsigned int page, offset, len = length; page = (start_addr >> 12); offset = start_addr & ((1 << 12) - 1); writeb((page & 0x3f) | host->card.page_reg, host->fast + PAGE_REG); while (len > 0) { unsigned int this_len; if (len + offset > (1 << 12)) this_len = (1 << 12) - offset; else this_len = len; __acornscsi_in(host->base + (offset << 1), ptr, this_len); offset += this_len; ptr += this_len; len -= this_len; if (offset == (1 << 12)) { offset = 0; page ++; writeb((page & 0x3f) | host->card.page_reg, host->fast + PAGE_REG); } } writeb(host->card.page_reg, host->fast + PAGE_REG); } /* * Prototype: void acornscsi_data_write(AS_Host *host, char *ptr, * unsigned int start_addr, unsigned int length) * Purpose : write data to DMA RAM * Params : host - host to transfer from * ptr - DRAM address * start_addr - host mem address * length - number of bytes to transfer * Notes : this will only be one SG entry or less */ static void acornscsi_data_write(AS_Host *host, char *ptr, unsigned int start_addr, unsigned int length) { extern void __acornscsi_out(void __iomem *, char *buf, int len); unsigned int page, offset, len = length; page = (start_addr >> 12); offset = start_addr & ((1 << 12) - 1); writeb((page & 0x3f) | host->card.page_reg, host->fast + PAGE_REG); while (len > 0) { unsigned int this_len; if (len + offset > (1 << 12)) this_len = (1 << 12) - offset; else this_len = len; __acornscsi_out(host->base + (offset << 1), ptr, this_len); offset += this_len; ptr += this_len; len -= this_len; if (offset == (1 << 12)) { offset = 0; page ++; writeb((page & 0x3f) | host->card.page_reg, host->fast + PAGE_REG); } } writeb(host->card.page_reg, host->fast + PAGE_REG); } /* ========================================================================================= * On-board DMA routines */ #ifdef USE_DMAC /* * Prototype: void acornscsi_dmastop(AS_Host *host) * Purpose : stop all DMA * Params : host - 
host on which to stop DMA * Notes : This is called when leaving DATA IN/OUT phase, * or when interface is RESET */ static inline void acornscsi_dma_stop(AS_Host *host) { dmac_write(host, DMAC_MASKREG, MASK_ON); dmac_clearintr(host); #if (DEBUG & DEBUG_DMA) DBG(host->SCpnt, acornscsi_dumpdma(host, "stop")); #endif } /* * Function: void acornscsi_dma_setup(AS_Host *host, dmadir_t direction) * Purpose : setup DMA controller for data transfer * Params : host - host to setup * direction - data transfer direction * Notes : This is called when entering DATA I/O phase, not * while we're in a DATA I/O phase */ static void acornscsi_dma_setup(AS_Host *host, dmadir_t direction) { unsigned int address, length, mode; host->dma.direction = direction; dmac_write(host, DMAC_MASKREG, MASK_ON); if (direction == DMA_OUT) { #if (DEBUG & DEBUG_NO_WRITE) if (NO_WRITE & (1 << host->SCpnt->device->id)) { printk(KERN_CRIT "scsi%d.%c: I can't handle DMA_OUT!\n", host->host->host_no, acornscsi_target(host)); return; } #endif mode = DMAC_WRITE; } else mode = DMAC_READ; /* * Allocate some buffer space, limited to half the buffer size */ length = min_t(unsigned int, host->scsi.SCp.this_residual, DMAC_BUFFER_SIZE / 2); if (length) { host->dma.start_addr = address = host->dma.free_addr; host->dma.free_addr = (host->dma.free_addr + length) & (DMAC_BUFFER_SIZE - 1); /* * Transfer data to DMA memory */ if (direction == DMA_OUT) acornscsi_data_write(host, host->scsi.SCp.ptr, host->dma.start_addr, length); length -= 1; dmac_write(host, DMAC_TXCNTLO, length); dmac_write(host, DMAC_TXCNTHI, length >> 8); dmac_write(host, DMAC_TXADRLO, address); dmac_write(host, DMAC_TXADRMD, address >> 8); dmac_write(host, DMAC_TXADRHI, 0); dmac_write(host, DMAC_MODECON, mode); dmac_write(host, DMAC_MASKREG, MASK_OFF); #if (DEBUG & DEBUG_DMA) DBG(host->SCpnt, acornscsi_dumpdma(host, "strt")); #endif host->dma.xfer_setup = 1; } } /* * Function: void acornscsi_dma_cleanup(AS_Host *host) * Purpose : ensure that all DMA transfers are up-to-date & host->scsi.SCp is correct * Params : host - host to finish * Notes : This is called when a command is: * terminating, RESTORE_POINTERS, SAVE_POINTERS, DISCONNECT * : This must not return until all transfers are completed. */ static void acornscsi_dma_cleanup(AS_Host *host) { dmac_write(host, DMAC_MASKREG, MASK_ON); dmac_clearintr(host); /* * Check for a pending transfer */ if (host->dma.xfer_required) { host->dma.xfer_required = 0; if (host->dma.direction == DMA_IN) acornscsi_data_read(host, host->dma.xfer_ptr, host->dma.xfer_start, host->dma.xfer_length); } /* * Has a transfer been setup? */ if (host->dma.xfer_setup) { unsigned int transferred; host->dma.xfer_setup = 0; #if (DEBUG & DEBUG_DMA) DBG(host->SCpnt, acornscsi_dumpdma(host, "cupi")); #endif /* * Calculate number of bytes transferred from DMA. */ transferred = dmac_address(host) - host->dma.start_addr; host->dma.transferred += transferred; if (host->dma.direction == DMA_IN) acornscsi_data_read(host, host->scsi.SCp.ptr, host->dma.start_addr, transferred); /* * Update SCSI pointers */ acornscsi_data_updateptr(host, &host->scsi.SCp, transferred); #if (DEBUG & DEBUG_DMA) DBG(host->SCpnt, acornscsi_dumpdma(host, "cupo")); #endif } } /* * Function: void acornscsi_dmacintr(AS_Host *host) * Purpose : handle interrupts from DMAC device * Params : host - host to process * Notes : If reading, we schedule the read to main memory & * allow the transfer to continue. * : If writing, we fill the onboard DMA memory from main * memory. 
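 *	  : Each new burst is limited to half of the on-board RAM
 *	    (DMAC_BUFFER_SIZE / 2) and host->dma.free_addr wraps modulo
 *	    DMAC_BUFFER_SIZE, which in effect double-buffers the RAM.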
* : Called whenever DMAC finished it's current transfer. */ static void acornscsi_dma_intr(AS_Host *host) { unsigned int address, length, transferred; #if (DEBUG & DEBUG_DMA) DBG(host->SCpnt, acornscsi_dumpdma(host, "inti")); #endif dmac_write(host, DMAC_MASKREG, MASK_ON); dmac_clearintr(host); /* * Calculate amount transferred via DMA */ transferred = dmac_address(host) - host->dma.start_addr; host->dma.transferred += transferred; /* * Schedule DMA transfer off board */ if (host->dma.direction == DMA_IN) { host->dma.xfer_start = host->dma.start_addr; host->dma.xfer_length = transferred; host->dma.xfer_ptr = host->scsi.SCp.ptr; host->dma.xfer_required = 1; } acornscsi_data_updateptr(host, &host->scsi.SCp, transferred); /* * Allocate some buffer space, limited to half the on-board RAM size */ length = min_t(unsigned int, host->scsi.SCp.this_residual, DMAC_BUFFER_SIZE / 2); if (length) { host->dma.start_addr = address = host->dma.free_addr; host->dma.free_addr = (host->dma.free_addr + length) & (DMAC_BUFFER_SIZE - 1); /* * Transfer data to DMA memory */ if (host->dma.direction == DMA_OUT) acornscsi_data_write(host, host->scsi.SCp.ptr, host->dma.start_addr, length); length -= 1; dmac_write(host, DMAC_TXCNTLO, length); dmac_write(host, DMAC_TXCNTHI, length >> 8); dmac_write(host, DMAC_TXADRLO, address); dmac_write(host, DMAC_TXADRMD, address >> 8); dmac_write(host, DMAC_TXADRHI, 0); dmac_write(host, DMAC_MASKREG, MASK_OFF); #if (DEBUG & DEBUG_DMA) DBG(host->SCpnt, acornscsi_dumpdma(host, "into")); #endif } else { host->dma.xfer_setup = 0; #if 0 /* * If the interface still wants more, then this is an error. * We give it another byte, but we also attempt to raise an * attention condition. We continue giving one byte until * the device recognises the attention. */ if (dmac_read(host, DMAC_STATUS) & STATUS_RQ0) { acornscsi_abortcmd(host); dmac_write(host, DMAC_TXCNTLO, 0); dmac_write(host, DMAC_TXCNTHI, 0); dmac_write(host, DMAC_TXADRLO, 0); dmac_write(host, DMAC_TXADRMD, 0); dmac_write(host, DMAC_TXADRHI, 0); dmac_write(host, DMAC_MASKREG, MASK_OFF); } #endif } } /* * Function: void acornscsi_dma_xfer(AS_Host *host) * Purpose : transfer data between AcornSCSI and memory * Params : host - host to process */ static void acornscsi_dma_xfer(AS_Host *host) { host->dma.xfer_required = 0; if (host->dma.direction == DMA_IN) acornscsi_data_read(host, host->dma.xfer_ptr, host->dma.xfer_start, host->dma.xfer_length); } /* * Function: void acornscsi_dma_adjust(AS_Host *host) * Purpose : adjust DMA pointers & count for bytes transferred to * SBIC but not SCSI bus. * Params : host - host to adjust DMA count for */ static void acornscsi_dma_adjust(AS_Host *host) { if (host->dma.xfer_setup) { signed long transferred; #if (DEBUG & (DEBUG_DMA|DEBUG_WRITE)) DBG(host->SCpnt, acornscsi_dumpdma(host, "adji")); #endif /* * Calculate correct DMA address - DMA is ahead of SCSI bus while * writing. * host->scsi.SCp.scsi_xferred is the number of bytes * actually transferred to/from the SCSI bus. * host->dma.transferred is the number of bytes transferred * over DMA since host->dma.start_addr was last set. * * real_dma_addr = host->dma.start_addr + host->scsi.SCp.scsi_xferred * - host->dma.transferred */ transferred = host->scsi.SCp.scsi_xferred - host->dma.transferred; if (transferred < 0) printk("scsi%d.%c: Ack! 
DMA write correction %ld < 0!\n", host->host->host_no, acornscsi_target(host), transferred); else if (transferred == 0) host->dma.xfer_setup = 0; else { transferred += host->dma.start_addr; dmac_write(host, DMAC_TXADRLO, transferred); dmac_write(host, DMAC_TXADRMD, transferred >> 8); dmac_write(host, DMAC_TXADRHI, transferred >> 16); #if (DEBUG & (DEBUG_DMA|DEBUG_WRITE)) DBG(host->SCpnt, acornscsi_dumpdma(host, "adjo")); #endif } } } #endif /* ========================================================================================= * Data I/O */ static int acornscsi_write_pio(AS_Host *host, char *bytes, int *ptr, int len, unsigned int max_timeout) { unsigned int asr, timeout = max_timeout; int my_ptr = *ptr; while (my_ptr < len) { asr = sbic_arm_read(host, SBIC_ASR); if (asr & ASR_DBR) { timeout = max_timeout; sbic_arm_write(host, SBIC_DATA, bytes[my_ptr++]); } else if (asr & ASR_INT) break; else if (--timeout == 0) break; udelay(1); } *ptr = my_ptr; return (timeout == 0) ? -1 : 0; } /* * Function: void acornscsi_sendcommand(AS_Host *host) * Purpose : send a command to a target * Params : host - host which is connected to target */ static void acornscsi_sendcommand(AS_Host *host) { struct scsi_cmnd *SCpnt = host->SCpnt; sbic_arm_write(host, SBIC_TRANSCNTH, 0); sbic_arm_writenext(host, 0); sbic_arm_writenext(host, SCpnt->cmd_len - host->scsi.SCp.sent_command); acornscsi_sbic_issuecmd(host, CMND_XFERINFO); if (acornscsi_write_pio(host, SCpnt->cmnd, (int *)&host->scsi.SCp.sent_command, SCpnt->cmd_len, 1000000)) printk("scsi%d: timeout while sending command\n", host->host->host_no); host->scsi.phase = PHASE_COMMAND; } static void acornscsi_sendmessage(AS_Host *host) { unsigned int message_length = msgqueue_msglength(&host->scsi.msgs); unsigned int msgnr; struct message *msg; #if (DEBUG & DEBUG_MESSAGES) printk("scsi%d.%c: sending message ", host->host->host_no, acornscsi_target(host)); #endif switch (message_length) { case 0: acornscsi_sbic_issuecmd(host, CMND_XFERINFO | CMND_SBT); acornscsi_sbic_wait(host, ASR_DBR, ASR_DBR, 1000, "sending message 1"); sbic_arm_write(host, SBIC_DATA, NOP); host->scsi.last_message = NOP; #if (DEBUG & DEBUG_MESSAGES) printk("NOP"); #endif break; case 1: acornscsi_sbic_issuecmd(host, CMND_XFERINFO | CMND_SBT); msg = msgqueue_getmsg(&host->scsi.msgs, 0); acornscsi_sbic_wait(host, ASR_DBR, ASR_DBR, 1000, "sending message 2"); sbic_arm_write(host, SBIC_DATA, msg->msg[0]); host->scsi.last_message = msg->msg[0]; #if (DEBUG & DEBUG_MESSAGES) spi_print_msg(msg->msg); #endif break; default: /* * ANSI standard says: (SCSI-2 Rev 10c Sect 5.6.14) * 'When a target sends this (MESSAGE_REJECT) message, it * shall change to MESSAGE IN phase and send this message * prior to requesting additional message bytes from the * initiator. This provides an interlock so that the * initiator can determine which message byte is rejected. 
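 *
 * Hence host->scsi.last_message below records the first byte of the
 * message just written (with the extended opcode folded in for EXTENDED
 * messages), so that a later MESSAGE REJECT can be matched against it
 * in acornscsi_message().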
*/ sbic_arm_write(host, SBIC_TRANSCNTH, 0); sbic_arm_writenext(host, 0); sbic_arm_writenext(host, message_length); acornscsi_sbic_issuecmd(host, CMND_XFERINFO); msgnr = 0; while ((msg = msgqueue_getmsg(&host->scsi.msgs, msgnr++)) != NULL) { unsigned int i; #if (DEBUG & DEBUG_MESSAGES) spi_print_msg(msg); #endif i = 0; if (acornscsi_write_pio(host, msg->msg, &i, msg->length, 1000000)) printk("scsi%d: timeout while sending message\n", host->host->host_no); host->scsi.last_message = msg->msg[0]; if (msg->msg[0] == EXTENDED_MESSAGE) host->scsi.last_message |= msg->msg[2] << 8; if (i != msg->length) break; } break; } #if (DEBUG & DEBUG_MESSAGES) printk("\n"); #endif } /* * Function: void acornscsi_readstatusbyte(AS_Host *host) * Purpose : Read status byte from connected target * Params : host - host connected to target */ static void acornscsi_readstatusbyte(AS_Host *host) { acornscsi_sbic_issuecmd(host, CMND_XFERINFO|CMND_SBT); acornscsi_sbic_wait(host, ASR_DBR, ASR_DBR, 1000, "reading status byte"); host->scsi.SCp.Status = sbic_arm_read(host, SBIC_DATA); } /* * Function: unsigned char acornscsi_readmessagebyte(AS_Host *host) * Purpose : Read one message byte from connected target * Params : host - host connected to target */ static unsigned char acornscsi_readmessagebyte(AS_Host *host) { unsigned char message; acornscsi_sbic_issuecmd(host, CMND_XFERINFO | CMND_SBT); acornscsi_sbic_wait(host, ASR_DBR, ASR_DBR, 1000, "for message byte"); message = sbic_arm_read(host, SBIC_DATA); /* wait for MSGIN-XFER-PAUSED */ acornscsi_sbic_wait(host, ASR_INT, ASR_INT, 1000, "for interrupt after message byte"); sbic_arm_read(host, SBIC_SSR); return message; } /* * Function: void acornscsi_message(AS_Host *host) * Purpose : Read complete message from connected target & action message * Params : host - host connected to target */ static void acornscsi_message(AS_Host *host) { struct scsi_pointer *scsi_pointer; unsigned char message[16]; unsigned int msgidx = 0, msglen = 1; do { message[msgidx] = acornscsi_readmessagebyte(host); switch (msgidx) { case 0: if (message[0] == EXTENDED_MESSAGE || (message[0] >= 0x20 && message[0] <= 0x2f)) msglen = 2; break; case 1: if (message[0] == EXTENDED_MESSAGE) msglen += message[msgidx]; break; } msgidx += 1; if (msgidx < msglen) { acornscsi_sbic_issuecmd(host, CMND_NEGATEACK); /* wait for next msg-in */ acornscsi_sbic_wait(host, ASR_INT, ASR_INT, 1000, "for interrupt after negate ack"); sbic_arm_read(host, SBIC_SSR); } } while (msgidx < msglen); #if (DEBUG & DEBUG_MESSAGES) printk("scsi%d.%c: message in: ", host->host->host_no, acornscsi_target(host)); spi_print_msg(message); printk("\n"); #endif if (host->scsi.phase == PHASE_RECONNECTED) { /* * ANSI standard says: (Section SCSI-2 Rev. 10c Sect 5.6.17) * 'Whenever a target reconnects to an initiator to continue * a tagged I/O process, the SIMPLE QUEUE TAG message shall * be sent immediately following the IDENTIFY message...' */ if (message[0] == SIMPLE_QUEUE_TAG) host->scsi.reconnected.tag = message[1]; if (acornscsi_reconnect_finish(host)) host->scsi.phase = PHASE_MSGIN; } switch (message[0]) { case ABORT_TASK_SET: case ABORT_TASK: case COMMAND_COMPLETE: if (host->scsi.phase != PHASE_STATUSIN) { printk(KERN_ERR "scsi%d.%c: command complete following non-status in phase?\n", host->host->host_no, acornscsi_target(host)); acornscsi_dumplog(host, host->SCpnt->device->id); } host->scsi.phase = PHASE_DONE; host->scsi.SCp.Message = message[0]; break; case SAVE_POINTERS: /* * ANSI standard says: (Section SCSI-2 Rev. 
10c Sect 5.6.20) * 'The SAVE DATA POINTER message is sent from a target to * direct the initiator to copy the active data pointer to * the saved data pointer for the current I/O process. */ acornscsi_dma_cleanup(host); scsi_pointer = arm_scsi_pointer(host->SCpnt); *scsi_pointer = host->scsi.SCp; scsi_pointer->sent_command = 0; host->scsi.phase = PHASE_MSGIN; break; case RESTORE_POINTERS: /* * ANSI standard says: (Section SCSI-2 Rev. 10c Sect 5.6.19) * 'The RESTORE POINTERS message is sent from a target to * direct the initiator to copy the most recently saved * command, data, and status pointers for the I/O process * to the corresponding active pointers. The command and * status pointers shall be restored to the beginning of * the present command and status areas.' */ acornscsi_dma_cleanup(host); host->scsi.SCp = *arm_scsi_pointer(host->SCpnt); host->scsi.phase = PHASE_MSGIN; break; case DISCONNECT: /* * ANSI standard says: (Section SCSI-2 Rev. 10c Sect 6.4.2) * 'On those occasions when an error or exception condition occurs * and the target elects to repeat the information transfer, the * target may repeat the transfer either issuing a RESTORE POINTERS * message or by disconnecting without issuing a SAVE POINTERS * message. When reconnection is completed, the most recent * saved pointer values are restored.' */ acornscsi_dma_cleanup(host); host->scsi.phase = PHASE_DISCONNECT; break; case MESSAGE_REJECT: #if 0 /* this isn't needed any more */ /* * If we were negociating sync transfer, we don't yet know if * this REJECT is for the sync transfer or for the tagged queue/wide * transfer. Re-initiate sync transfer negotiation now, and if * we got a REJECT in response to SDTR, then it'll be set to DONE. */ if (host->device[host->SCpnt->device->id].sync_state == SYNC_SENT_REQUEST) host->device[host->SCpnt->device->id].sync_state = SYNC_NEGOCIATE; #endif /* * If we have any messages waiting to go out, then assert ATN now */ if (msgqueue_msglength(&host->scsi.msgs)) acornscsi_sbic_issuecmd(host, CMND_ASSERTATN); switch (host->scsi.last_message) { case EXTENDED_MESSAGE | (EXTENDED_SDTR << 8): /* * Target can't handle synchronous transfers */ printk(KERN_NOTICE "scsi%d.%c: Using asynchronous transfer\n", host->host->host_no, acornscsi_target(host)); host->device[host->SCpnt->device->id].sync_xfer = SYNCHTRANSFER_2DBA; host->device[host->SCpnt->device->id].sync_state = SYNC_ASYNCHRONOUS; sbic_arm_write(host, SBIC_SYNCHTRANSFER, host->device[host->SCpnt->device->id].sync_xfer); break; default: break; } break; case SIMPLE_QUEUE_TAG: /* tag queue reconnect... message[1] = queue tag. Print something to indicate something happened! */ printk("scsi%d.%c: reconnect queue tag %02X\n", host->host->host_no, acornscsi_target(host), message[1]); break; case EXTENDED_MESSAGE: switch (message[2]) { #ifdef CONFIG_SCSI_ACORNSCSI_SYNC case EXTENDED_SDTR: if (host->device[host->SCpnt->device->id].sync_state == SYNC_SENT_REQUEST) { /* * We requested synchronous transfers. This isn't quite right... * We can only say if this succeeded if we proceed on to execute the * command from this message. 
If we get a MESSAGE PARITY ERROR, * and the target retries fail, then we fallback to asynchronous mode */ host->device[host->SCpnt->device->id].sync_state = SYNC_COMPLETED; printk(KERN_NOTICE "scsi%d.%c: Using synchronous transfer, offset %d, %d ns\n", host->host->host_no, acornscsi_target(host), message[4], message[3] * 4); host->device[host->SCpnt->device->id].sync_xfer = calc_sync_xfer(message[3] * 4, message[4]); } else { unsigned char period, length; /* * Target requested synchronous transfers. The agreement is only * to be in operation AFTER the target leaves message out phase. */ acornscsi_sbic_issuecmd(host, CMND_ASSERTATN); period = max_t(unsigned int, message[3], sdtr_period / 4); length = min_t(unsigned int, message[4], sdtr_size); msgqueue_addmsg(&host->scsi.msgs, 5, EXTENDED_MESSAGE, 3, EXTENDED_SDTR, period, length); host->device[host->SCpnt->device->id].sync_xfer = calc_sync_xfer(period * 4, length); } sbic_arm_write(host, SBIC_SYNCHTRANSFER, host->device[host->SCpnt->device->id].sync_xfer); break; #else /* We do not accept synchronous transfers. Respond with a * MESSAGE_REJECT. */ #endif case EXTENDED_WDTR: /* The WD33C93A is only 8-bit. We respond with a MESSAGE_REJECT * to a wide data transfer request. */ default: acornscsi_sbic_issuecmd(host, CMND_ASSERTATN); msgqueue_flush(&host->scsi.msgs); msgqueue_addmsg(&host->scsi.msgs, 1, MESSAGE_REJECT); break; } break; default: /* reject message */ printk(KERN_ERR "scsi%d.%c: unrecognised message %02X, rejecting\n", host->host->host_no, acornscsi_target(host), message[0]); acornscsi_sbic_issuecmd(host, CMND_ASSERTATN); msgqueue_flush(&host->scsi.msgs); msgqueue_addmsg(&host->scsi.msgs, 1, MESSAGE_REJECT); host->scsi.phase = PHASE_MSGIN; break; } acornscsi_sbic_issuecmd(host, CMND_NEGATEACK); } /* * Function: int acornscsi_buildmessages(AS_Host *host) * Purpose : build the connection messages for a host * Params : host - host to add messages to */ static void acornscsi_buildmessages(AS_Host *host) { #if 0 /* does the device need resetting? 
*/ if (cmd_reset) { msgqueue_addmsg(&host->scsi.msgs, 1, BUS_DEVICE_RESET); return; } #endif msgqueue_addmsg(&host->scsi.msgs, 1, IDENTIFY(host->device[host->SCpnt->device->id].disconnect_ok, host->SCpnt->device->lun)); #if 0 /* does the device need the current command aborted */ if (cmd_aborted) { acornscsi_abortcmd(host); return; } #endif #ifdef CONFIG_SCSI_ACORNSCSI_SYNC if (host->device[host->SCpnt->device->id].sync_state == SYNC_NEGOCIATE) { host->device[host->SCpnt->device->id].sync_state = SYNC_SENT_REQUEST; msgqueue_addmsg(&host->scsi.msgs, 5, EXTENDED_MESSAGE, 3, EXTENDED_SDTR, sdtr_period / 4, sdtr_size); } #endif } /* * Function: int acornscsi_starttransfer(AS_Host *host) * Purpose : transfer data to/from connected target * Params : host - host to which target is connected * Returns : 0 if failure */ static int acornscsi_starttransfer(AS_Host *host) { int residual; if (!host->scsi.SCp.ptr /*&& host->scsi.SCp.this_residual*/) { printk(KERN_ERR "scsi%d.%c: null buffer passed to acornscsi_starttransfer\n", host->host->host_no, acornscsi_target(host)); return 0; } residual = scsi_bufflen(host->SCpnt) - host->scsi.SCp.scsi_xferred; sbic_arm_write(host, SBIC_SYNCHTRANSFER, host->device[host->SCpnt->device->id].sync_xfer); sbic_arm_writenext(host, residual >> 16); sbic_arm_writenext(host, residual >> 8); sbic_arm_writenext(host, residual); acornscsi_sbic_issuecmd(host, CMND_XFERINFO); return 1; } /* ========================================================================================= * Connection & Disconnection */ /* * Function : acornscsi_reconnect(AS_Host *host) * Purpose : reconnect a previously disconnected command * Params : host - host specific data * Remarks : SCSI spec says: * 'The set of active pointers is restored from the set * of saved pointers upon reconnection of the I/O process' */ static int acornscsi_reconnect(AS_Host *host) { unsigned int target, lun, ok = 0; target = sbic_arm_read(host, SBIC_SOURCEID); if (!(target & 8)) printk(KERN_ERR "scsi%d: invalid source id after reselection " "- device fault?\n", host->host->host_no); target &= 7; if (host->SCpnt && !host->scsi.disconnectable) { printk(KERN_ERR "scsi%d.%d: reconnected while command in " "progress to target %d?\n", host->host->host_no, target, host->SCpnt->device->id); host->SCpnt = NULL; } lun = sbic_arm_read(host, SBIC_DATA) & 7; host->scsi.reconnected.target = target; host->scsi.reconnected.lun = lun; host->scsi.reconnected.tag = 0; if (host->scsi.disconnectable && host->SCpnt && host->SCpnt->device->id == target && host->SCpnt->device->lun == lun) ok = 1; if (!ok && queue_probetgtlun(&host->queues.disconnected, target, lun)) ok = 1; ADD_STATUS(target, 0x81, host->scsi.phase, 0); if (ok) { host->scsi.phase = PHASE_RECONNECTED; } else { /* this doesn't seem to work */ printk(KERN_ERR "scsi%d.%c: reselected with no command " "to reconnect with\n", host->host->host_no, '0' + target); acornscsi_dumplog(host, target); acornscsi_abortcmd(host); if (host->SCpnt) { queue_add_cmd_tail(&host->queues.disconnected, host->SCpnt); host->SCpnt = NULL; } } acornscsi_sbic_issuecmd(host, CMND_NEGATEACK); return !ok; } /* * Function: int acornscsi_reconnect_finish(AS_Host *host) * Purpose : finish reconnecting a command * Params : host - host to complete * Returns : 0 if failed */ static int acornscsi_reconnect_finish(AS_Host *host) { if (host->scsi.disconnectable && host->SCpnt) { host->scsi.disconnectable = 0; if (host->SCpnt->device->id == host->scsi.reconnected.target && host->SCpnt->device->lun == 
host->scsi.reconnected.lun && scsi_cmd_to_rq(host->SCpnt)->tag == host->scsi.reconnected.tag) { #if (DEBUG & (DEBUG_QUEUES|DEBUG_DISCON)) DBG(host->SCpnt, printk("scsi%d.%c: reconnected", host->host->host_no, acornscsi_target(host))); #endif } else { queue_add_cmd_tail(&host->queues.disconnected, host->SCpnt); #if (DEBUG & (DEBUG_QUEUES|DEBUG_DISCON)) DBG(host->SCpnt, printk("scsi%d.%c: had to move command " "to disconnected queue\n", host->host->host_no, acornscsi_target(host))); #endif host->SCpnt = NULL; } } if (!host->SCpnt) { host->SCpnt = queue_remove_tgtluntag(&host->queues.disconnected, host->scsi.reconnected.target, host->scsi.reconnected.lun, host->scsi.reconnected.tag); #if (DEBUG & (DEBUG_QUEUES|DEBUG_DISCON)) DBG(host->SCpnt, printk("scsi%d.%c: had to get command", host->host->host_no, acornscsi_target(host))); #endif } if (!host->SCpnt) acornscsi_abortcmd(host); else { /* * Restore data pointer from SAVED pointers. */ host->scsi.SCp = *arm_scsi_pointer(host->SCpnt); #if (DEBUG & (DEBUG_QUEUES|DEBUG_DISCON)) printk(", data pointers: [%p, %X]", host->scsi.SCp.ptr, host->scsi.SCp.this_residual); #endif } #if (DEBUG & (DEBUG_QUEUES|DEBUG_DISCON)) printk("\n"); #endif host->dma.transferred = host->scsi.SCp.scsi_xferred; return host->SCpnt != NULL; } /* * Function: void acornscsi_disconnect_unexpected(AS_Host *host) * Purpose : handle an unexpected disconnect * Params : host - host on which disconnect occurred */ static void acornscsi_disconnect_unexpected(AS_Host *host) { printk(KERN_ERR "scsi%d.%c: unexpected disconnect\n", host->host->host_no, acornscsi_target(host)); #if (DEBUG & DEBUG_ABORT) acornscsi_dumplog(host, 8); #endif acornscsi_done(host, &host->SCpnt, DID_ERROR); } /* * Function: void acornscsi_abortcmd(AS_host *host, unsigned char tag) * Purpose : abort a currently executing command * Params : host - host with connected command to abort */ static void acornscsi_abortcmd(AS_Host *host) { host->scsi.phase = PHASE_ABORTED; sbic_arm_write(host, SBIC_CMND, CMND_ASSERTATN); msgqueue_flush(&host->scsi.msgs); msgqueue_addmsg(&host->scsi.msgs, 1, ABORT); } /* ========================================================================================== * Interrupt routines. 
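 *
 * The WD33C93A reports each event through its SSR register.  The handler
 * below first deals with the phase-independent codes (reset complete,
 * unexpected disconnect) and then dispatches on the driver's own record
 * of where the command is (host->scsi.phase), with the SSR value selecting
 * the transition within that phase; anything unexpected is logged via
 * acornscsi_dumplog() and normally answered with an abort.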
*/ /* * Function: int acornscsi_sbicintr(AS_Host *host) * Purpose : handle interrupts from SCSI device * Params : host - host to process * Returns : INTR_PROCESS if expecting another SBIC interrupt * INTR_IDLE if no interrupt * INTR_NEXT_COMMAND if we have finished processing the command */ static intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq) { unsigned int asr, ssr; asr = sbic_arm_read(host, SBIC_ASR); if (!(asr & ASR_INT)) return INTR_IDLE; ssr = sbic_arm_read(host, SBIC_SSR); #if (DEBUG & DEBUG_PHASES) print_sbic_status(asr, ssr, host->scsi.phase); #endif ADD_STATUS(8, ssr, host->scsi.phase, in_irq); if (host->SCpnt && !host->scsi.disconnectable) ADD_STATUS(host->SCpnt->device->id, ssr, host->scsi.phase, in_irq); switch (ssr) { case 0x00: /* reset state - not advanced */ printk(KERN_ERR "scsi%d: reset in standard mode but wanted advanced mode.\n", host->host->host_no); /* setup sbic - WD33C93A */ sbic_arm_write(host, SBIC_OWNID, OWNID_EAF | host->host->this_id); sbic_arm_write(host, SBIC_CMND, CMND_RESET); return INTR_IDLE; case 0x01: /* reset state - advanced */ sbic_arm_write(host, SBIC_CTRL, INIT_SBICDMA | CTRL_IDI); sbic_arm_write(host, SBIC_TIMEOUT, TIMEOUT_TIME); sbic_arm_write(host, SBIC_SYNCHTRANSFER, SYNCHTRANSFER_2DBA); sbic_arm_write(host, SBIC_SOURCEID, SOURCEID_ER | SOURCEID_DSP); msgqueue_flush(&host->scsi.msgs); return INTR_IDLE; case 0x41: /* unexpected disconnect aborted command */ acornscsi_disconnect_unexpected(host); return INTR_NEXT_COMMAND; } switch (host->scsi.phase) { case PHASE_CONNECTING: /* STATE: command removed from issue queue */ switch (ssr) { case 0x11: /* -> PHASE_CONNECTED */ /* BUS FREE -> SELECTION */ host->scsi.phase = PHASE_CONNECTED; msgqueue_flush(&host->scsi.msgs); host->dma.transferred = host->scsi.SCp.scsi_xferred; /* 33C93 gives next interrupt indicating bus phase */ asr = sbic_arm_read(host, SBIC_ASR); if (!(asr & ASR_INT)) break; ssr = sbic_arm_read(host, SBIC_SSR); ADD_STATUS(8, ssr, host->scsi.phase, 1); ADD_STATUS(host->SCpnt->device->id, ssr, host->scsi.phase, 1); goto connected; case 0x42: /* select timed out */ /* -> PHASE_IDLE */ acornscsi_done(host, &host->SCpnt, DID_NO_CONNECT); return INTR_NEXT_COMMAND; case 0x81: /* -> PHASE_RECONNECTED or PHASE_ABORTED */ /* BUS FREE -> RESELECTION */ host->origSCpnt = host->SCpnt; host->SCpnt = NULL; msgqueue_flush(&host->scsi.msgs); acornscsi_reconnect(host); break; default: printk(KERN_ERR "scsi%d.%c: PHASE_CONNECTING, SSR %02X?\n", host->host->host_no, acornscsi_target(host), ssr); acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8); acornscsi_abortcmd(host); } return INTR_PROCESSING; connected: case PHASE_CONNECTED: /* STATE: device selected ok */ switch (ssr) { #ifdef NONSTANDARD case 0x8a: /* -> PHASE_COMMAND, PHASE_COMMANDPAUSED */ /* SELECTION -> COMMAND */ acornscsi_sendcommand(host); break; case 0x8b: /* -> PHASE_STATUS */ /* SELECTION -> STATUS */ acornscsi_readstatusbyte(host); host->scsi.phase = PHASE_STATUSIN; break; #endif case 0x8e: /* -> PHASE_MSGOUT */ /* SELECTION ->MESSAGE OUT */ host->scsi.phase = PHASE_MSGOUT; acornscsi_buildmessages(host); acornscsi_sendmessage(host); break; /* these should not happen */ case 0x85: /* target disconnected */ acornscsi_done(host, &host->SCpnt, DID_ERROR); break; default: printk(KERN_ERR "scsi%d.%c: PHASE_CONNECTED, SSR %02X?\n", host->host->host_no, acornscsi_target(host), ssr); acornscsi_dumplog(host, host->SCpnt ? 
host->SCpnt->device->id : 8); acornscsi_abortcmd(host); } return INTR_PROCESSING; case PHASE_MSGOUT: /* STATE: connected & sent IDENTIFY message */ /* * SCSI standard says that MESSAGE OUT phases can be followed by a * DATA phase, STATUS phase, MESSAGE IN phase or COMMAND phase */ switch (ssr) { case 0x8a: /* -> PHASE_COMMAND, PHASE_COMMANDPAUSED */ case 0x1a: /* -> PHASE_COMMAND, PHASE_COMMANDPAUSED */ /* MESSAGE OUT -> COMMAND */ acornscsi_sendcommand(host); break; case 0x8b: /* -> PHASE_STATUS */ case 0x1b: /* -> PHASE_STATUS */ /* MESSAGE OUT -> STATUS */ acornscsi_readstatusbyte(host); host->scsi.phase = PHASE_STATUSIN; break; case 0x8e: /* -> PHASE_MSGOUT */ /* MESSAGE_OUT(MESSAGE_IN) ->MESSAGE OUT */ acornscsi_sendmessage(host); break; case 0x4f: /* -> PHASE_MSGIN, PHASE_DISCONNECT */ case 0x1f: /* -> PHASE_MSGIN, PHASE_DISCONNECT */ /* MESSAGE OUT -> MESSAGE IN */ acornscsi_message(host); break; default: printk(KERN_ERR "scsi%d.%c: PHASE_MSGOUT, SSR %02X?\n", host->host->host_no, acornscsi_target(host), ssr); acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8); } return INTR_PROCESSING; case PHASE_COMMAND: /* STATE: connected & command sent */ switch (ssr) { case 0x18: /* -> PHASE_DATAOUT */ /* COMMAND -> DATA OUT */ if (host->scsi.SCp.sent_command != host->SCpnt->cmd_len) acornscsi_abortcmd(host); acornscsi_dma_setup(host, DMA_OUT); if (!acornscsi_starttransfer(host)) acornscsi_abortcmd(host); host->scsi.phase = PHASE_DATAOUT; return INTR_IDLE; case 0x19: /* -> PHASE_DATAIN */ /* COMMAND -> DATA IN */ if (host->scsi.SCp.sent_command != host->SCpnt->cmd_len) acornscsi_abortcmd(host); acornscsi_dma_setup(host, DMA_IN); if (!acornscsi_starttransfer(host)) acornscsi_abortcmd(host); host->scsi.phase = PHASE_DATAIN; return INTR_IDLE; case 0x1b: /* -> PHASE_STATUS */ /* COMMAND -> STATUS */ acornscsi_readstatusbyte(host); host->scsi.phase = PHASE_STATUSIN; break; case 0x1e: /* -> PHASE_MSGOUT */ /* COMMAND -> MESSAGE OUT */ acornscsi_sendmessage(host); break; case 0x1f: /* -> PHASE_MSGIN, PHASE_DISCONNECT */ /* COMMAND -> MESSAGE IN */ acornscsi_message(host); break; default: printk(KERN_ERR "scsi%d.%c: PHASE_COMMAND, SSR %02X?\n", host->host->host_no, acornscsi_target(host), ssr); acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8); } return INTR_PROCESSING; case PHASE_DISCONNECT: /* STATE: connected, received DISCONNECT msg */ if (ssr == 0x85) { /* -> PHASE_IDLE */ host->scsi.disconnectable = 1; host->scsi.reconnected.tag = 0; host->scsi.phase = PHASE_IDLE; host->stats.disconnects += 1; } else { printk(KERN_ERR "scsi%d.%c: PHASE_DISCONNECT, SSR %02X instead of disconnect?\n", host->host->host_no, acornscsi_target(host), ssr); acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8); } return INTR_NEXT_COMMAND; case PHASE_IDLE: /* STATE: disconnected */ if (ssr == 0x81) /* -> PHASE_RECONNECTED or PHASE_ABORTED */ acornscsi_reconnect(host); else { printk(KERN_ERR "scsi%d.%c: PHASE_IDLE, SSR %02X while idle?\n", host->host->host_no, acornscsi_target(host), ssr); acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8); } return INTR_PROCESSING; case PHASE_RECONNECTED: /* STATE: device reconnected to initiator */ /* * Command reconnected - if MESGIN, get message - it may be * the tag. 
If not, get command out of disconnected queue */ /* * If we reconnected and we're not in MESSAGE IN phase after IDENTIFY, * reconnect I_T_L command */ if (ssr != 0x8f && !acornscsi_reconnect_finish(host)) return INTR_IDLE; ADD_STATUS(host->SCpnt->device->id, ssr, host->scsi.phase, in_irq); switch (ssr) { case 0x88: /* data out phase */ /* -> PHASE_DATAOUT */ /* MESSAGE IN -> DATA OUT */ acornscsi_dma_setup(host, DMA_OUT); if (!acornscsi_starttransfer(host)) acornscsi_abortcmd(host); host->scsi.phase = PHASE_DATAOUT; return INTR_IDLE; case 0x89: /* data in phase */ /* -> PHASE_DATAIN */ /* MESSAGE IN -> DATA IN */ acornscsi_dma_setup(host, DMA_IN); if (!acornscsi_starttransfer(host)) acornscsi_abortcmd(host); host->scsi.phase = PHASE_DATAIN; return INTR_IDLE; case 0x8a: /* command out */ /* MESSAGE IN -> COMMAND */ acornscsi_sendcommand(host);/* -> PHASE_COMMAND, PHASE_COMMANDPAUSED */ break; case 0x8b: /* status in */ /* -> PHASE_STATUSIN */ /* MESSAGE IN -> STATUS */ acornscsi_readstatusbyte(host); host->scsi.phase = PHASE_STATUSIN; break; case 0x8e: /* message out */ /* -> PHASE_MSGOUT */ /* MESSAGE IN -> MESSAGE OUT */ acornscsi_sendmessage(host); break; case 0x8f: /* message in */ acornscsi_message(host); /* -> PHASE_MSGIN, PHASE_DISCONNECT */ break; default: printk(KERN_ERR "scsi%d.%c: PHASE_RECONNECTED, SSR %02X after reconnect?\n", host->host->host_no, acornscsi_target(host), ssr); acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8); } return INTR_PROCESSING; case PHASE_DATAIN: /* STATE: transferred data in */ /* * This is simple - if we disconnect then the DMA address & count is * correct. */ switch (ssr) { case 0x19: /* -> PHASE_DATAIN */ case 0x89: /* -> PHASE_DATAIN */ acornscsi_abortcmd(host); return INTR_IDLE; case 0x1b: /* -> PHASE_STATUSIN */ case 0x4b: /* -> PHASE_STATUSIN */ case 0x8b: /* -> PHASE_STATUSIN */ /* DATA IN -> STATUS */ host->scsi.SCp.scsi_xferred = scsi_bufflen(host->SCpnt) - acornscsi_sbic_xfcount(host); acornscsi_dma_stop(host); acornscsi_readstatusbyte(host); host->scsi.phase = PHASE_STATUSIN; break; case 0x1e: /* -> PHASE_MSGOUT */ case 0x4e: /* -> PHASE_MSGOUT */ case 0x8e: /* -> PHASE_MSGOUT */ /* DATA IN -> MESSAGE OUT */ host->scsi.SCp.scsi_xferred = scsi_bufflen(host->SCpnt) - acornscsi_sbic_xfcount(host); acornscsi_dma_stop(host); acornscsi_sendmessage(host); break; case 0x1f: /* message in */ case 0x4f: /* message in */ case 0x8f: /* message in */ /* DATA IN -> MESSAGE IN */ host->scsi.SCp.scsi_xferred = scsi_bufflen(host->SCpnt) - acornscsi_sbic_xfcount(host); acornscsi_dma_stop(host); acornscsi_message(host); /* -> PHASE_MSGIN, PHASE_DISCONNECT */ break; default: printk(KERN_ERR "scsi%d.%c: PHASE_DATAIN, SSR %02X?\n", host->host->host_no, acornscsi_target(host), ssr); acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8); } return INTR_PROCESSING; case PHASE_DATAOUT: /* STATE: transferred data out */ /* * This is more complicated - if we disconnect, the DMA could be 12 * bytes ahead of us. We need to correct this. 
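 * The correction is applied by acornscsi_dma_adjust(), which recomputes
 * the DMAC transfer address from the bytes actually seen on the SCSI bus
 * (host->scsi.SCp.scsi_xferred) versus the bytes consumed by the DMAC
 * (host->dma.transferred) - see the formula in that function's comment.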
*/ switch (ssr) { case 0x18: /* -> PHASE_DATAOUT */ case 0x88: /* -> PHASE_DATAOUT */ acornscsi_abortcmd(host); return INTR_IDLE; case 0x1b: /* -> PHASE_STATUSIN */ case 0x4b: /* -> PHASE_STATUSIN */ case 0x8b: /* -> PHASE_STATUSIN */ /* DATA OUT -> STATUS */ host->scsi.SCp.scsi_xferred = scsi_bufflen(host->SCpnt) - acornscsi_sbic_xfcount(host); acornscsi_dma_stop(host); acornscsi_dma_adjust(host); acornscsi_readstatusbyte(host); host->scsi.phase = PHASE_STATUSIN; break; case 0x1e: /* -> PHASE_MSGOUT */ case 0x4e: /* -> PHASE_MSGOUT */ case 0x8e: /* -> PHASE_MSGOUT */ /* DATA OUT -> MESSAGE OUT */ host->scsi.SCp.scsi_xferred = scsi_bufflen(host->SCpnt) - acornscsi_sbic_xfcount(host); acornscsi_dma_stop(host); acornscsi_dma_adjust(host); acornscsi_sendmessage(host); break; case 0x1f: /* message in */ case 0x4f: /* message in */ case 0x8f: /* message in */ /* DATA OUT -> MESSAGE IN */ host->scsi.SCp.scsi_xferred = scsi_bufflen(host->SCpnt) - acornscsi_sbic_xfcount(host); acornscsi_dma_stop(host); acornscsi_dma_adjust(host); acornscsi_message(host); /* -> PHASE_MSGIN, PHASE_DISCONNECT */ break; default: printk(KERN_ERR "scsi%d.%c: PHASE_DATAOUT, SSR %02X?\n", host->host->host_no, acornscsi_target(host), ssr); acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8); } return INTR_PROCESSING; case PHASE_STATUSIN: /* STATE: status in complete */ switch (ssr) { case 0x1f: /* -> PHASE_MSGIN, PHASE_DONE, PHASE_DISCONNECT */ case 0x8f: /* -> PHASE_MSGIN, PHASE_DONE, PHASE_DISCONNECT */ /* STATUS -> MESSAGE IN */ acornscsi_message(host); break; case 0x1e: /* -> PHASE_MSGOUT */ case 0x8e: /* -> PHASE_MSGOUT */ /* STATUS -> MESSAGE OUT */ acornscsi_sendmessage(host); break; default: printk(KERN_ERR "scsi%d.%c: PHASE_STATUSIN, SSR %02X instead of MESSAGE_IN?\n", host->host->host_no, acornscsi_target(host), ssr); acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8); } return INTR_PROCESSING; case PHASE_MSGIN: /* STATE: message in */ switch (ssr) { case 0x1e: /* -> PHASE_MSGOUT */ case 0x4e: /* -> PHASE_MSGOUT */ case 0x8e: /* -> PHASE_MSGOUT */ /* MESSAGE IN -> MESSAGE OUT */ acornscsi_sendmessage(host); break; case 0x1f: /* -> PHASE_MSGIN, PHASE_DONE, PHASE_DISCONNECT */ case 0x2f: case 0x4f: case 0x8f: acornscsi_message(host); break; case 0x85: printk("scsi%d.%c: strange message in disconnection\n", host->host->host_no, acornscsi_target(host)); acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8); acornscsi_done(host, &host->SCpnt, DID_ERROR); break; default: printk(KERN_ERR "scsi%d.%c: PHASE_MSGIN, SSR %02X after message in?\n", host->host->host_no, acornscsi_target(host), ssr); acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8); } return INTR_PROCESSING; case PHASE_DONE: /* STATE: received status & message */ switch (ssr) { case 0x85: /* -> PHASE_IDLE */ acornscsi_done(host, &host->SCpnt, DID_OK); return INTR_NEXT_COMMAND; case 0x1e: case 0x8e: acornscsi_sendmessage(host); break; default: printk(KERN_ERR "scsi%d.%c: PHASE_DONE, SSR %02X instead of disconnect?\n", host->host->host_no, acornscsi_target(host), ssr); acornscsi_dumplog(host, host->SCpnt ? 
host->SCpnt->device->id : 8); } return INTR_PROCESSING; case PHASE_ABORTED: switch (ssr) { case 0x85: if (host->SCpnt) acornscsi_done(host, &host->SCpnt, DID_ABORT); else { clear_bit(host->scsi.reconnected.target * 8 + host->scsi.reconnected.lun, host->busyluns); host->scsi.phase = PHASE_IDLE; } return INTR_NEXT_COMMAND; case 0x1e: case 0x2e: case 0x4e: case 0x8e: acornscsi_sendmessage(host); break; default: printk(KERN_ERR "scsi%d.%c: PHASE_ABORTED, SSR %02X?\n", host->host->host_no, acornscsi_target(host), ssr); acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8); } return INTR_PROCESSING; default: printk(KERN_ERR "scsi%d.%c: unknown driver phase %d\n", host->host->host_no, acornscsi_target(host), ssr); acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8); } return INTR_PROCESSING; } /* * Prototype: void acornscsi_intr(int irq, void *dev_id) * Purpose : handle interrupts from Acorn SCSI card * Params : irq - interrupt number * dev_id - device specific data (AS_Host structure) */ static irqreturn_t acornscsi_intr(int irq, void *dev_id) { AS_Host *host = (AS_Host *)dev_id; intr_ret_t ret; int iostatus; int in_irq = 0; do { ret = INTR_IDLE; iostatus = readb(host->fast + INT_REG); if (iostatus & 2) { acornscsi_dma_intr(host); iostatus = readb(host->fast + INT_REG); } if (iostatus & 8) ret = acornscsi_sbicintr(host, in_irq); /* * If we have a transfer pending, start it. * Only start it if the interface has already started transferring * it's data */ if (host->dma.xfer_required) acornscsi_dma_xfer(host); if (ret == INTR_NEXT_COMMAND) ret = acornscsi_kick(host); in_irq = 1; } while (ret != INTR_IDLE); return IRQ_HANDLED; } /*============================================================================================= * Interfaces between interrupt handler and rest of scsi code */ /* * Function : acornscsi_queuecmd(struct scsi_cmnd *cmd) * Purpose : queues a SCSI command * Params : cmd - SCSI command * Returns : 0, or < 0 on error. */ static int acornscsi_queuecmd_lck(struct scsi_cmnd *SCpnt) { struct scsi_pointer *scsi_pointer = arm_scsi_pointer(SCpnt); void (*done)(struct scsi_cmnd *) = scsi_done; AS_Host *host = (AS_Host *)SCpnt->device->host->hostdata; #if (DEBUG & DEBUG_NO_WRITE) if (acornscsi_cmdtype(SCpnt->cmnd[0]) == CMD_WRITE && (NO_WRITE & (1 << SCpnt->device->id))) { printk(KERN_CRIT "scsi%d.%c: WRITE attempted with NO_WRITE flag set\n", host->host->host_no, '0' + SCpnt->device->id); set_host_byte(SCpnt, DID_NO_CONNECT); done(SCpnt); return 0; } #endif SCpnt->host_scribble = NULL; SCpnt->result = 0; scsi_pointer->phase = (int)acornscsi_datadirection(SCpnt->cmnd[0]); scsi_pointer->sent_command = 0; scsi_pointer->scsi_xferred = 0; init_SCp(SCpnt); host->stats.queues += 1; { unsigned long flags; if (!queue_add_cmd_ordered(&host->queues.issue, SCpnt)) { set_host_byte(SCpnt, DID_ERROR); done(SCpnt); return 0; } local_irq_save(flags); if (host->scsi.phase == PHASE_IDLE) acornscsi_kick(host); local_irq_restore(flags); } return 0; } DEF_SCSI_QCMD(acornscsi_queuecmd) enum res_abort { res_not_running, res_success, res_success_clear, res_snooze }; /* * Prototype: enum res acornscsi_do_abort(struct scsi_cmnd *SCpnt) * Purpose : abort a command on this host * Params : SCpnt - command to abort * Returns : our abort status */ static enum res_abort acornscsi_do_abort(AS_Host *host, struct scsi_cmnd *SCpnt) { enum res_abort res = res_not_running; if (queue_remove_cmd(&host->queues.issue, SCpnt)) { /* * The command was on the issue queue, and has not been * issued yet. 
We can remove the command from the queue, * and acknowledge the abort. Neither the devices nor the * interface know about the command. */ //#if (DEBUG & DEBUG_ABORT) printk("on issue queue "); //#endif res = res_success; } else if (queue_remove_cmd(&host->queues.disconnected, SCpnt)) { /* * The command was on the disconnected queue. Simply * acknowledge the abort condition, and when the target * reconnects, we will give it an ABORT message. The * target should then disconnect, and we will clear * the busylun bit. */ //#if (DEBUG & DEBUG_ABORT) printk("on disconnected queue "); //#endif res = res_success; } else if (host->SCpnt == SCpnt) { unsigned long flags; //#if (DEBUG & DEBUG_ABORT) printk("executing "); //#endif local_irq_save(flags); switch (host->scsi.phase) { /* * If the interface is idle, and the command is 'disconnectable', * then it is the same as on the disconnected queue. We simply * remove all traces of the command. When the target reconnects, * we will give it an ABORT message since the command could not * be found. When the target finally disconnects, we will clear * the busylun bit. */ case PHASE_IDLE: if (host->scsi.disconnectable) { host->scsi.disconnectable = 0; host->SCpnt = NULL; res = res_success; } break; /* * If the command has connected and done nothing further, * simply force a disconnect. We also need to clear the * busylun bit. */ case PHASE_CONNECTED: sbic_arm_write(host, SBIC_CMND, CMND_DISCONNECT); host->SCpnt = NULL; res = res_success_clear; break; default: acornscsi_abortcmd(host); res = res_snooze; } local_irq_restore(flags); } else if (host->origSCpnt == SCpnt) { /* * The command will be executed next, but a command * is currently using the interface. This is similar to * being on the issue queue, except the busylun bit has * been set. */ host->origSCpnt = NULL; //#if (DEBUG & DEBUG_ABORT) printk("waiting for execution "); //#endif res = res_success_clear; } else printk("unknown "); return res; } /* * Prototype: int acornscsi_abort(struct scsi_cmnd *SCpnt) * Purpose : abort a command on this host * Params : SCpnt - command to abort * Returns : one of SCSI_ABORT_ macros */ int acornscsi_abort(struct scsi_cmnd *SCpnt) { AS_Host *host = (AS_Host *) SCpnt->device->host->hostdata; int result; host->stats.aborts += 1; #if (DEBUG & DEBUG_ABORT) { int asr, ssr; asr = sbic_arm_read(host, SBIC_ASR); ssr = sbic_arm_read(host, SBIC_SSR); printk(KERN_WARNING "acornscsi_abort: "); print_sbic_status(asr, ssr, host->scsi.phase); acornscsi_dumplog(host, SCpnt->device->id); } #endif printk("scsi%d: ", host->host->host_no); switch (acornscsi_do_abort(host, SCpnt)) { /* * We managed to find the command and cleared it out. * We do not expect the command to be executing on the * target, but we have set the busylun bit. */ case res_success_clear: //#if (DEBUG & DEBUG_ABORT) printk("clear "); //#endif clear_bit(SCpnt->device->id * 8 + (u8)(SCpnt->device->lun & 0x7), host->busyluns); fallthrough; /* * We found the command, and cleared it out. Either * the command is still known to be executing on the * target, or the busylun bit is not set. */ case res_success: //#if (DEBUG & DEBUG_ABORT) printk("success\n"); //#endif result = SUCCESS; break; /* * We did find the command, but unfortunately we couldn't * unhook it from ourselves. Wait some more, and if it * still doesn't complete, reset the interface. 
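 *
 * Returning FAILED for this case leaves the command alone: the ABORT
 * message queued by acornscsi_abortcmd() may still complete the abort,
 * and if it does not, the SCSI error handler escalates (eventually to
 * acornscsi_host_reset()).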
*/ case res_snooze: //#if (DEBUG & DEBUG_ABORT) printk("snooze\n"); //#endif result = FAILED; break; /* * The command could not be found (either because it completed, * or it got dropped. */ default: case res_not_running: acornscsi_dumplog(host, SCpnt->device->id); result = FAILED; //#if (DEBUG & DEBUG_ABORT) printk("not running\n"); //#endif break; } return result; } /* * Prototype: int acornscsi_reset(struct scsi_cmnd *SCpnt) * Purpose : reset a command on this host/reset this host * Params : SCpnt - command causing reset * Returns : one of SCSI_RESET_ macros */ int acornscsi_host_reset(struct scsi_cmnd *SCpnt) { AS_Host *host = (AS_Host *)SCpnt->device->host->hostdata; struct scsi_cmnd *SCptr; host->stats.resets += 1; #if (DEBUG & DEBUG_RESET) { int asr, ssr, devidx; asr = sbic_arm_read(host, SBIC_ASR); ssr = sbic_arm_read(host, SBIC_SSR); printk(KERN_WARNING "acornscsi_reset: "); print_sbic_status(asr, ssr, host->scsi.phase); for (devidx = 0; devidx < 9; devidx++) acornscsi_dumplog(host, devidx); } #endif acornscsi_dma_stop(host); /* * do hard reset. This resets all devices on this host, and so we * must set the reset status on all commands. */ acornscsi_resetcard(host); while ((SCptr = queue_remove(&host->queues.disconnected)) != NULL) ; return SUCCESS; } /*============================================================================================== * initialisation & miscellaneous support */ /* * Function: char *acornscsi_info(struct Scsi_Host *host) * Purpose : return a string describing this interface * Params : host - host to give information on * Returns : a constant string */ const char *acornscsi_info(struct Scsi_Host *host) { static char string[100], *p; p = string; p += sprintf(string, "%s at port %08lX irq %d v%d.%d.%d" #ifdef CONFIG_SCSI_ACORNSCSI_SYNC " SYNC" #endif #if (DEBUG & DEBUG_NO_WRITE) " NOWRITE (" __stringify(NO_WRITE) ")" #endif , host->hostt->name, host->io_port, host->irq, VER_MAJOR, VER_MINOR, VER_PATCH); return string; } static int acornscsi_show_info(struct seq_file *m, struct Scsi_Host *instance) { int devidx; struct scsi_device *scd; AS_Host *host; host = (AS_Host *)instance->hostdata; seq_printf(m, "AcornSCSI driver v%d.%d.%d" #ifdef CONFIG_SCSI_ACORNSCSI_SYNC " SYNC" #endif #if (DEBUG & DEBUG_NO_WRITE) " NOWRITE (" __stringify(NO_WRITE) ")" #endif "\n\n", VER_MAJOR, VER_MINOR, VER_PATCH); seq_printf(m, "SBIC: WD33C93A Address: %p IRQ : %d\n", host->base + SBIC_REGIDX, host->scsi.irq); #ifdef USE_DMAC seq_printf(m, "DMAC: uPC71071 Address: %p IRQ : %d\n\n", host->base + DMAC_OFFSET, host->scsi.irq); #endif seq_printf(m, "Statistics:\n" "Queued commands: %-10u Issued commands: %-10u\n" "Done commands : %-10u Reads : %-10u\n" "Writes : %-10u Others : %-10u\n" "Disconnects : %-10u Aborts : %-10u\n" "Resets : %-10u\n\nLast phases:", host->stats.queues, host->stats.removes, host->stats.fins, host->stats.reads, host->stats.writes, host->stats.miscs, host->stats.disconnects, host->stats.aborts, host->stats.resets); for (devidx = 0; devidx < 9; devidx ++) { unsigned int statptr, prev; seq_printf(m, "\n%c:", devidx == 8 ? 'H' : ('0' + devidx)); statptr = host->status_ptr[devidx] - 10; if ((signed int)statptr < 0) statptr += STATUS_BUFFER_SIZE; prev = host->status[devidx][statptr].when; for (; statptr != host->status_ptr[devidx]; statptr = (statptr + 1) & (STATUS_BUFFER_SIZE - 1)) { if (host->status[devidx][statptr].when) { seq_printf(m, "%c%02X:%02X+%2ld", host->status[devidx][statptr].irq ? 
'-' : ' ', host->status[devidx][statptr].ph, host->status[devidx][statptr].ssr, (host->status[devidx][statptr].when - prev) < 100 ? (host->status[devidx][statptr].when - prev) : 99); prev = host->status[devidx][statptr].when; } } } seq_printf(m, "\nAttached devices:\n"); shost_for_each_device(scd, instance) { seq_printf(m, "Device/Lun TaggedQ Sync\n"); seq_printf(m, " %d/%llu ", scd->id, scd->lun); if (scd->tagged_supported) seq_printf(m, "%3sabled ", scd->simple_tags ? "en" : "dis"); else seq_printf(m, "unsupported "); if (host->device[scd->id].sync_xfer & 15) seq_printf(m, "offset %d, %d ns\n", host->device[scd->id].sync_xfer & 15, acornscsi_getperiod(host->device[scd->id].sync_xfer)); else seq_printf(m, "async\n"); } return 0; } static const struct scsi_host_template acornscsi_template = { .module = THIS_MODULE, .show_info = acornscsi_show_info, .name = "AcornSCSI", .info = acornscsi_info, .queuecommand = acornscsi_queuecmd, .eh_abort_handler = acornscsi_abort, .eh_host_reset_handler = acornscsi_host_reset, .can_queue = 16, .this_id = 7, .sg_tablesize = SG_ALL, .cmd_per_lun = 2, .dma_boundary = PAGE_SIZE - 1, .proc_name = "acornscsi", .cmd_size = sizeof(struct arm_cmd_priv), }; static int acornscsi_probe(struct expansion_card *ec, const struct ecard_id *id) { struct Scsi_Host *host; AS_Host *ashost; int ret; ret = ecard_request_resources(ec); if (ret) goto out; host = scsi_host_alloc(&acornscsi_template, sizeof(AS_Host)); if (!host) { ret = -ENOMEM; goto out_release; } ashost = (AS_Host *)host->hostdata; ashost->base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0); ashost->fast = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0); if (!ashost->base || !ashost->fast) { ret = -ENOMEM; goto out_put; } host->irq = ec->irq; ashost->host = host; ashost->scsi.irq = host->irq; ec->irqaddr = ashost->fast + INT_REG; ec->irqmask = 0x0a; ret = request_irq(host->irq, acornscsi_intr, 0, "acornscsi", ashost); if (ret) { printk(KERN_CRIT "scsi%d: IRQ%d not free: %d\n", host->host_no, ashost->scsi.irq, ret); goto out_put; } memset(&ashost->stats, 0, sizeof (ashost->stats)); queue_initialise(&ashost->queues.issue); queue_initialise(&ashost->queues.disconnected); msgqueue_initialise(&ashost->scsi.msgs); acornscsi_resetcard(ashost); ret = scsi_add_host(host, &ec->dev); if (ret) goto out_irq; scsi_scan_host(host); goto out; out_irq: free_irq(host->irq, ashost); msgqueue_free(&ashost->scsi.msgs); queue_free(&ashost->queues.disconnected); queue_free(&ashost->queues.issue); out_put: ecardm_iounmap(ec, ashost->fast); ecardm_iounmap(ec, ashost->base); scsi_host_put(host); out_release: ecard_release_resources(ec); out: return ret; } static void acornscsi_remove(struct expansion_card *ec) { struct Scsi_Host *host = ecard_get_drvdata(ec); AS_Host *ashost = (AS_Host *)host->hostdata; ecard_set_drvdata(ec, NULL); scsi_remove_host(host); /* * Put card into RESET state */ writeb(0x80, ashost->fast + PAGE_REG); free_irq(host->irq, ashost); msgqueue_free(&ashost->scsi.msgs); queue_free(&ashost->queues.disconnected); queue_free(&ashost->queues.issue); ecardm_iounmap(ec, ashost->fast); ecardm_iounmap(ec, ashost->base); scsi_host_put(host); ecard_release_resources(ec); } static const struct ecard_id acornscsi_cids[] = { { MANU_ACORN, PROD_ACORN_SCSI }, { 0xffff, 0xffff }, }; static struct ecard_driver acornscsi_driver = { .probe = acornscsi_probe, .remove = acornscsi_remove, .id_table = acornscsi_cids, .drv = { .name = "acornscsi", }, }; static int __init acornscsi_init(void) { return ecard_register_driver(&acornscsi_driver); } static void 
__exit acornscsi_exit(void) { ecard_remove_driver(&acornscsi_driver); } module_init(acornscsi_init); module_exit(acornscsi_exit); MODULE_AUTHOR("Russell King"); MODULE_DESCRIPTION("AcornSCSI driver"); MODULE_LICENSE("GPL");
linux-master
drivers/scsi/arm/acornscsi.c
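The acornscsi_probe() routine above acquires its resources in stages (card resources, host allocation, iomaps, IRQ) and unwinds them in reverse order through its out_irq/out_put/out_release goto ladder, while acornscsi_remove() releases the same resources in the same reverse order. The stand-alone user-space sketch below only illustrates that staged-acquire/reverse-unwind control flow; every helper, type and resource in it (fake_host, fake_probe, fake_request_irq, the /etc/hostname path) is a hypothetical stand-in and not part of the driver.

/*
 * Minimal user-space sketch (not driver code) of the staged-acquire /
 * reverse-unwind pattern used by acornscsi_probe()/acornscsi_remove().
 * All names here are hypothetical stand-ins; only the control flow
 * mirrors the driver.
 */
#include <stdio.h>
#include <stdlib.h>

struct fake_host {
	FILE *cfg;	/* stands in for the requested card resources */
	char *regs;	/* stands in for the ioremapped register window */
	int   irq_on;	/* stands in for the registered IRQ handler */
};

static int fake_request_irq(struct fake_host *h)
{
	h->irq_on = 1;		/* pretend the IRQ line was free */
	return 0;
}

static int fake_probe(struct fake_host *h, const char *cfg_path)
{
	int ret;

	h->cfg = fopen(cfg_path, "r");		/* stage 1: claim the slot */
	if (!h->cfg) {
		ret = -1;
		goto out;
	}

	h->regs = calloc(1, 4096);		/* stage 2: allocate/map */
	if (!h->regs) {
		ret = -1;
		goto out_close;
	}

	ret = fake_request_irq(h);		/* stage 3: hook the interrupt */
	if (ret)
		goto out_free;

	return 0;				/* success: keep everything held */

out_free:					/* unwind strictly in reverse order */
	free(h->regs);
	h->regs = NULL;
out_close:
	fclose(h->cfg);
	h->cfg = NULL;
out:
	return ret;
}

static void fake_remove(struct fake_host *h)
{
	/* teardown mirrors the probe order in reverse, like acornscsi_remove() */
	h->irq_on = 0;
	free(h->regs);
	if (h->cfg)
		fclose(h->cfg);
}

int main(void)
{
	struct fake_host h = { 0 };

	/* /etc/hostname is only "some readable file"; any path works */
	if (fake_probe(&h, "/etc/hostname"))
		return EXIT_FAILURE;
	fake_remove(&h);
	return EXIT_SUCCESS;
}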
// SPDX-License-Identifier: GPL-2.0-or-later /* * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family * of PCI-SCSI IO processors. * * Copyright (C) 1999-2001 Gerard Roudier <[email protected]> * * This driver is derived from the Linux sym53c8xx driver. * Copyright (C) 1998-2000 Gerard Roudier * * The sym53c8xx driver is derived from the ncr53c8xx driver that had been * a port of the FreeBSD ncr driver to Linux-1.2.13. * * The original ncr driver has been written for 386bsd and FreeBSD by * Wolfgang Stanglmeier <[email protected]> * Stefan Esser <[email protected]> * Copyright (C) 1994 Wolfgang Stanglmeier * * Other major contributions: * * NVRAM detection and reading. * Copyright (C) 1997 Richard Waltham <[email protected]> * *----------------------------------------------------------------------------- */ #include "sym_glue.h" #include "sym_nvram.h" #ifdef SYM_CONF_DEBUG_NVRAM static u_char Tekram_boot_delay[7] = {3, 5, 10, 20, 30, 60, 120}; #endif /* * Get host setup from NVRAM. */ void sym_nvram_setup_host(struct Scsi_Host *shost, struct sym_hcb *np, struct sym_nvram *nvram) { /* * Get parity checking, host ID, verbose mode * and miscellaneous host flags from NVRAM. */ switch (nvram->type) { case SYM_SYMBIOS_NVRAM: if (!(nvram->data.Symbios.flags & SYMBIOS_PARITY_ENABLE)) np->rv_scntl0 &= ~0x0a; np->myaddr = nvram->data.Symbios.host_id & 0x0f; if (nvram->data.Symbios.flags & SYMBIOS_VERBOSE_MSGS) np->verbose += 1; if (nvram->data.Symbios.flags1 & SYMBIOS_SCAN_HI_LO) shost->reverse_ordering = 1; if (nvram->data.Symbios.flags2 & SYMBIOS_AVOID_BUS_RESET) np->usrflags |= SYM_AVOID_BUS_RESET; break; case SYM_TEKRAM_NVRAM: np->myaddr = nvram->data.Tekram.host_id & 0x0f; break; #ifdef CONFIG_PARISC case SYM_PARISC_PDC: if (nvram->data.parisc.host_id != -1) np->myaddr = nvram->data.parisc.host_id; if (nvram->data.parisc.factor != -1) np->minsync = nvram->data.parisc.factor; if (nvram->data.parisc.width != -1) np->maxwide = nvram->data.parisc.width; switch (nvram->data.parisc.mode) { case 0: np->scsi_mode = SMODE_SE; break; case 1: np->scsi_mode = SMODE_HVD; break; case 2: np->scsi_mode = SMODE_LVD; break; default: break; } #endif default: break; } } /* * Get target set-up from Symbios format NVRAM. */ static void sym_Symbios_setup_target(struct sym_tcb *tp, int target, Symbios_nvram *nvram) { Symbios_target *tn = &nvram->target[target]; if (!(tn->flags & SYMBIOS_QUEUE_TAGS_ENABLED)) tp->usrtags = 0; if (!(tn->flags & SYMBIOS_DISCONNECT_ENABLE)) tp->usrflags &= ~SYM_DISC_ENABLED; if (!(tn->flags & SYMBIOS_SCAN_AT_BOOT_TIME)) tp->usrflags |= SYM_SCAN_BOOT_DISABLED; if (!(tn->flags & SYMBIOS_SCAN_LUNS)) tp->usrflags |= SYM_SCAN_LUNS_DISABLED; tp->usr_period = (tn->sync_period + 3) / 4; tp->usr_width = (tn->bus_width == 0x8) ? 0 : 1; } static const unsigned char Tekram_sync[16] = { 25, 31, 37, 43, 50, 62, 75, 125, 12, 15, 18, 21, 6, 7, 9, 10 }; /* * Get target set-up from Tekram format NVRAM. */ static void sym_Tekram_setup_target(struct sym_tcb *tp, int target, Tekram_nvram *nvram) { struct Tekram_target *tn = &nvram->target[target]; if (tn->flags & TEKRAM_TAGGED_COMMANDS) { tp->usrtags = 2 << nvram->max_tags_index; } if (tn->flags & TEKRAM_DISCONNECT_ENABLE) tp->usrflags |= SYM_DISC_ENABLED; if (tn->flags & TEKRAM_SYNC_NEGO) tp->usr_period = Tekram_sync[tn->sync_index & 0xf]; tp->usr_width = (tn->flags & TEKRAM_WIDE_NEGO) ? 1 : 0; } /* * Get target setup from NVRAM. 
*/ void sym_nvram_setup_target(struct sym_tcb *tp, int target, struct sym_nvram *nvp) { switch (nvp->type) { case SYM_SYMBIOS_NVRAM: sym_Symbios_setup_target(tp, target, &nvp->data.Symbios); break; case SYM_TEKRAM_NVRAM: sym_Tekram_setup_target(tp, target, &nvp->data.Tekram); break; default: break; } } #ifdef SYM_CONF_DEBUG_NVRAM /* * Dump Symbios format NVRAM for debugging purpose. */ static void sym_display_Symbios_nvram(struct sym_device *np, Symbios_nvram *nvram) { int i; /* display Symbios nvram host data */ printf("%s: HOST ID=%d%s%s%s%s%s%s\n", sym_name(np), nvram->host_id & 0x0f, (nvram->flags & SYMBIOS_SCAM_ENABLE) ? " SCAM" :"", (nvram->flags & SYMBIOS_PARITY_ENABLE) ? " PARITY" :"", (nvram->flags & SYMBIOS_VERBOSE_MSGS) ? " VERBOSE" :"", (nvram->flags & SYMBIOS_CHS_MAPPING) ? " CHS_ALT" :"", (nvram->flags2 & SYMBIOS_AVOID_BUS_RESET)?" NO_RESET" :"", (nvram->flags1 & SYMBIOS_SCAN_HI_LO) ? " HI_LO" :""); /* display Symbios nvram drive data */ for (i = 0 ; i < 15 ; i++) { struct Symbios_target *tn = &nvram->target[i]; printf("%s-%d:%s%s%s%s WIDTH=%d SYNC=%d TMO=%d\n", sym_name(np), i, (tn->flags & SYMBIOS_DISCONNECT_ENABLE) ? " DISC" : "", (tn->flags & SYMBIOS_SCAN_AT_BOOT_TIME) ? " SCAN_BOOT" : "", (tn->flags & SYMBIOS_SCAN_LUNS) ? " SCAN_LUNS" : "", (tn->flags & SYMBIOS_QUEUE_TAGS_ENABLED)? " TCQ" : "", tn->bus_width, tn->sync_period / 4, tn->timeout); } } /* * Dump TEKRAM format NVRAM for debugging purpose. */ static void sym_display_Tekram_nvram(struct sym_device *np, Tekram_nvram *nvram) { int i, tags, boot_delay; char *rem; /* display Tekram nvram host data */ tags = 2 << nvram->max_tags_index; boot_delay = 0; if (nvram->boot_delay_index < 6) boot_delay = Tekram_boot_delay[nvram->boot_delay_index]; switch ((nvram->flags & TEKRAM_REMOVABLE_FLAGS) >> 6) { default: case 0: rem = ""; break; case 1: rem = " REMOVABLE=boot device"; break; case 2: rem = " REMOVABLE=all"; break; } printf("%s: HOST ID=%d%s%s%s%s%s%s%s%s%s BOOT DELAY=%d tags=%d\n", sym_name(np), nvram->host_id & 0x0f, (nvram->flags1 & SYMBIOS_SCAM_ENABLE) ? " SCAM" :"", (nvram->flags & TEKRAM_MORE_THAN_2_DRIVES) ? " >2DRIVES":"", (nvram->flags & TEKRAM_DRIVES_SUP_1GB) ? " >1GB" :"", (nvram->flags & TEKRAM_RESET_ON_POWER_ON) ? " RESET" :"", (nvram->flags & TEKRAM_ACTIVE_NEGATION) ? " ACT_NEG" :"", (nvram->flags & TEKRAM_IMMEDIATE_SEEK) ? " IMM_SEEK" :"", (nvram->flags & TEKRAM_SCAN_LUNS) ? " SCAN_LUNS" :"", (nvram->flags1 & TEKRAM_F2_F6_ENABLED) ? " F2_F6" :"", rem, boot_delay, tags); /* display Tekram nvram drive data */ for (i = 0; i <= 15; i++) { int sync, j; struct Tekram_target *tn = &nvram->target[i]; j = tn->sync_index & 0xf; sync = Tekram_sync[j]; printf("%s-%d:%s%s%s%s%s%s PERIOD=%d\n", sym_name(np), i, (tn->flags & TEKRAM_PARITY_CHECK) ? " PARITY" : "", (tn->flags & TEKRAM_SYNC_NEGO) ? " SYNC" : "", (tn->flags & TEKRAM_DISCONNECT_ENABLE) ? " DISC" : "", (tn->flags & TEKRAM_START_CMD) ? " START" : "", (tn->flags & TEKRAM_TAGGED_COMMANDS) ? " TCQ" : "", (tn->flags & TEKRAM_WIDE_NEGO) ? " WIDE" : "", sync); } } #else static void sym_display_Symbios_nvram(struct sym_device *np, Symbios_nvram *nvram) { (void)np; (void)nvram; } static void sym_display_Tekram_nvram(struct sym_device *np, Tekram_nvram *nvram) { (void)np; (void)nvram; } #endif /* SYM_CONF_DEBUG_NVRAM */ /* * 24C16 EEPROM reading. * * GPIO0 - data in/data out * GPIO1 - clock * Symbios NVRAM wiring now also used by Tekram. 
*/ #define SET_BIT 0 #define CLR_BIT 1 #define SET_CLK 2 #define CLR_CLK 3 /* * Set/clear data/clock bit in GPIO0 */ static void S24C16_set_bit(struct sym_device *np, u_char write_bit, u_char *gpreg, int bit_mode) { udelay(5); switch (bit_mode) { case SET_BIT: *gpreg |= write_bit; break; case CLR_BIT: *gpreg &= 0xfe; break; case SET_CLK: *gpreg |= 0x02; break; case CLR_CLK: *gpreg &= 0xfd; break; } OUTB(np, nc_gpreg, *gpreg); INB(np, nc_mbox1); udelay(5); } /* * Send START condition to NVRAM to wake it up. */ static void S24C16_start(struct sym_device *np, u_char *gpreg) { S24C16_set_bit(np, 1, gpreg, SET_BIT); S24C16_set_bit(np, 0, gpreg, SET_CLK); S24C16_set_bit(np, 0, gpreg, CLR_BIT); S24C16_set_bit(np, 0, gpreg, CLR_CLK); } /* * Send STOP condition to NVRAM - puts NVRAM to sleep... ZZzzzz!! */ static void S24C16_stop(struct sym_device *np, u_char *gpreg) { S24C16_set_bit(np, 0, gpreg, SET_CLK); S24C16_set_bit(np, 1, gpreg, SET_BIT); } /* * Read or write a bit to the NVRAM, * read if GPIO0 input else write if GPIO0 output */ static void S24C16_do_bit(struct sym_device *np, u_char *read_bit, u_char write_bit, u_char *gpreg) { S24C16_set_bit(np, write_bit, gpreg, SET_BIT); S24C16_set_bit(np, 0, gpreg, SET_CLK); if (read_bit) *read_bit = INB(np, nc_gpreg); S24C16_set_bit(np, 0, gpreg, CLR_CLK); S24C16_set_bit(np, 0, gpreg, CLR_BIT); } /* * Output an ACK to the NVRAM after reading, * change GPIO0 to output and when done back to an input */ static void S24C16_write_ack(struct sym_device *np, u_char write_bit, u_char *gpreg, u_char *gpcntl) { OUTB(np, nc_gpcntl, *gpcntl & 0xfe); S24C16_do_bit(np, NULL, write_bit, gpreg); OUTB(np, nc_gpcntl, *gpcntl); } /* * Input an ACK from NVRAM after writing, * change GPIO0 to input and when done back to an output */ static void S24C16_read_ack(struct sym_device *np, u_char *read_bit, u_char *gpreg, u_char *gpcntl) { OUTB(np, nc_gpcntl, *gpcntl | 0x01); S24C16_do_bit(np, read_bit, 1, gpreg); OUTB(np, nc_gpcntl, *gpcntl); } /* * WRITE a byte to the NVRAM and then get an ACK to see it was accepted OK, * GPIO0 must already be set as an output */ static void S24C16_write_byte(struct sym_device *np, u_char *ack_data, u_char write_data, u_char *gpreg, u_char *gpcntl) { int x; for (x = 0; x < 8; x++) S24C16_do_bit(np, NULL, (write_data >> (7 - x)) & 0x01, gpreg); S24C16_read_ack(np, ack_data, gpreg, gpcntl); } /* * READ a byte from the NVRAM and then send an ACK to say we have got it, * GPIO0 must already be set as an input */ static void S24C16_read_byte(struct sym_device *np, u_char *read_data, u_char ack_data, u_char *gpreg, u_char *gpcntl) { int x; u_char read_bit; *read_data = 0; for (x = 0; x < 8; x++) { S24C16_do_bit(np, &read_bit, 1, gpreg); *read_data |= ((read_bit & 0x01) << (7 - x)); } S24C16_write_ack(np, ack_data, gpreg, gpcntl); } #ifdef SYM_CONF_NVRAM_WRITE_SUPPORT /* * Write 'len' bytes starting at 'offset'. 
*/ static int sym_write_S24C16_nvram(struct sym_device *np, int offset, u_char *data, int len) { u_char gpcntl, gpreg; u_char old_gpcntl, old_gpreg; u_char ack_data; int x, y; /* save current state of GPCNTL and GPREG */ old_gpreg = INB(np, nc_gpreg); old_gpcntl = INB(np, nc_gpcntl); gpcntl = old_gpcntl & 0x1c; /* set up GPREG & GPCNTL to set GPIO0 and GPIO1 in to known state */ OUTB(np, nc_gpreg, old_gpreg); OUTB(np, nc_gpcntl, gpcntl); /* this is to set NVRAM into a known state with GPIO0/1 both low */ gpreg = old_gpreg; S24C16_set_bit(np, 0, &gpreg, CLR_CLK); S24C16_set_bit(np, 0, &gpreg, CLR_BIT); /* now set NVRAM inactive with GPIO0/1 both high */ S24C16_stop(np, &gpreg); /* NVRAM has to be written in segments of 16 bytes */ for (x = 0; x < len ; x += 16) { do { S24C16_start(np, &gpreg); S24C16_write_byte(np, &ack_data, 0xa0 | (((offset+x) >> 7) & 0x0e), &gpreg, &gpcntl); } while (ack_data & 0x01); S24C16_write_byte(np, &ack_data, (offset+x) & 0xff, &gpreg, &gpcntl); for (y = 0; y < 16; y++) S24C16_write_byte(np, &ack_data, data[x+y], &gpreg, &gpcntl); S24C16_stop(np, &gpreg); } /* return GPIO0/1 to original states after having accessed NVRAM */ OUTB(np, nc_gpcntl, old_gpcntl); OUTB(np, nc_gpreg, old_gpreg); return 0; } #endif /* SYM_CONF_NVRAM_WRITE_SUPPORT */ /* * Read 'len' bytes starting at 'offset'. */ static int sym_read_S24C16_nvram(struct sym_device *np, int offset, u_char *data, int len) { u_char gpcntl, gpreg; u_char old_gpcntl, old_gpreg; u_char ack_data; int retv = 1; int x; /* save current state of GPCNTL and GPREG */ old_gpreg = INB(np, nc_gpreg); old_gpcntl = INB(np, nc_gpcntl); gpcntl = old_gpcntl & 0x1c; /* set up GPREG & GPCNTL to set GPIO0 and GPIO1 in to known state */ OUTB(np, nc_gpreg, old_gpreg); OUTB(np, nc_gpcntl, gpcntl); /* this is to set NVRAM into a known state with GPIO0/1 both low */ gpreg = old_gpreg; S24C16_set_bit(np, 0, &gpreg, CLR_CLK); S24C16_set_bit(np, 0, &gpreg, CLR_BIT); /* now set NVRAM inactive with GPIO0/1 both high */ S24C16_stop(np, &gpreg); /* activate NVRAM */ S24C16_start(np, &gpreg); /* write device code and random address MSB */ S24C16_write_byte(np, &ack_data, 0xa0 | ((offset >> 7) & 0x0e), &gpreg, &gpcntl); if (ack_data & 0x01) goto out; /* write random address LSB */ S24C16_write_byte(np, &ack_data, offset & 0xff, &gpreg, &gpcntl); if (ack_data & 0x01) goto out; /* regenerate START state to set up for reading */ S24C16_start(np, &gpreg); /* rewrite device code and address MSB with read bit set (lsb = 0x01) */ S24C16_write_byte(np, &ack_data, 0xa1 | ((offset >> 7) & 0x0e), &gpreg, &gpcntl); if (ack_data & 0x01) goto out; /* now set up GPIO0 for inputting data */ gpcntl |= 0x01; OUTB(np, nc_gpcntl, gpcntl); /* input all requested data - only part of total NVRAM */ for (x = 0; x < len; x++) S24C16_read_byte(np, &data[x], (x == (len-1)), &gpreg, &gpcntl); /* finally put NVRAM back in inactive mode */ gpcntl &= 0xfe; OUTB(np, nc_gpcntl, gpcntl); S24C16_stop(np, &gpreg); retv = 0; out: /* return GPIO0/1 to original states after having accessed NVRAM */ OUTB(np, nc_gpcntl, old_gpcntl); OUTB(np, nc_gpreg, old_gpreg); return retv; } #undef SET_BIT #undef CLR_BIT #undef SET_CLK #undef CLR_CLK /* * Try reading Symbios NVRAM. * Return 0 if OK. 
*/ static int sym_read_Symbios_nvram(struct sym_device *np, Symbios_nvram *nvram) { static u_char Symbios_trailer[6] = {0xfe, 0xfe, 0, 0, 0, 0}; u_char *data = (u_char *) nvram; int len = sizeof(*nvram); u_short csum; int x; /* probe the 24c16 and read the SYMBIOS 24c16 area */ if (sym_read_S24C16_nvram (np, SYMBIOS_NVRAM_ADDRESS, data, len)) return 1; /* check valid NVRAM signature, verify byte count and checksum */ if (nvram->type != 0 || memcmp(nvram->trailer, Symbios_trailer, 6) || nvram->byte_count != len - 12) return 1; /* verify checksum */ for (x = 6, csum = 0; x < len - 6; x++) csum += data[x]; if (csum != nvram->checksum) return 1; return 0; } /* * 93C46 EEPROM reading. * * GPIO0 - data in * GPIO1 - data out * GPIO2 - clock * GPIO4 - chip select * * Used by Tekram. */ /* * Pulse clock bit in GPIO0 */ static void T93C46_Clk(struct sym_device *np, u_char *gpreg) { OUTB(np, nc_gpreg, *gpreg | 0x04); INB(np, nc_mbox1); udelay(2); OUTB(np, nc_gpreg, *gpreg); } /* * Read bit from NVRAM */ static void T93C46_Read_Bit(struct sym_device *np, u_char *read_bit, u_char *gpreg) { udelay(2); T93C46_Clk(np, gpreg); *read_bit = INB(np, nc_gpreg); } /* * Write bit to GPIO0 */ static void T93C46_Write_Bit(struct sym_device *np, u_char write_bit, u_char *gpreg) { if (write_bit & 0x01) *gpreg |= 0x02; else *gpreg &= 0xfd; *gpreg |= 0x10; OUTB(np, nc_gpreg, *gpreg); INB(np, nc_mbox1); udelay(2); T93C46_Clk(np, gpreg); } /* * Send STOP condition to NVRAM - puts NVRAM to sleep... ZZZzzz!! */ static void T93C46_Stop(struct sym_device *np, u_char *gpreg) { *gpreg &= 0xef; OUTB(np, nc_gpreg, *gpreg); INB(np, nc_mbox1); udelay(2); T93C46_Clk(np, gpreg); } /* * Send read command and address to NVRAM */ static void T93C46_Send_Command(struct sym_device *np, u_short write_data, u_char *read_bit, u_char *gpreg) { int x; /* send 9 bits, start bit (1), command (2), address (6) */ for (x = 0; x < 9; x++) T93C46_Write_Bit(np, (u_char) (write_data >> (8 - x)), gpreg); *read_bit = INB(np, nc_gpreg); } /* * READ 2 bytes from the NVRAM */ static void T93C46_Read_Word(struct sym_device *np, unsigned short *nvram_data, unsigned char *gpreg) { int x; u_char read_bit; *nvram_data = 0; for (x = 0; x < 16; x++) { T93C46_Read_Bit(np, &read_bit, gpreg); if (read_bit & 0x01) *nvram_data |= (0x01 << (15 - x)); else *nvram_data &= ~(0x01 << (15 - x)); } } /* * Read Tekram NvRAM data. */ static int T93C46_Read_Data(struct sym_device *np, unsigned short *data, int len, unsigned char *gpreg) { int x; for (x = 0; x < len; x++) { unsigned char read_bit; /* output read command and address */ T93C46_Send_Command(np, 0x180 | x, &read_bit, gpreg); if (read_bit & 0x01) return 1; /* Bad */ T93C46_Read_Word(np, &data[x], gpreg); T93C46_Stop(np, gpreg); } return 0; } /* * Try reading 93C46 Tekram NVRAM. 
*/ static int sym_read_T93C46_nvram(struct sym_device *np, Tekram_nvram *nvram) { u_char gpcntl, gpreg; u_char old_gpcntl, old_gpreg; int retv; /* save current state of GPCNTL and GPREG */ old_gpreg = INB(np, nc_gpreg); old_gpcntl = INB(np, nc_gpcntl); /* set up GPREG & GPCNTL to set GPIO0/1/2/4 in to known state, 0 in, 1/2/4 out */ gpreg = old_gpreg & 0xe9; OUTB(np, nc_gpreg, gpreg); gpcntl = (old_gpcntl & 0xe9) | 0x09; OUTB(np, nc_gpcntl, gpcntl); /* input all of NVRAM, 64 words */ retv = T93C46_Read_Data(np, (u_short *) nvram, sizeof(*nvram) / sizeof(short), &gpreg); /* return GPIO0/1/2/4 to original states after having accessed NVRAM */ OUTB(np, nc_gpcntl, old_gpcntl); OUTB(np, nc_gpreg, old_gpreg); return retv; } /* * Try reading Tekram NVRAM. * Return 0 if OK. */ static int sym_read_Tekram_nvram (struct sym_device *np, Tekram_nvram *nvram) { u_char *data = (u_char *) nvram; int len = sizeof(*nvram); u_short csum; int x; switch (np->pdev->device) { case PCI_DEVICE_ID_NCR_53C885: case PCI_DEVICE_ID_NCR_53C895: case PCI_DEVICE_ID_NCR_53C896: x = sym_read_S24C16_nvram(np, TEKRAM_24C16_NVRAM_ADDRESS, data, len); break; case PCI_DEVICE_ID_NCR_53C875: x = sym_read_S24C16_nvram(np, TEKRAM_24C16_NVRAM_ADDRESS, data, len); if (!x) break; fallthrough; default: x = sym_read_T93C46_nvram(np, nvram); break; } if (x) return 1; /* verify checksum */ for (x = 0, csum = 0; x < len - 1; x += 2) csum += data[x] + (data[x+1] << 8); if (csum != 0x1234) return 1; return 0; } #ifdef CONFIG_PARISC /* * Host firmware (PDC) keeps a table for altering SCSI capabilities. * Many newer machines export one channel of 53c896 chip as SE, 50-pin HD. * Also used for Multi-initiator SCSI clusters to set the SCSI Initiator ID. */ static int sym_read_parisc_pdc(struct sym_device *np, struct pdc_initiator *pdc) { struct hardware_path hwpath; get_pci_node_path(np->pdev, &hwpath); if (!pdc_get_initiator(&hwpath, pdc)) return 0; return SYM_PARISC_PDC; } #else static inline int sym_read_parisc_pdc(struct sym_device *np, struct pdc_initiator *x) { return 0; } #endif /* * Try reading Symbios or Tekram NVRAM */ int sym_read_nvram(struct sym_device *np, struct sym_nvram *nvp) { if (!sym_read_Symbios_nvram(np, &nvp->data.Symbios)) { nvp->type = SYM_SYMBIOS_NVRAM; sym_display_Symbios_nvram(np, &nvp->data.Symbios); } else if (!sym_read_Tekram_nvram(np, &nvp->data.Tekram)) { nvp->type = SYM_TEKRAM_NVRAM; sym_display_Tekram_nvram(np, &nvp->data.Tekram); } else { nvp->type = sym_read_parisc_pdc(np, &nvp->data.parisc); } return nvp->type; } char *sym_nvram_type(struct sym_nvram *nvp) { switch (nvp->type) { case SYM_SYMBIOS_NVRAM: return "Symbios NVRAM"; case SYM_TEKRAM_NVRAM: return "Tekram NVRAM"; case SYM_PARISC_PDC: return "PA-RISC Firmware"; default: return "No NVRAM"; } }
linux-master
drivers/scsi/sym53c8xx_2/sym_nvram.c
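The NVRAM validation in sym_nvram.c above relies on two simple checksum conventions: a Symbios image is accepted when the byte-wise sum of its payload matches the stored checksum field, and a Tekram image when the 16-bit little-endian words of the whole block sum to 0x1234. The stand-alone sketch below reproduces only that arithmetic on synthetic buffers so the two conventions can be compared; the data values and helper names are made up for illustration and are not part of the driver.

/*
 * Stand-alone sketch of the two checksum conventions verified in
 * sym_nvram.c. The buffers below are synthetic; only the arithmetic
 * mirrors the driver.
 */
#include <stdio.h>

/* Tekram style: all 16-bit little-endian words of the image,
 * taken together, must sum to 0x1234 (modulo 65536). */
static int tekram_csum_ok(const unsigned char *data, int len)
{
	unsigned short csum = 0;
	int x;

	for (x = 0; x < len - 1; x += 2)
		csum += data[x] + (data[x + 1] << 8);

	return csum == 0x1234;
}

/* Symbios style: sum the payload bytes and compare against a
 * separately stored checksum value (passed in directly here). */
static int symbios_csum_ok(const unsigned char *payload, int len,
			   unsigned short stored)
{
	unsigned short csum = 0;
	int x;

	for (x = 0; x < len; x++)
		csum += payload[x];

	return csum == stored;
}

int main(void)
{
	/* Four little-endian words: 0x1000 + 0x0200 + 0x0030 + 0x0004 = 0x1234 */
	unsigned char tekram[8] = { 0x00, 0x10, 0x00, 0x02, 0x30, 0x00, 0x04, 0x00 };
	/* Payload bytes summing to 10, with 10 as the stored checksum */
	unsigned char payload[4] = { 1, 2, 3, 4 };

	printf("tekram image %s\n", tekram_csum_ok(tekram, 8) ? "ok" : "bad");
	printf("symbios payload %s\n",
	       symbios_csum_ok(payload, 4, 10) ? "ok" : "bad");
	return 0;
}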
// SPDX-License-Identifier: GPL-2.0-or-later /* * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family * of PCI-SCSI IO processors. * * Copyright (C) 1999-2001 Gerard Roudier <[email protected]> * Copyright (c) 2003-2005 Matthew Wilcox <[email protected]> * * This driver is derived from the Linux sym53c8xx driver. * Copyright (C) 1998-2000 Gerard Roudier * * The sym53c8xx driver is derived from the ncr53c8xx driver that had been * a port of the FreeBSD ncr driver to Linux-1.2.13. * * The original ncr driver has been written for 386bsd and FreeBSD by * Wolfgang Stanglmeier <[email protected]> * Stefan Esser <[email protected]> * Copyright (C) 1994 Wolfgang Stanglmeier * * Other major contributions: * * NVRAM detection and reading. * Copyright (C) 1997 Richard Waltham <[email protected]> * *----------------------------------------------------------------------------- */ #include <linux/ctype.h> #include <linux/init.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/spinlock.h> #include <scsi/scsi.h> #include <scsi/scsi_tcq.h> #include <scsi/scsi_device.h> #include <scsi/scsi_transport.h> #include "sym_glue.h" #include "sym_nvram.h" #define NAME53C "sym53c" #define NAME53C8XX "sym53c8xx" struct sym_driver_setup sym_driver_setup = SYM_LINUX_DRIVER_SETUP; unsigned int sym_debug_flags = 0; static char *excl_string; static char *safe_string; module_param_named(cmd_per_lun, sym_driver_setup.max_tag, ushort, 0); module_param_named(burst, sym_driver_setup.burst_order, byte, 0); module_param_named(led, sym_driver_setup.scsi_led, byte, 0); module_param_named(diff, sym_driver_setup.scsi_diff, byte, 0); module_param_named(irqm, sym_driver_setup.irq_mode, byte, 0); module_param_named(buschk, sym_driver_setup.scsi_bus_check, byte, 0); module_param_named(hostid, sym_driver_setup.host_id, byte, 0); module_param_named(verb, sym_driver_setup.verbose, byte, 0); module_param_named(debug, sym_debug_flags, uint, 0); module_param_named(settle, sym_driver_setup.settle_delay, byte, 0); module_param_named(nvram, sym_driver_setup.use_nvram, byte, 0); module_param_named(excl, excl_string, charp, 0); module_param_named(safe, safe_string, charp, 0); MODULE_PARM_DESC(cmd_per_lun, "The maximum number of tags to use by default"); MODULE_PARM_DESC(burst, "Maximum burst. 0 to disable, 255 to read from registers"); MODULE_PARM_DESC(led, "Set to 1 to enable LED support"); MODULE_PARM_DESC(diff, "0 for no differential mode, 1 for BIOS, 2 for always, 3 for not GPIO3"); MODULE_PARM_DESC(irqm, "0 for open drain, 1 to leave alone, 2 for totem pole"); MODULE_PARM_DESC(buschk, "0 to not check, 1 for detach on error, 2 for warn on error"); MODULE_PARM_DESC(hostid, "The SCSI ID to use for the host adapters"); MODULE_PARM_DESC(verb, "0 for minimal verbosity, 1 for normal, 2 for excessive"); MODULE_PARM_DESC(debug, "Set bits to enable debugging"); MODULE_PARM_DESC(settle, "Settle delay in seconds. 
Default 3"); MODULE_PARM_DESC(nvram, "Option currently not used"); MODULE_PARM_DESC(excl, "List ioport addresses here to prevent controllers from being attached"); MODULE_PARM_DESC(safe, "Set other settings to a \"safe mode\""); MODULE_LICENSE("GPL"); MODULE_VERSION(SYM_VERSION); MODULE_AUTHOR("Matthew Wilcox <[email protected]>"); MODULE_DESCRIPTION("NCR, Symbios and LSI 8xx and 1010 PCI SCSI adapters"); static void sym2_setup_params(void) { char *p = excl_string; int xi = 0; while (p && (xi < 8)) { char *next_p; int val = (int) simple_strtoul(p, &next_p, 0); sym_driver_setup.excludes[xi++] = val; p = next_p; } if (safe_string) { if (*safe_string == 'y') { sym_driver_setup.max_tag = 0; sym_driver_setup.burst_order = 0; sym_driver_setup.scsi_led = 0; sym_driver_setup.scsi_diff = 1; sym_driver_setup.irq_mode = 0; sym_driver_setup.scsi_bus_check = 2; sym_driver_setup.host_id = 7; sym_driver_setup.verbose = 2; sym_driver_setup.settle_delay = 10; sym_driver_setup.use_nvram = 1; } else if (*safe_string != 'n') { printk(KERN_WARNING NAME53C8XX "Ignoring parameter %s" " passed to safe option", safe_string); } } } static struct scsi_transport_template *sym2_transport_template = NULL; /* * Driver private area in the SCSI command structure. */ struct sym_ucmd { /* Override the SCSI pointer structure */ struct completion *eh_done; /* SCSI error handling */ }; #define SYM_UCMD_PTR(cmd) ((struct sym_ucmd *)scsi_cmd_priv(cmd)) #define SYM_SOFTC_PTR(cmd) sym_get_hcb(cmd->device->host) /* * Complete a pending CAM CCB. */ void sym_xpt_done(struct sym_hcb *np, struct scsi_cmnd *cmd) { struct sym_ucmd *ucmd = SYM_UCMD_PTR(cmd); if (ucmd->eh_done) complete(ucmd->eh_done); scsi_dma_unmap(cmd); scsi_done(cmd); } /* * Tell the SCSI layer about a BUS RESET. */ void sym_xpt_async_bus_reset(struct sym_hcb *np) { printf_notice("%s: SCSI BUS has been reset.\n", sym_name(np)); np->s.settle_time = jiffies + sym_driver_setup.settle_delay * HZ; np->s.settle_time_valid = 1; if (sym_verbose >= 2) printf_info("%s: command processing suspended for %d seconds\n", sym_name(np), sym_driver_setup.settle_delay); } /* * Choose the more appropriate CAM status if * the IO encountered an extended error. */ static int sym_xerr_cam_status(int cam_status, int x_status) { if (x_status) { if (x_status & XE_PARITY_ERR) cam_status = DID_PARITY; else cam_status = DID_ERROR; } return cam_status; } /* * Build CAM result for a failed or auto-sensed IO. */ void sym_set_cam_result_error(struct sym_hcb *np, struct sym_ccb *cp, int resid) { struct scsi_cmnd *cmd = cp->cmd; u_int cam_status, scsi_status; cam_status = DID_OK; scsi_status = cp->ssss_status; if (cp->host_flags & HF_SENSE) { scsi_status = cp->sv_scsi_status; resid = cp->sv_resid; if (sym_verbose && cp->sv_xerr_status) sym_print_xerr(cmd, cp->sv_xerr_status); if (cp->host_status == HS_COMPLETE && cp->ssss_status == S_GOOD && cp->xerr_status == 0) { cam_status = sym_xerr_cam_status(DID_OK, cp->sv_xerr_status); /* * Bounce back the sense data to user. */ memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); memcpy(cmd->sense_buffer, cp->sns_bbuf, min(SCSI_SENSE_BUFFERSIZE, SYM_SNS_BBUF_LEN)); #if 0 /* * If the device reports a UNIT ATTENTION condition * due to a RESET condition, we should consider all * disconnect CCBs for this unit as aborted. */ if (1) { u_char *p; p = (u_char *) cmd->sense_data; if (p[0]==0x70 && p[2]==0x6 && p[12]==0x29) sym_clear_tasks(np, DID_ABORT, cp->target,cp->lun, -1); } #endif } else { /* * Error return from our internal request sense. 
This * is bad: we must clear the contingent allegiance * condition otherwise the device will always return * BUSY. Use a big stick. */ sym_reset_scsi_target(np, cmd->device->id); cam_status = DID_ERROR; } } else if (cp->host_status == HS_COMPLETE) /* Bad SCSI status */ cam_status = DID_OK; else if (cp->host_status == HS_SEL_TIMEOUT) /* Selection timeout */ cam_status = DID_NO_CONNECT; else if (cp->host_status == HS_UNEXPECTED) /* Unexpected BUS FREE*/ cam_status = DID_ERROR; else { /* Extended error */ if (sym_verbose) { sym_print_addr(cmd, "COMMAND FAILED (%x %x %x).\n", cp->host_status, cp->ssss_status, cp->xerr_status); } /* * Set the most appropriate value for CAM status. */ cam_status = sym_xerr_cam_status(DID_ERROR, cp->xerr_status); } scsi_set_resid(cmd, resid); cmd->result = (cam_status << 16) | scsi_status; } static int sym_scatter(struct sym_hcb *np, struct sym_ccb *cp, struct scsi_cmnd *cmd) { int segment; int use_sg; cp->data_len = 0; use_sg = scsi_dma_map(cmd); if (use_sg > 0) { struct scatterlist *sg; struct sym_tcb *tp = &np->target[cp->target]; struct sym_tblmove *data; if (use_sg > SYM_CONF_MAX_SG) { scsi_dma_unmap(cmd); return -1; } data = &cp->phys.data[SYM_CONF_MAX_SG - use_sg]; scsi_for_each_sg(cmd, sg, use_sg, segment) { dma_addr_t baddr = sg_dma_address(sg); unsigned int len = sg_dma_len(sg); if ((len & 1) && (tp->head.wval & EWS)) { len++; cp->odd_byte_adjustment++; } sym_build_sge(np, &data[segment], baddr, len); cp->data_len += len; } } else { segment = -2; } return segment; } /* * Queue a SCSI command. */ static int sym_queue_command(struct sym_hcb *np, struct scsi_cmnd *cmd) { struct scsi_device *sdev = cmd->device; struct sym_tcb *tp; struct sym_lcb *lp; struct sym_ccb *cp; int order; /* * Retrieve the target descriptor. */ tp = &np->target[sdev->id]; /* * Select tagged/untagged. */ lp = sym_lp(tp, sdev->lun); order = (lp && lp->s.reqtags) ? M_SIMPLE_TAG : 0; /* * Queue the SCSI IO. */ cp = sym_get_ccb(np, cmd, order); if (!cp) return 1; /* Means resource shortage */ sym_queue_scsiio(np, cmd, cp); return 0; } /* * Setup buffers and pointers that address the CDB. */ static inline int sym_setup_cdb(struct sym_hcb *np, struct scsi_cmnd *cmd, struct sym_ccb *cp) { memcpy(cp->cdb_buf, cmd->cmnd, cmd->cmd_len); cp->phys.cmd.addr = CCB_BA(cp, cdb_buf[0]); cp->phys.cmd.size = cpu_to_scr(cmd->cmd_len); return 0; } /* * Setup pointers that address the data and start the I/O. */ int sym_setup_data_and_start(struct sym_hcb *np, struct scsi_cmnd *cmd, struct sym_ccb *cp) { u32 lastp, goalp; int dir; /* * Build the CDB. */ if (sym_setup_cdb(np, cmd, cp)) goto out_abort; /* * No direction means no data. */ dir = cmd->sc_data_direction; if (dir != DMA_NONE) { cp->segments = sym_scatter(np, cp, cmd); if (cp->segments < 0) { sym_set_cam_status(cmd, DID_ERROR); goto out_abort; } /* * No segments means no data. */ if (!cp->segments) dir = DMA_NONE; } else { cp->data_len = 0; cp->segments = 0; } /* * Set the data pointer. */ switch (dir) { case DMA_BIDIRECTIONAL: scmd_printk(KERN_INFO, cmd, "got DMA_BIDIRECTIONAL command"); sym_set_cam_status(cmd, DID_ERROR); goto out_abort; case DMA_TO_DEVICE: goalp = SCRIPTA_BA(np, data_out2) + 8; lastp = goalp - 8 - (cp->segments * (2*4)); break; case DMA_FROM_DEVICE: cp->host_flags |= HF_DATA_IN; goalp = SCRIPTA_BA(np, data_in2) + 8; lastp = goalp - 8 - (cp->segments * (2*4)); break; case DMA_NONE: default: lastp = goalp = SCRIPTB_BA(np, no_data); break; } /* * Set all pointers values needed by SCRIPTS. 
*/ cp->phys.head.lastp = cpu_to_scr(lastp); cp->phys.head.savep = cpu_to_scr(lastp); cp->startp = cp->phys.head.savep; cp->goalp = cpu_to_scr(goalp); /* * When `#ifed 1', the code below makes the driver * panic on the first attempt to write to a SCSI device. * It is the first test we want to do after a driver * change that does not seem obviously safe. :) */ #if 0 switch (cp->cdb_buf[0]) { case 0x0A: case 0x2A: case 0xAA: panic("XXXXXXXXXXXXX WRITE NOT YET ALLOWED XXXXXXXXXXXXXX\n"); break; default: break; } #endif /* * activate this job. */ sym_put_start_queue(np, cp); return 0; out_abort: sym_free_ccb(np, cp); sym_xpt_done(np, cmd); return 0; } /* * timer daemon. * * Misused to keep the driver running when * interrupts are not configured correctly. */ static void sym_timer(struct sym_hcb *np) { unsigned long thistime = jiffies; /* * Restart the timer. */ np->s.timer.expires = thistime + SYM_CONF_TIMER_INTERVAL; add_timer(&np->s.timer); /* * If we are resetting the ncr, wait for settle_time before * clearing it. Then command processing will be resumed. */ if (np->s.settle_time_valid) { if (time_before_eq(np->s.settle_time, thistime)) { if (sym_verbose >= 2 ) printk("%s: command processing resumed\n", sym_name(np)); np->s.settle_time_valid = 0; } return; } /* * Nothing to do for now, but that may come. */ if (np->s.lasttime + 4*HZ < thistime) { np->s.lasttime = thistime; } #ifdef SYM_CONF_PCIQ_MAY_MISS_COMPLETIONS /* * Some way-broken PCI bridges may lead to * completions being lost when the clearing * of the INTFLY flag by the CPU occurs * concurrently with the chip raising this flag. * If this ever happen, lost completions will * be reaped here. */ sym_wakeup_done(np); #endif } /* * PCI BUS error handler. */ void sym_log_bus_error(struct Scsi_Host *shost) { struct sym_data *sym_data = shost_priv(shost); struct pci_dev *pdev = sym_data->pdev; unsigned short pci_sts; pci_read_config_word(pdev, PCI_STATUS, &pci_sts); if (pci_sts & 0xf900) { pci_write_config_word(pdev, PCI_STATUS, pci_sts); shost_printk(KERN_WARNING, shost, "PCI bus error: status = 0x%04x\n", pci_sts & 0xf900); } } /* * queuecommand method. Entered with the host adapter lock held and * interrupts disabled. */ static int sym53c8xx_queue_command_lck(struct scsi_cmnd *cmd) { struct sym_hcb *np = SYM_SOFTC_PTR(cmd); struct sym_ucmd *ucp = SYM_UCMD_PTR(cmd); int sts = 0; memset(ucp, 0, sizeof(*ucp)); /* * Shorten our settle_time if needed for * this command not to time out. */ if (np->s.settle_time_valid && scsi_cmd_to_rq(cmd)->timeout) { unsigned long tlimit = jiffies + scsi_cmd_to_rq(cmd)->timeout; tlimit -= SYM_CONF_TIMER_INTERVAL*2; if (time_after(np->s.settle_time, tlimit)) { np->s.settle_time = tlimit; } } if (np->s.settle_time_valid) return SCSI_MLQUEUE_HOST_BUSY; sts = sym_queue_command(np, cmd); if (sts) return SCSI_MLQUEUE_HOST_BUSY; return 0; } static DEF_SCSI_QCMD(sym53c8xx_queue_command) /* * Linux entry point of the interrupt handler. 
*/ static irqreturn_t sym53c8xx_intr(int irq, void *dev_id) { struct Scsi_Host *shost = dev_id; struct sym_data *sym_data = shost_priv(shost); irqreturn_t result; /* Avoid spinloop trying to handle interrupts on frozen device */ if (pci_channel_offline(sym_data->pdev)) return IRQ_NONE; if (DEBUG_FLAGS & DEBUG_TINY) printf_debug ("["); spin_lock(shost->host_lock); result = sym_interrupt(shost); spin_unlock(shost->host_lock); if (DEBUG_FLAGS & DEBUG_TINY) printf_debug ("]\n"); return result; } /* * Linux entry point of the timer handler */ static void sym53c8xx_timer(struct timer_list *t) { struct sym_hcb *np = from_timer(np, t, s.timer); unsigned long flags; spin_lock_irqsave(np->s.host->host_lock, flags); sym_timer(np); spin_unlock_irqrestore(np->s.host->host_lock, flags); } /* * What the eh thread wants us to perform. */ #define SYM_EH_ABORT 0 #define SYM_EH_DEVICE_RESET 1 #define SYM_EH_BUS_RESET 2 #define SYM_EH_HOST_RESET 3 /* * Generic method for our eh processing. * The 'op' argument tells what we have to do. */ static int sym_eh_handler(int op, char *opname, struct scsi_cmnd *cmd) { struct sym_ucmd *ucmd = SYM_UCMD_PTR(cmd); struct Scsi_Host *shost = cmd->device->host; struct sym_data *sym_data = shost_priv(shost); struct pci_dev *pdev = sym_data->pdev; struct sym_hcb *np = sym_data->ncb; SYM_QUEHEAD *qp; int cmd_queued = 0; int sts = -1; struct completion eh_done; scmd_printk(KERN_WARNING, cmd, "%s operation started\n", opname); /* We may be in an error condition because the PCI bus * went down. In this case, we need to wait until the * PCI bus is reset, the card is reset, and only then * proceed with the scsi error recovery. There's no * point in hurrying; take a leisurely wait. */ #define WAIT_FOR_PCI_RECOVERY 35 if (pci_channel_offline(pdev)) { int finished_reset = 0; init_completion(&eh_done); spin_lock_irq(shost->host_lock); /* Make sure we didn't race */ if (pci_channel_offline(pdev)) { BUG_ON(sym_data->io_reset); sym_data->io_reset = &eh_done; } else { finished_reset = 1; } spin_unlock_irq(shost->host_lock); if (!finished_reset) finished_reset = wait_for_completion_timeout (sym_data->io_reset, WAIT_FOR_PCI_RECOVERY*HZ); spin_lock_irq(shost->host_lock); sym_data->io_reset = NULL; spin_unlock_irq(shost->host_lock); if (!finished_reset) return SCSI_FAILED; } spin_lock_irq(shost->host_lock); /* This one is queued in some place -> to wait for completion */ FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) { struct sym_ccb *cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); if (cp->cmd == cmd) { cmd_queued = 1; break; } } /* Try to proceed the operation we have been asked for */ sts = -1; switch(op) { case SYM_EH_ABORT: sts = sym_abort_scsiio(np, cmd, 1); break; case SYM_EH_DEVICE_RESET: sts = sym_reset_scsi_target(np, cmd->device->id); break; case SYM_EH_BUS_RESET: sym_reset_scsi_bus(np, 1); sts = 0; break; case SYM_EH_HOST_RESET: sym_reset_scsi_bus(np, 0); sym_start_up(shost, 1); sts = 0; break; default: break; } /* On error, restore everything and cross fingers :) */ if (sts) cmd_queued = 0; if (cmd_queued) { init_completion(&eh_done); ucmd->eh_done = &eh_done; spin_unlock_irq(shost->host_lock); if (!wait_for_completion_timeout(&eh_done, 5*HZ)) { ucmd->eh_done = NULL; sts = -2; } } else { spin_unlock_irq(shost->host_lock); } dev_warn(&cmd->device->sdev_gendev, "%s operation %s.\n", opname, sts==0 ? "complete" :sts==-2 ? "timed-out" : "failed"); return sts ? SCSI_FAILED : SCSI_SUCCESS; } /* * Error handlers called from the eh thread (one thread per HBA). 
*/ static int sym53c8xx_eh_abort_handler(struct scsi_cmnd *cmd) { return sym_eh_handler(SYM_EH_ABORT, "ABORT", cmd); } static int sym53c8xx_eh_device_reset_handler(struct scsi_cmnd *cmd) { return sym_eh_handler(SYM_EH_DEVICE_RESET, "DEVICE RESET", cmd); } static int sym53c8xx_eh_bus_reset_handler(struct scsi_cmnd *cmd) { return sym_eh_handler(SYM_EH_BUS_RESET, "BUS RESET", cmd); } static int sym53c8xx_eh_host_reset_handler(struct scsi_cmnd *cmd) { return sym_eh_handler(SYM_EH_HOST_RESET, "HOST RESET", cmd); } /* * Tune device queuing depth, according to various limits. */ static void sym_tune_dev_queuing(struct sym_tcb *tp, int lun, u_short reqtags) { struct sym_lcb *lp = sym_lp(tp, lun); u_short oldtags; if (!lp) return; oldtags = lp->s.reqtags; if (reqtags > lp->s.scdev_depth) reqtags = lp->s.scdev_depth; lp->s.reqtags = reqtags; if (reqtags != oldtags) { dev_info(&tp->starget->dev, "tagged command queuing %s, command queue depth %d.\n", lp->s.reqtags ? "enabled" : "disabled", reqtags); } } static int sym53c8xx_slave_alloc(struct scsi_device *sdev) { struct sym_hcb *np = sym_get_hcb(sdev->host); struct sym_tcb *tp = &np->target[sdev->id]; struct sym_lcb *lp; unsigned long flags; int error; if (sdev->id >= SYM_CONF_MAX_TARGET || sdev->lun >= SYM_CONF_MAX_LUN) return -ENXIO; spin_lock_irqsave(np->s.host->host_lock, flags); /* * Fail the device init if the device is flagged NOSCAN at BOOT in * the NVRAM. This may speed up boot and maintain coherency with * BIOS device numbering. Clearing the flag allows the user to * rescan skipped devices later. We also return an error for * devices not flagged for SCAN LUNS in the NVRAM since some single * lun devices behave badly when asked for a non zero LUN. */ if (tp->usrflags & SYM_SCAN_BOOT_DISABLED) { tp->usrflags &= ~SYM_SCAN_BOOT_DISABLED; starget_printk(KERN_INFO, sdev->sdev_target, "Scan at boot disabled in NVRAM\n"); error = -ENXIO; goto out; } if (tp->usrflags & SYM_SCAN_LUNS_DISABLED) { if (sdev->lun != 0) { error = -ENXIO; goto out; } starget_printk(KERN_INFO, sdev->sdev_target, "Multiple LUNs disabled in NVRAM\n"); } lp = sym_alloc_lcb(np, sdev->id, sdev->lun); if (!lp) { error = -ENOMEM; goto out; } if (tp->nlcb == 1) tp->starget = sdev->sdev_target; spi_min_period(tp->starget) = tp->usr_period; spi_max_width(tp->starget) = tp->usr_width; error = 0; out: spin_unlock_irqrestore(np->s.host->host_lock, flags); return error; } /* * Linux entry point for device queue sizing. */ static int sym53c8xx_slave_configure(struct scsi_device *sdev) { struct sym_hcb *np = sym_get_hcb(sdev->host); struct sym_tcb *tp = &np->target[sdev->id]; struct sym_lcb *lp = sym_lp(tp, sdev->lun); int reqtags, depth_to_use; /* * Get user flags. */ lp->curr_flags = lp->user_flags; /* * Select queue depth from driver setup. * Do not use more than configured by user. * Use at least 1. * Do not use more than our maximum. */ reqtags = sym_driver_setup.max_tag; if (reqtags > tp->usrtags) reqtags = tp->usrtags; if (!sdev->tagged_supported) reqtags = 0; if (reqtags > SYM_CONF_MAX_TAG) reqtags = SYM_CONF_MAX_TAG; depth_to_use = reqtags ? 
reqtags : 1; scsi_change_queue_depth(sdev, depth_to_use); lp->s.scdev_depth = depth_to_use; sym_tune_dev_queuing(tp, sdev->lun, reqtags); if (!spi_initial_dv(sdev->sdev_target)) spi_dv_device(sdev); return 0; } static void sym53c8xx_slave_destroy(struct scsi_device *sdev) { struct sym_hcb *np = sym_get_hcb(sdev->host); struct sym_tcb *tp = &np->target[sdev->id]; struct sym_lcb *lp = sym_lp(tp, sdev->lun); unsigned long flags; /* if slave_alloc returned before allocating a sym_lcb, return */ if (!lp) return; spin_lock_irqsave(np->s.host->host_lock, flags); if (lp->busy_itlq || lp->busy_itl) { /* * This really shouldn't happen, but we can't return an error * so let's try to stop all on-going I/O. */ starget_printk(KERN_WARNING, tp->starget, "Removing busy LCB (%d)\n", (u8)sdev->lun); sym_reset_scsi_bus(np, 1); } if (sym_free_lcb(np, sdev->id, sdev->lun) == 0) { /* * It was the last unit for this target. */ tp->head.sval = 0; tp->head.wval = np->rv_scntl3; tp->head.uval = 0; tp->tgoal.check_nego = 1; tp->starget = NULL; } spin_unlock_irqrestore(np->s.host->host_lock, flags); } /* * Linux entry point for info() function */ static const char *sym53c8xx_info (struct Scsi_Host *host) { return SYM_DRIVER_NAME; } #ifdef SYM_LINUX_PROC_INFO_SUPPORT /* * Proc file system stuff * * A read operation returns adapter information. * A write operation is a control command. * The string is parsed in the driver code and the command is passed * to the sym_usercmd() function. */ #ifdef SYM_LINUX_USER_COMMAND_SUPPORT struct sym_usrcmd { u_long target; u_long lun; u_long data; u_long cmd; }; #define UC_SETSYNC 10 #define UC_SETTAGS 11 #define UC_SETDEBUG 12 #define UC_SETWIDE 14 #define UC_SETFLAG 15 #define UC_SETVERBOSE 17 #define UC_RESETDEV 18 #define UC_CLEARDEV 19 static void sym_exec_user_command (struct sym_hcb *np, struct sym_usrcmd *uc) { struct sym_tcb *tp; int t, l; switch (uc->cmd) { case 0: return; #ifdef SYM_LINUX_DEBUG_CONTROL_SUPPORT case UC_SETDEBUG: sym_debug_flags = uc->data; break; #endif case UC_SETVERBOSE: np->verbose = uc->data; break; default: /* * We assume that other commands apply to targets. * This should always be the case and avoid the below * 4 lines to be repeated 6 times. */ for (t = 0; t < SYM_CONF_MAX_TARGET; t++) { if (!((uc->target >> t) & 1)) continue; tp = &np->target[t]; if (!tp->nlcb) continue; switch (uc->cmd) { case UC_SETSYNC: if (!uc->data || uc->data >= 255) { tp->tgoal.iu = tp->tgoal.dt = tp->tgoal.qas = 0; tp->tgoal.offset = 0; } else if (uc->data <= 9 && np->minsync_dt) { if (uc->data < np->minsync_dt) uc->data = np->minsync_dt; tp->tgoal.iu = tp->tgoal.dt = tp->tgoal.qas = 1; tp->tgoal.width = 1; tp->tgoal.period = uc->data; tp->tgoal.offset = np->maxoffs_dt; } else { if (uc->data < np->minsync) uc->data = np->minsync; tp->tgoal.iu = tp->tgoal.dt = tp->tgoal.qas = 0; tp->tgoal.period = uc->data; tp->tgoal.offset = np->maxoffs; } tp->tgoal.check_nego = 1; break; case UC_SETWIDE: tp->tgoal.width = uc->data ? 
1 : 0; tp->tgoal.check_nego = 1; break; case UC_SETTAGS: for (l = 0; l < SYM_CONF_MAX_LUN; l++) sym_tune_dev_queuing(tp, l, uc->data); break; case UC_RESETDEV: tp->to_reset = 1; np->istat_sem = SEM; OUTB(np, nc_istat, SIGP|SEM); break; case UC_CLEARDEV: for (l = 0; l < SYM_CONF_MAX_LUN; l++) { struct sym_lcb *lp = sym_lp(tp, l); if (lp) lp->to_clear = 1; } np->istat_sem = SEM; OUTB(np, nc_istat, SIGP|SEM); break; case UC_SETFLAG: tp->usrflags = uc->data; break; } } break; } } static int sym_skip_spaces(char *ptr, int len) { int cnt, c; for (cnt = len; cnt > 0 && (c = *ptr++) && isspace(c); cnt--); return (len - cnt); } static int get_int_arg(char *ptr, int len, u_long *pv) { char *end; *pv = simple_strtoul(ptr, &end, 10); return (end - ptr); } static int is_keyword(char *ptr, int len, char *verb) { int verb_len = strlen(verb); if (len >= verb_len && !memcmp(verb, ptr, verb_len)) return verb_len; else return 0; } #define SKIP_SPACES(ptr, len) \ if ((arg_len = sym_skip_spaces(ptr, len)) < 1) \ return -EINVAL; \ ptr += arg_len; len -= arg_len; #define GET_INT_ARG(ptr, len, v) \ if (!(arg_len = get_int_arg(ptr, len, &(v)))) \ return -EINVAL; \ ptr += arg_len; len -= arg_len; /* * Parse a control command */ static int sym_user_command(struct Scsi_Host *shost, char *buffer, int length) { struct sym_hcb *np = sym_get_hcb(shost); char *ptr = buffer; int len = length; struct sym_usrcmd cmd, *uc = &cmd; int arg_len; u_long target; memset(uc, 0, sizeof(*uc)); if (len > 0 && ptr[len-1] == '\n') --len; if ((arg_len = is_keyword(ptr, len, "setsync")) != 0) uc->cmd = UC_SETSYNC; else if ((arg_len = is_keyword(ptr, len, "settags")) != 0) uc->cmd = UC_SETTAGS; else if ((arg_len = is_keyword(ptr, len, "setverbose")) != 0) uc->cmd = UC_SETVERBOSE; else if ((arg_len = is_keyword(ptr, len, "setwide")) != 0) uc->cmd = UC_SETWIDE; #ifdef SYM_LINUX_DEBUG_CONTROL_SUPPORT else if ((arg_len = is_keyword(ptr, len, "setdebug")) != 0) uc->cmd = UC_SETDEBUG; #endif else if ((arg_len = is_keyword(ptr, len, "setflag")) != 0) uc->cmd = UC_SETFLAG; else if ((arg_len = is_keyword(ptr, len, "resetdev")) != 0) uc->cmd = UC_RESETDEV; else if ((arg_len = is_keyword(ptr, len, "cleardev")) != 0) uc->cmd = UC_CLEARDEV; else arg_len = 0; #ifdef DEBUG_PROC_INFO printk("sym_user_command: arg_len=%d, cmd=%ld\n", arg_len, uc->cmd); #endif if (!arg_len) return -EINVAL; ptr += arg_len; len -= arg_len; switch(uc->cmd) { case UC_SETSYNC: case UC_SETTAGS: case UC_SETWIDE: case UC_SETFLAG: case UC_RESETDEV: case UC_CLEARDEV: SKIP_SPACES(ptr, len); if ((arg_len = is_keyword(ptr, len, "all")) != 0) { ptr += arg_len; len -= arg_len; uc->target = ~0; } else { GET_INT_ARG(ptr, len, target); uc->target = (1<<target); #ifdef DEBUG_PROC_INFO printk("sym_user_command: target=%ld\n", target); #endif } break; } switch(uc->cmd) { case UC_SETVERBOSE: case UC_SETSYNC: case UC_SETTAGS: case UC_SETWIDE: SKIP_SPACES(ptr, len); GET_INT_ARG(ptr, len, uc->data); #ifdef DEBUG_PROC_INFO printk("sym_user_command: data=%ld\n", uc->data); #endif break; #ifdef SYM_LINUX_DEBUG_CONTROL_SUPPORT case UC_SETDEBUG: while (len > 0) { SKIP_SPACES(ptr, len); if ((arg_len = is_keyword(ptr, len, "alloc"))) uc->data |= DEBUG_ALLOC; else if ((arg_len = is_keyword(ptr, len, "phase"))) uc->data |= DEBUG_PHASE; else if ((arg_len = is_keyword(ptr, len, "queue"))) uc->data |= DEBUG_QUEUE; else if ((arg_len = is_keyword(ptr, len, "result"))) uc->data |= DEBUG_RESULT; else if ((arg_len = is_keyword(ptr, len, "scatter"))) uc->data |= DEBUG_SCATTER; else if ((arg_len = is_keyword(ptr, len, 
"script"))) uc->data |= DEBUG_SCRIPT; else if ((arg_len = is_keyword(ptr, len, "tiny"))) uc->data |= DEBUG_TINY; else if ((arg_len = is_keyword(ptr, len, "timing"))) uc->data |= DEBUG_TIMING; else if ((arg_len = is_keyword(ptr, len, "nego"))) uc->data |= DEBUG_NEGO; else if ((arg_len = is_keyword(ptr, len, "tags"))) uc->data |= DEBUG_TAGS; else if ((arg_len = is_keyword(ptr, len, "pointer"))) uc->data |= DEBUG_POINTER; else return -EINVAL; ptr += arg_len; len -= arg_len; } #ifdef DEBUG_PROC_INFO printk("sym_user_command: data=%ld\n", uc->data); #endif break; #endif /* SYM_LINUX_DEBUG_CONTROL_SUPPORT */ case UC_SETFLAG: while (len > 0) { SKIP_SPACES(ptr, len); if ((arg_len = is_keyword(ptr, len, "no_disc"))) uc->data &= ~SYM_DISC_ENABLED; else return -EINVAL; ptr += arg_len; len -= arg_len; } break; default: break; } if (len) return -EINVAL; else { unsigned long flags; spin_lock_irqsave(shost->host_lock, flags); sym_exec_user_command(np, uc); spin_unlock_irqrestore(shost->host_lock, flags); } return length; } #endif /* SYM_LINUX_USER_COMMAND_SUPPORT */ /* * Copy formatted information into the input buffer. */ static int sym_show_info(struct seq_file *m, struct Scsi_Host *shost) { #ifdef SYM_LINUX_USER_INFO_SUPPORT struct sym_data *sym_data = shost_priv(shost); struct pci_dev *pdev = sym_data->pdev; struct sym_hcb *np = sym_data->ncb; seq_printf(m, "Chip " NAME53C "%s, device id 0x%x, " "revision id 0x%x\n", np->s.chip_name, pdev->device, pdev->revision); seq_printf(m, "At PCI address %s, IRQ %u\n", pci_name(pdev), pdev->irq); seq_printf(m, "Min. period factor %d, %s SCSI BUS%s\n", (int) (np->minsync_dt ? np->minsync_dt : np->minsync), np->maxwide ? "Wide" : "Narrow", np->minsync_dt ? ", DT capable" : ""); seq_printf(m, "Max. started commands %d, " "max. commands per LUN %d\n", SYM_CONF_MAX_START, SYM_CONF_MAX_TAG); return 0; #else return -EINVAL; #endif /* SYM_LINUX_USER_INFO_SUPPORT */ } #endif /* SYM_LINUX_PROC_INFO_SUPPORT */ /* * Free resources claimed by sym_iomap_device(). Note that * sym_free_resources() should be used instead of this function after calling * sym_attach(). */ static void sym_iounmap_device(struct sym_device *device) { if (device->s.ioaddr) pci_iounmap(device->pdev, device->s.ioaddr); if (device->s.ramaddr) pci_iounmap(device->pdev, device->s.ramaddr); } /* * Free controller resources. */ static void sym_free_resources(struct sym_hcb *np, struct pci_dev *pdev, int do_free_irq) { /* * Free O/S specific resources. */ if (do_free_irq) free_irq(pdev->irq, np->s.host); if (np->s.ioaddr) pci_iounmap(pdev, np->s.ioaddr); if (np->s.ramaddr) pci_iounmap(pdev, np->s.ramaddr); /* * Free O/S independent resources. */ sym_hcb_free(np); sym_mfree_dma(np, sizeof(*np), "HCB"); } /* * Host attach and initialisations. * * Allocate host data and ncb structure. * Remap MMIO region. * Do chip initialization. * If all is OK, install interrupt handling and * start the timer daemon. */ static struct Scsi_Host *sym_attach(const struct scsi_host_template *tpnt, int unit, struct sym_device *dev) { struct sym_data *sym_data; struct sym_hcb *np = NULL; struct Scsi_Host *shost = NULL; struct pci_dev *pdev = dev->pdev; unsigned long flags; struct sym_fw *fw; int do_free_irq = 0; printk(KERN_INFO "sym%d: <%s> rev 0x%x at pci %s irq %u\n", unit, dev->chip.name, pdev->revision, pci_name(pdev), pdev->irq); /* * Get the firmware for this chip. 
*/ fw = sym_find_firmware(&dev->chip); if (!fw) goto attach_failed; shost = scsi_host_alloc(tpnt, sizeof(*sym_data)); if (!shost) goto attach_failed; sym_data = shost_priv(shost); /* * Allocate immediately the host control block, * since we are only expecting to succeed. :) * We keep track in the HCB of all the resources that * are to be released on error. */ np = __sym_calloc_dma(&pdev->dev, sizeof(*np), "HCB"); if (!np) goto attach_failed; np->bus_dmat = &pdev->dev; /* Result in 1 DMA pool per HBA */ sym_data->ncb = np; sym_data->pdev = pdev; np->s.host = shost; pci_set_drvdata(pdev, shost); /* * Copy some useful infos to the HCB. */ np->hcb_ba = vtobus(np); np->verbose = sym_driver_setup.verbose; np->s.unit = unit; np->features = dev->chip.features; np->clock_divn = dev->chip.nr_divisor; np->maxoffs = dev->chip.offset_max; np->maxburst = dev->chip.burst_max; np->myaddr = dev->host_id; np->mmio_ba = (u32)dev->mmio_base; np->ram_ba = (u32)dev->ram_base; np->s.ioaddr = dev->s.ioaddr; np->s.ramaddr = dev->s.ramaddr; /* * Edit its name. */ strscpy(np->s.chip_name, dev->chip.name, sizeof(np->s.chip_name)); sprintf(np->s.inst_name, "sym%d", np->s.unit); if ((SYM_CONF_DMA_ADDRESSING_MODE > 0) && (np->features & FE_DAC) && !dma_set_mask(&pdev->dev, DMA_DAC_MASK)) { set_dac(np); } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) { printf_warning("%s: No suitable DMA available\n", sym_name(np)); goto attach_failed; } if (sym_hcb_attach(shost, fw, dev->nvram)) goto attach_failed; /* * Install the interrupt handler. * If we synchonize the C code with SCRIPTS on interrupt, * we do not want to share the INTR line at all. */ if (request_irq(pdev->irq, sym53c8xx_intr, IRQF_SHARED, NAME53C8XX, shost)) { printf_err("%s: request irq %u failure\n", sym_name(np), pdev->irq); goto attach_failed; } do_free_irq = 1; /* * After SCSI devices have been opened, we cannot * reset the bus safely, so we do it here. */ spin_lock_irqsave(shost->host_lock, flags); if (sym_reset_scsi_bus(np, 0)) goto reset_failed; /* * Start the SCRIPTS. */ sym_start_up(shost, 1); /* * Start the timer daemon */ timer_setup(&np->s.timer, sym53c8xx_timer, 0); np->s.lasttime=0; sym_timer (np); /* * Fill Linux host instance structure * and return success. */ shost->max_channel = 0; shost->this_id = np->myaddr; shost->max_id = np->maxwide ? 16 : 8; shost->max_lun = SYM_CONF_MAX_LUN; shost->unique_id = pci_resource_start(pdev, 0); shost->cmd_per_lun = SYM_CONF_MAX_TAG; shost->can_queue = (SYM_CONF_MAX_START-2); shost->sg_tablesize = SYM_CONF_MAX_SG; shost->max_cmd_len = 16; BUG_ON(sym2_transport_template == NULL); shost->transportt = sym2_transport_template; /* 53c896 rev 1 errata: DMA may not cross 16MB boundary */ if (pdev->device == PCI_DEVICE_ID_NCR_53C896 && pdev->revision < 2) shost->dma_boundary = 0xFFFFFF; spin_unlock_irqrestore(shost->host_lock, flags); return shost; reset_failed: printf_err("%s: FATAL ERROR: CHECK SCSI BUS - CABLES, " "TERMINATION, DEVICE POWER etc.!\n", sym_name(np)); spin_unlock_irqrestore(shost->host_lock, flags); attach_failed: printf_info("sym%d: giving up ...\n", unit); if (np) sym_free_resources(np, pdev, do_free_irq); else sym_iounmap_device(dev); if (shost) scsi_host_put(shost); return NULL; } /* * Detect and try to read SYMBIOS and TEKRAM NVRAM. 
*/ #if SYM_CONF_NVRAM_SUPPORT static void sym_get_nvram(struct sym_device *devp, struct sym_nvram *nvp) { devp->nvram = nvp; nvp->type = 0; sym_read_nvram(devp, nvp); } #else static inline void sym_get_nvram(struct sym_device *devp, struct sym_nvram *nvp) { } #endif /* SYM_CONF_NVRAM_SUPPORT */ static int sym_check_supported(struct sym_device *device) { struct sym_chip *chip; struct pci_dev *pdev = device->pdev; unsigned long io_port = pci_resource_start(pdev, 0); int i; /* * If user excluded this chip, do not initialize it. * I hate this code so much. Must kill it. */ if (io_port) { for (i = 0 ; i < 8 ; i++) { if (sym_driver_setup.excludes[i] == io_port) return -ENODEV; } } /* * Check if the chip is supported. Then copy the chip description * to our device structure so we can make it match the actual device * and options. */ chip = sym_lookup_chip_table(pdev->device, pdev->revision); if (!chip) { dev_info(&pdev->dev, "device not supported\n"); return -ENODEV; } memcpy(&device->chip, chip, sizeof(device->chip)); return 0; } /* * Ignore Symbios chips controlled by various RAID controllers. * These controllers set value 0x52414944 at RAM end - 16. */ static int sym_check_raid(struct sym_device *device) { unsigned int ram_size, ram_val; if (!device->s.ramaddr) return 0; if (device->chip.features & FE_RAM8K) ram_size = 8192; else ram_size = 4096; ram_val = readl(device->s.ramaddr + ram_size - 16); if (ram_val != 0x52414944) return 0; dev_info(&device->pdev->dev, "not initializing, driven by RAID controller.\n"); return -ENODEV; } static int sym_set_workarounds(struct sym_device *device) { struct sym_chip *chip = &device->chip; struct pci_dev *pdev = device->pdev; u_short status_reg; /* * (ITEM 12 of a DEL about the 896 I haven't yet). * We must ensure the chip will use WRITE AND INVALIDATE. * The revision number limit is for now arbitrary. */ if (pdev->device == PCI_DEVICE_ID_NCR_53C896 && pdev->revision < 0x4) { chip->features |= (FE_WRIE | FE_CLSE); } /* If the chip can do Memory Write Invalidate, enable it */ if (chip->features & FE_WRIE) { if (pci_set_mwi(pdev)) return -ENODEV; } /* * Work around for errant bit in 895A. The 66Mhz * capable bit is set erroneously. Clear this bit. * (Item 1 DEL 533) * * Make sure Config space and Features agree. * * Recall: writes are not normal to status register - * write a 1 to clear and a 0 to leave unchanged. * Can only reset bits. */ pci_read_config_word(pdev, PCI_STATUS, &status_reg); if (chip->features & FE_66MHZ) { if (!(status_reg & PCI_STATUS_66MHZ)) chip->features &= ~FE_66MHZ; } else { if (status_reg & PCI_STATUS_66MHZ) { status_reg = PCI_STATUS_66MHZ; pci_write_config_word(pdev, PCI_STATUS, status_reg); pci_read_config_word(pdev, PCI_STATUS, &status_reg); } } return 0; } /* * Map HBA registers and on-chip SRAM (if present). 
*/ static int sym_iomap_device(struct sym_device *device) { struct pci_dev *pdev = device->pdev; struct pci_bus_region bus_addr; int i = 2; pcibios_resource_to_bus(pdev->bus, &bus_addr, &pdev->resource[1]); device->mmio_base = bus_addr.start; if (device->chip.features & FE_RAM) { /* * If the BAR is 64-bit, resource 2 will be occupied by the * upper 32 bits */ if (!pdev->resource[i].flags) i++; pcibios_resource_to_bus(pdev->bus, &bus_addr, &pdev->resource[i]); device->ram_base = bus_addr.start; } #ifdef CONFIG_SCSI_SYM53C8XX_MMIO if (device->mmio_base) device->s.ioaddr = pci_iomap(pdev, 1, pci_resource_len(pdev, 1)); #endif if (!device->s.ioaddr) device->s.ioaddr = pci_iomap(pdev, 0, pci_resource_len(pdev, 0)); if (!device->s.ioaddr) { dev_err(&pdev->dev, "could not map registers; giving up.\n"); return -EIO; } if (device->ram_base) { device->s.ramaddr = pci_iomap(pdev, i, pci_resource_len(pdev, i)); if (!device->s.ramaddr) { dev_warn(&pdev->dev, "could not map SRAM; continuing anyway.\n"); device->ram_base = 0; } } return 0; } /* * The NCR PQS and PDS cards are constructed as a DEC bridge * behind which sits a proprietary NCR memory controller and * either four or two 53c875s as separate devices. We can tell * if an 875 is part of a PQS/PDS or not since if it is, it will * be on the same bus as the memory controller. In its usual * mode of operation, the 875s are slaved to the memory * controller for all transfers. To operate with the Linux * driver, the memory controller is disabled and the 875s * freed to function independently. The only wrinkle is that * the preset SCSI ID (which may be zero) must be read in from * a special configuration space register of the 875. */ static void sym_config_pqs(struct pci_dev *pdev, struct sym_device *sym_dev) { int slot; u8 tmp; for (slot = 0; slot < 256; slot++) { struct pci_dev *memc = pci_get_slot(pdev->bus, slot); if (!memc || memc->vendor != 0x101a || memc->device == 0x0009) { pci_dev_put(memc); continue; } /* bit 1: allow individual 875 configuration */ pci_read_config_byte(memc, 0x44, &tmp); if ((tmp & 0x2) == 0) { tmp |= 0x2; pci_write_config_byte(memc, 0x44, tmp); } /* bit 2: drive individual 875 interrupts to the bus */ pci_read_config_byte(memc, 0x45, &tmp); if ((tmp & 0x4) == 0) { tmp |= 0x4; pci_write_config_byte(memc, 0x45, tmp); } pci_dev_put(memc); break; } pci_read_config_byte(pdev, 0x84, &tmp); sym_dev->host_id = tmp; } /* * Called before unloading the module. * Detach the host. * We have to free resources and halt the NCR chip. */ static int sym_detach(struct Scsi_Host *shost, struct pci_dev *pdev) { struct sym_hcb *np = sym_get_hcb(shost); printk("%s: detaching ...\n", sym_name(np)); del_timer_sync(&np->s.timer); /* * Reset NCR chip. * We should use sym_soft_reset(), but we don't want to do * so, since we may not be safe if interrupts occur. */ printk("%s: resetting chip\n", sym_name(np)); OUTB(np, nc_istat, SRST); INB(np, nc_mbox1); udelay(10); OUTB(np, nc_istat, 0); sym_free_resources(np, pdev, 1); scsi_host_put(shost); return 1; } /* * Driver host template. 
*/ static const struct scsi_host_template sym2_template = { .module = THIS_MODULE, .name = "sym53c8xx", .info = sym53c8xx_info, .cmd_size = sizeof(struct sym_ucmd), .queuecommand = sym53c8xx_queue_command, .slave_alloc = sym53c8xx_slave_alloc, .slave_configure = sym53c8xx_slave_configure, .slave_destroy = sym53c8xx_slave_destroy, .eh_abort_handler = sym53c8xx_eh_abort_handler, .eh_device_reset_handler = sym53c8xx_eh_device_reset_handler, .eh_bus_reset_handler = sym53c8xx_eh_bus_reset_handler, .eh_host_reset_handler = sym53c8xx_eh_host_reset_handler, .this_id = 7, .max_sectors = 0xFFFF, #ifdef SYM_LINUX_PROC_INFO_SUPPORT .show_info = sym_show_info, #ifdef SYM_LINUX_USER_COMMAND_SUPPORT .write_info = sym_user_command, #endif .proc_name = NAME53C8XX, #endif }; static int attach_count; static int sym2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct sym_device sym_dev; struct sym_nvram nvram; struct Scsi_Host *shost; int do_iounmap = 0; int do_disable_device = 1; memset(&sym_dev, 0, sizeof(sym_dev)); memset(&nvram, 0, sizeof(nvram)); sym_dev.pdev = pdev; sym_dev.host_id = SYM_SETUP_HOST_ID; if (pci_enable_device(pdev)) goto leave; pci_set_master(pdev); if (pci_request_regions(pdev, NAME53C8XX)) goto disable; if (sym_check_supported(&sym_dev)) goto free; if (sym_iomap_device(&sym_dev)) goto free; do_iounmap = 1; if (sym_check_raid(&sym_dev)) { do_disable_device = 0; /* Don't disable the device */ goto free; } if (sym_set_workarounds(&sym_dev)) goto free; sym_config_pqs(pdev, &sym_dev); sym_get_nvram(&sym_dev, &nvram); do_iounmap = 0; /* Don't sym_iounmap_device() after sym_attach(). */ shost = sym_attach(&sym2_template, attach_count, &sym_dev); if (!shost) goto free; if (scsi_add_host(shost, &pdev->dev)) goto detach; scsi_scan_host(shost); attach_count++; return 0; detach: sym_detach(pci_get_drvdata(pdev), pdev); free: if (do_iounmap) sym_iounmap_device(&sym_dev); pci_release_regions(pdev); disable: if (do_disable_device) pci_disable_device(pdev); leave: return -ENODEV; } static void sym2_remove(struct pci_dev *pdev) { struct Scsi_Host *shost = pci_get_drvdata(pdev); scsi_remove_host(shost); sym_detach(shost, pdev); pci_release_regions(pdev); pci_disable_device(pdev); attach_count--; } /** * sym2_io_error_detected() - called when PCI error is detected * @pdev: pointer to PCI device * @state: current state of the PCI slot */ static pci_ers_result_t sym2_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) { /* If slot is permanently frozen, turn everything off */ if (state == pci_channel_io_perm_failure) { sym2_remove(pdev); return PCI_ERS_RESULT_DISCONNECT; } disable_irq(pdev->irq); pci_disable_device(pdev); /* Request that MMIO be enabled, so register dump can be taken. */ return PCI_ERS_RESULT_CAN_RECOVER; } /** * sym2_io_slot_dump - Enable MMIO and dump debug registers * @pdev: pointer to PCI device */ static pci_ers_result_t sym2_io_slot_dump(struct pci_dev *pdev) { struct Scsi_Host *shost = pci_get_drvdata(pdev); sym_dump_registers(shost); /* Request a slot reset. */ return PCI_ERS_RESULT_NEED_RESET; } /** * sym2_reset_workarounds - hardware-specific work-arounds * @pdev: pointer to PCI device * * This routine is similar to sym_set_workarounds(), except * that, at this point, we already know that the device was * successfully initialized at least once before, and so most * of the steps taken there are un-needed here. 
*/ static void sym2_reset_workarounds(struct pci_dev *pdev) { u_short status_reg; struct sym_chip *chip; chip = sym_lookup_chip_table(pdev->device, pdev->revision); /* Work around for errant bit in 895A, in a fashion * similar to what is done in sym_set_workarounds(). */ pci_read_config_word(pdev, PCI_STATUS, &status_reg); if (!(chip->features & FE_66MHZ) && (status_reg & PCI_STATUS_66MHZ)) { status_reg = PCI_STATUS_66MHZ; pci_write_config_word(pdev, PCI_STATUS, status_reg); pci_read_config_word(pdev, PCI_STATUS, &status_reg); } } /** * sym2_io_slot_reset() - called when the pci bus has been reset. * @pdev: pointer to PCI device * * Restart the card from scratch. */ static pci_ers_result_t sym2_io_slot_reset(struct pci_dev *pdev) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct sym_hcb *np = sym_get_hcb(shost); printk(KERN_INFO "%s: recovering from a PCI slot reset\n", sym_name(np)); if (pci_enable_device(pdev)) { printk(KERN_ERR "%s: Unable to enable after PCI reset\n", sym_name(np)); return PCI_ERS_RESULT_DISCONNECT; } pci_set_master(pdev); enable_irq(pdev->irq); /* If the chip can do Memory Write Invalidate, enable it */ if (np->features & FE_WRIE) { if (pci_set_mwi(pdev)) return PCI_ERS_RESULT_DISCONNECT; } /* Perform work-arounds, analogous to sym_set_workarounds() */ sym2_reset_workarounds(pdev); /* Perform host reset only on one instance of the card */ if (PCI_FUNC(pdev->devfn) == 0) { if (sym_reset_scsi_bus(np, 0)) { printk(KERN_ERR "%s: Unable to reset scsi host\n", sym_name(np)); return PCI_ERS_RESULT_DISCONNECT; } sym_start_up(shost, 1); } return PCI_ERS_RESULT_RECOVERED; } /** * sym2_io_resume() - resume normal ops after PCI reset * @pdev: pointer to PCI device * * Called when the error recovery driver tells us that its * OK to resume normal operation. Use completion to allow * halted scsi ops to resume. */ static void sym2_io_resume(struct pci_dev *pdev) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct sym_data *sym_data = shost_priv(shost); spin_lock_irq(shost->host_lock); if (sym_data->io_reset) complete(sym_data->io_reset); spin_unlock_irq(shost->host_lock); } static void sym2_get_signalling(struct Scsi_Host *shost) { struct sym_hcb *np = sym_get_hcb(shost); enum spi_signal_type type; switch (np->scsi_mode) { case SMODE_SE: type = SPI_SIGNAL_SE; break; case SMODE_LVD: type = SPI_SIGNAL_LVD; break; case SMODE_HVD: type = SPI_SIGNAL_HVD; break; default: type = SPI_SIGNAL_UNKNOWN; break; } spi_signalling(shost) = type; } static void sym2_set_offset(struct scsi_target *starget, int offset) { struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); struct sym_hcb *np = sym_get_hcb(shost); struct sym_tcb *tp = &np->target[starget->id]; tp->tgoal.offset = offset; tp->tgoal.check_nego = 1; } static void sym2_set_period(struct scsi_target *starget, int period) { struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); struct sym_hcb *np = sym_get_hcb(shost); struct sym_tcb *tp = &np->target[starget->id]; /* have to have DT for these transfers, but DT will also * set width, so check that this is allowed */ if (period <= np->minsync && spi_width(starget)) tp->tgoal.dt = 1; tp->tgoal.period = period; tp->tgoal.check_nego = 1; } static void sym2_set_width(struct scsi_target *starget, int width) { struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); struct sym_hcb *np = sym_get_hcb(shost); struct sym_tcb *tp = &np->target[starget->id]; /* It is illegal to have DT set on narrow transfers. If DT is * clear, we must also clear IU and QAS. 
*/ if (width == 0) tp->tgoal.iu = tp->tgoal.dt = tp->tgoal.qas = 0; tp->tgoal.width = width; tp->tgoal.check_nego = 1; } static void sym2_set_dt(struct scsi_target *starget, int dt) { struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); struct sym_hcb *np = sym_get_hcb(shost); struct sym_tcb *tp = &np->target[starget->id]; /* We must clear QAS and IU if DT is clear */ if (dt) tp->tgoal.dt = 1; else tp->tgoal.iu = tp->tgoal.dt = tp->tgoal.qas = 0; tp->tgoal.check_nego = 1; } #if 0 static void sym2_set_iu(struct scsi_target *starget, int iu) { struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); struct sym_hcb *np = sym_get_hcb(shost); struct sym_tcb *tp = &np->target[starget->id]; if (iu) tp->tgoal.iu = tp->tgoal.dt = 1; else tp->tgoal.iu = 0; tp->tgoal.check_nego = 1; } static void sym2_set_qas(struct scsi_target *starget, int qas) { struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); struct sym_hcb *np = sym_get_hcb(shost); struct sym_tcb *tp = &np->target[starget->id]; if (qas) tp->tgoal.dt = tp->tgoal.qas = 1; else tp->tgoal.qas = 0; tp->tgoal.check_nego = 1; } #endif static struct spi_function_template sym2_transport_functions = { .set_offset = sym2_set_offset, .show_offset = 1, .set_period = sym2_set_period, .show_period = 1, .set_width = sym2_set_width, .show_width = 1, .set_dt = sym2_set_dt, .show_dt = 1, #if 0 .set_iu = sym2_set_iu, .show_iu = 1, .set_qas = sym2_set_qas, .show_qas = 1, #endif .get_signalling = sym2_get_signalling, }; static struct pci_device_id sym2_id_table[] = { { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C810, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, /* new */ { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C815, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C810AP, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, /* new */ { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C1510, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_SCSI<<8, 0xffff00, 0UL }, { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C896, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C895, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C885, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C875, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C1510, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_SCSI<<8, 0xffff00, 0UL }, /* new */ { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C895A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C875A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C1010_33, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C1010_66, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C875J, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, { 0, } }; MODULE_DEVICE_TABLE(pci, sym2_id_table); static const struct pci_error_handlers sym2_err_handler = { .error_detected = sym2_io_error_detected, .mmio_enabled = sym2_io_slot_dump, .slot_reset = sym2_io_slot_reset, .resume = sym2_io_resume, }; static struct pci_driver sym2_driver = { .name = NAME53C8XX, .id_table = sym2_id_table, .probe = sym2_probe, .remove = 
sym2_remove, .err_handler = &sym2_err_handler, }; static int __init sym2_init(void) { int error; sym2_setup_params(); sym2_transport_template = spi_attach_transport(&sym2_transport_functions); if (!sym2_transport_template) return -ENODEV; error = pci_register_driver(&sym2_driver); if (error) spi_release_transport(sym2_transport_template); return error; } static void __exit sym2_exit(void) { pci_unregister_driver(&sym2_driver); spi_release_transport(sym2_transport_template); } module_init(sym2_init); module_exit(sym2_exit);
linux-master
drivers/scsi/sym53c8xx_2/sym_glue.c
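/*
 * Illustrative sketch (not part of the driver source above): sym2_probe()
 * acquires PCI resources step by step and unwinds them in reverse order
 * through goto labels (detach/free/disable/leave), using flags such as
 * do_iounmap and do_disable_device to skip steps whose ownership has moved
 * elsewhere.  The minimal standalone program below shows only that unwind
 * pattern; acquire()/release() are hypothetical stand-ins, not functions
 * from this driver.
 */
#include <stdio.h>
#include <stdlib.h>

static int acquire(const char *what) { printf("acquire %s\n", what); return 0; }
static void release(const char *what) { printf("release %s\n", what); }

static int probe_like(void)
{
	int do_unmap = 0;

	if (acquire("device"))
		goto leave;
	if (acquire("regions"))
		goto disable;
	if (acquire("iomap"))
		goto free_regions;
	do_unmap = 1;

	/* Later failures would jump to free_regions; do_unmap decides whether
	 * the mapping still has to be undone here. */

	return 0;

free_regions:
	if (do_unmap)
		release("iomap");
	release("regions");
disable:
	release("device");
leave:
	return -1;
}

int main(void)
{
	return probe_like() ? EXIT_FAILURE : EXIT_SUCCESS;
}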
// SPDX-License-Identifier: GPL-2.0-or-later /* * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family * of PCI-SCSI IO processors. * * Copyright (C) 1999-2001 Gerard Roudier <[email protected]> * * This driver is derived from the Linux sym53c8xx driver. * Copyright (C) 1998-2000 Gerard Roudier * * The sym53c8xx driver is derived from the ncr53c8xx driver that had been * a port of the FreeBSD ncr driver to Linux-1.2.13. * * The original ncr driver has been written for 386bsd and FreeBSD by * Wolfgang Stanglmeier <[email protected]> * Stefan Esser <[email protected]> * Copyright (C) 1994 Wolfgang Stanglmeier * * Other major contributions: * * NVRAM detection and reading. * Copyright (C) 1997 Richard Waltham <[email protected]> * *----------------------------------------------------------------------------- */ #include "sym_glue.h" /* * Simple power of two buddy-like generic allocator. * Provides naturally aligned memory chunks. * * This simple code is not intended to be fast, but to * provide power of 2 aligned memory allocations. * Since the SCRIPTS processor only supplies 8 bit arithmetic, * this allocator allows simple and fast address calculations * from the SCRIPTS code. In addition, cache line alignment * is guaranteed for power of 2 cache line size. * * This allocator has been developed for the Linux sym53c8xx * driver, since this O/S does not provide naturally aligned * allocations. * It has the advantage of allowing the driver to use private * pages of memory that will be useful if we ever need to deal * with IO MMUs for PCI. */ static void *___sym_malloc(m_pool_p mp, int size) { int i = 0; int s = (1 << SYM_MEM_SHIFT); int j; void *a; m_link_p h = mp->h; if (size > SYM_MEM_CLUSTER_SIZE) return NULL; while (size > s) { s <<= 1; ++i; } j = i; while (!h[j].next) { if (s == SYM_MEM_CLUSTER_SIZE) { h[j].next = (m_link_p) M_GET_MEM_CLUSTER(); if (h[j].next) h[j].next->next = NULL; break; } ++j; s <<= 1; } a = h[j].next; if (a) { h[j].next = h[j].next->next; while (j > i) { j -= 1; s >>= 1; h[j].next = (m_link_p) (a+s); h[j].next->next = NULL; } } #ifdef DEBUG printf("___sym_malloc(%d) = %p\n", size, (void *) a); #endif return a; } /* * Counter-part of the generic allocator. */ static void ___sym_mfree(m_pool_p mp, void *ptr, int size) { int i = 0; int s = (1 << SYM_MEM_SHIFT); m_link_p q; unsigned long a, b; m_link_p h = mp->h; #ifdef DEBUG printf("___sym_mfree(%p, %d)\n", ptr, size); #endif if (size > SYM_MEM_CLUSTER_SIZE) return; while (size > s) { s <<= 1; ++i; } a = (unsigned long)ptr; while (1) { if (s == SYM_MEM_CLUSTER_SIZE) { #ifdef SYM_MEM_FREE_UNUSED M_FREE_MEM_CLUSTER((void *)a); #else ((m_link_p) a)->next = h[i].next; h[i].next = (m_link_p) a; #endif break; } b = a ^ s; q = &h[i]; while (q->next && q->next != (m_link_p) b) { q = q->next; } if (!q->next) { ((m_link_p) a)->next = h[i].next; h[i].next = (m_link_p) a; break; } q->next = q->next->next; a = a & b; s <<= 1; ++i; } } /* * Verbose and zeroing allocator that wrapps to the generic allocator. */ static void *__sym_calloc2(m_pool_p mp, int size, char *name, int uflags) { void *p; p = ___sym_malloc(mp, size); if (DEBUG_FLAGS & DEBUG_ALLOC) { printf ("new %-10s[%4d] @%p.\n", name, size, p); } if (p) memset(p, 0, size); else if (uflags & SYM_MEM_WARN) printf ("__sym_calloc2: failed to allocate %s[%d]\n", name, size); return p; } #define __sym_calloc(mp, s, n) __sym_calloc2(mp, s, n, SYM_MEM_WARN) /* * Its counter-part. 
*/ static void __sym_mfree(m_pool_p mp, void *ptr, int size, char *name) { if (DEBUG_FLAGS & DEBUG_ALLOC) printf ("freeing %-10s[%4d] @%p.\n", name, size, ptr); ___sym_mfree(mp, ptr, size); } /* * Default memory pool we donnot need to involve in DMA. * * With DMA abstraction, we use functions (methods), to * distinguish between non DMAable memory and DMAable memory. */ static void *___mp0_get_mem_cluster(m_pool_p mp) { void *m = sym_get_mem_cluster(); if (m) ++mp->nump; return m; } #ifdef SYM_MEM_FREE_UNUSED static void ___mp0_free_mem_cluster(m_pool_p mp, void *m) { sym_free_mem_cluster(m); --mp->nump; } #else #define ___mp0_free_mem_cluster NULL #endif static struct sym_m_pool mp0 = { NULL, ___mp0_get_mem_cluster, ___mp0_free_mem_cluster }; /* * Methods that maintains DMAable pools according to user allocations. * New pools are created on the fly when a new pool id is provided. * They are deleted on the fly when they get emptied. */ /* Get a memory cluster that matches the DMA constraints of a given pool */ static void * ___get_dma_mem_cluster(m_pool_p mp) { m_vtob_p vbp; void *vaddr; vbp = __sym_calloc(&mp0, sizeof(*vbp), "VTOB"); if (!vbp) goto out_err; vaddr = sym_m_get_dma_mem_cluster(mp, vbp); if (vaddr) { int hc = VTOB_HASH_CODE(vaddr); vbp->next = mp->vtob[hc]; mp->vtob[hc] = vbp; ++mp->nump; } return vaddr; out_err: return NULL; } #ifdef SYM_MEM_FREE_UNUSED /* Free a memory cluster and associated resources for DMA */ static void ___free_dma_mem_cluster(m_pool_p mp, void *m) { m_vtob_p *vbpp, vbp; int hc = VTOB_HASH_CODE(m); vbpp = &mp->vtob[hc]; while (*vbpp && (*vbpp)->vaddr != m) vbpp = &(*vbpp)->next; if (*vbpp) { vbp = *vbpp; *vbpp = (*vbpp)->next; sym_m_free_dma_mem_cluster(mp, vbp); __sym_mfree(&mp0, vbp, sizeof(*vbp), "VTOB"); --mp->nump; } } #endif /* Fetch the memory pool for a given pool id (i.e. DMA constraints) */ static inline m_pool_p ___get_dma_pool(m_pool_ident_t dev_dmat) { m_pool_p mp; for (mp = mp0.next; mp && !sym_m_pool_match(mp->dev_dmat, dev_dmat); mp = mp->next); return mp; } /* Create a new memory DMAable pool (when fetch failed) */ static m_pool_p ___cre_dma_pool(m_pool_ident_t dev_dmat) { m_pool_p mp = __sym_calloc(&mp0, sizeof(*mp), "MPOOL"); if (mp) { mp->dev_dmat = dev_dmat; mp->get_mem_cluster = ___get_dma_mem_cluster; #ifdef SYM_MEM_FREE_UNUSED mp->free_mem_cluster = ___free_dma_mem_cluster; #endif mp->next = mp0.next; mp0.next = mp; return mp; } return NULL; } #ifdef SYM_MEM_FREE_UNUSED /* Destroy a DMAable memory pool (when got emptied) */ static void ___del_dma_pool(m_pool_p p) { m_pool_p *pp = &mp0.next; while (*pp && *pp != p) pp = &(*pp)->next; if (*pp) { *pp = (*pp)->next; __sym_mfree(&mp0, p, sizeof(*p), "MPOOL"); } } #endif /* This lock protects only the memory allocation/free. */ static DEFINE_SPINLOCK(sym53c8xx_lock); /* * Actual allocator for DMAable memory. 
*/ void *__sym_calloc_dma(m_pool_ident_t dev_dmat, int size, char *name) { unsigned long flags; m_pool_p mp; void *m = NULL; spin_lock_irqsave(&sym53c8xx_lock, flags); mp = ___get_dma_pool(dev_dmat); if (!mp) mp = ___cre_dma_pool(dev_dmat); if (!mp) goto out; m = __sym_calloc(mp, size, name); #ifdef SYM_MEM_FREE_UNUSED if (!mp->nump) ___del_dma_pool(mp); #endif out: spin_unlock_irqrestore(&sym53c8xx_lock, flags); return m; } void __sym_mfree_dma(m_pool_ident_t dev_dmat, void *m, int size, char *name) { unsigned long flags; m_pool_p mp; spin_lock_irqsave(&sym53c8xx_lock, flags); mp = ___get_dma_pool(dev_dmat); if (!mp) goto out; __sym_mfree(mp, m, size, name); #ifdef SYM_MEM_FREE_UNUSED if (!mp->nump) ___del_dma_pool(mp); #endif out: spin_unlock_irqrestore(&sym53c8xx_lock, flags); } /* * Actual virtual to bus physical address translator * for 32 bit addressable DMAable memory. */ dma_addr_t __vtobus(m_pool_ident_t dev_dmat, void *m) { unsigned long flags; m_pool_p mp; int hc = VTOB_HASH_CODE(m); m_vtob_p vp = NULL; void *a = (void *)((unsigned long)m & ~SYM_MEM_CLUSTER_MASK); dma_addr_t b; spin_lock_irqsave(&sym53c8xx_lock, flags); mp = ___get_dma_pool(dev_dmat); if (mp) { vp = mp->vtob[hc]; while (vp && vp->vaddr != a) vp = vp->next; } if (!vp) panic("sym: VTOBUS FAILED!\n"); b = vp->baddr + (m - a); spin_unlock_irqrestore(&sym53c8xx_lock, flags); return b; }
linux-master
drivers/scsi/sym53c8xx_2/sym_malloc.c
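/*
 * Illustrative sketch (not part of the driver source above): the allocator
 * in sym_malloc.c hands out naturally aligned power-of-two chunks and, on
 * free, finds a chunk's "buddy" with b = a ^ s so that two adjacent free
 * chunks of size s coalesce into one chunk of size 2*s starting at a & b.
 * The standalone program below demonstrates only that address arithmetic
 * with made-up sizes and addresses; round_up_pow2() mirrors the
 * "while (size > s) s <<= 1" loop of ___sym_malloc() and is not a driver
 * function.
 */
#include <stdio.h>

#define MEM_SHIFT	4			/* smallest chunk: 16 bytes (example value) */

static unsigned long round_up_pow2(unsigned long size)
{
	unsigned long s = 1UL << MEM_SHIFT;

	while (size > s)
		s <<= 1;
	return s;
}

int main(void)
{
	unsigned long base = 0x10000;		/* pretend cluster base (aligned) */
	unsigned long size = 100;		/* requested bytes */
	unsigned long s = round_up_pow2(size);	/* chunk size actually used */
	unsigned long a = base + 2 * s;		/* a chunk inside the cluster */
	unsigned long b = a ^ s;		/* its buddy, as in ___sym_mfree() */

	printf("request %lu -> chunk of %lu bytes\n", size, s);
	printf("chunk  @ 0x%lx\n", a);
	printf("buddy  @ 0x%lx\n", b);
	printf("merged @ 0x%lx, size %lu\n", a & b, 2 * s);
	return 0;
}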
// SPDX-License-Identifier: GPL-2.0-or-later /* * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family * of PCI-SCSI IO processors. * * Copyright (C) 1999-2001 Gerard Roudier <[email protected]> * Copyright (c) 2003-2005 Matthew Wilcox <[email protected]> * * This driver is derived from the Linux sym53c8xx driver. * Copyright (C) 1998-2000 Gerard Roudier * * The sym53c8xx driver is derived from the ncr53c8xx driver that had been * a port of the FreeBSD ncr driver to Linux-1.2.13. * * The original ncr driver has been written for 386bsd and FreeBSD by * Wolfgang Stanglmeier <[email protected]> * Stefan Esser <[email protected]> * Copyright (C) 1994 Wolfgang Stanglmeier * * Other major contributions: * * NVRAM detection and reading. * Copyright (C) 1997 Richard Waltham <[email protected]> * *----------------------------------------------------------------------------- */ #include <linux/slab.h> #include <asm/param.h> /* for timeouts in units of HZ */ #include "sym_glue.h" #include "sym_nvram.h" #if 0 #define SYM_DEBUG_GENERIC_SUPPORT #endif /* * Needed function prototypes. */ static void sym_int_ma (struct sym_hcb *np); static void sym_int_sir(struct sym_hcb *); static struct sym_ccb *sym_alloc_ccb(struct sym_hcb *np); static struct sym_ccb *sym_ccb_from_dsa(struct sym_hcb *np, u32 dsa); static void sym_alloc_lcb_tags (struct sym_hcb *np, u_char tn, u_char ln); static void sym_complete_error (struct sym_hcb *np, struct sym_ccb *cp); static void sym_complete_ok (struct sym_hcb *np, struct sym_ccb *cp); static int sym_compute_residual(struct sym_hcb *np, struct sym_ccb *cp); /* * Print a buffer in hexadecimal format with a ".\n" at end. */ static void sym_printl_hex(u_char *p, int n) { while (n-- > 0) printf (" %x", *p++); printf (".\n"); } static void sym_print_msg(struct sym_ccb *cp, char *label, u_char *msg) { sym_print_addr(cp->cmd, "%s: ", label); spi_print_msg(msg); printf("\n"); } static void sym_print_nego_msg(struct sym_hcb *np, int target, char *label, u_char *msg) { struct sym_tcb *tp = &np->target[target]; dev_info(&tp->starget->dev, "%s: ", label); spi_print_msg(msg); printf("\n"); } /* * Print something that tells about extended errors. */ void sym_print_xerr(struct scsi_cmnd *cmd, int x_status) { if (x_status & XE_PARITY_ERR) { sym_print_addr(cmd, "unrecovered SCSI parity error.\n"); } if (x_status & XE_EXTRA_DATA) { sym_print_addr(cmd, "extraneous data discarded.\n"); } if (x_status & XE_BAD_PHASE) { sym_print_addr(cmd, "illegal scsi phase (4/5).\n"); } if (x_status & XE_SODL_UNRUN) { sym_print_addr(cmd, "ODD transfer in DATA OUT phase.\n"); } if (x_status & XE_SWIDE_OVRUN) { sym_print_addr(cmd, "ODD transfer in DATA IN phase.\n"); } } /* * Return a string for SCSI BUS mode. */ static char *sym_scsi_bus_mode(int mode) { switch(mode) { case SMODE_HVD: return "HVD"; case SMODE_SE: return "SE"; case SMODE_LVD: return "LVD"; } return "??"; } /* * Soft reset the chip. * * Raising SRST when the chip is running may cause * problems on dual function chips (see below). * On the other hand, LVD devices need some delay * to settle and report actual BUS mode in STEST4. */ static void sym_chip_reset (struct sym_hcb *np) { OUTB(np, nc_istat, SRST); INB(np, nc_mbox1); udelay(10); OUTB(np, nc_istat, 0); INB(np, nc_mbox1); udelay(2000); /* For BUS MODE to settle */ } /* * Really soft reset the chip.:) * * Some 896 and 876 chip revisions may hang-up if we set * the SRST (soft reset) bit at the wrong time when SCRIPTS * are running. 
* So, we need to abort the current operation prior to * soft resetting the chip. */ static void sym_soft_reset (struct sym_hcb *np) { u_char istat = 0; int i; if (!(np->features & FE_ISTAT1) || !(INB(np, nc_istat1) & SCRUN)) goto do_chip_reset; OUTB(np, nc_istat, CABRT); for (i = 100000 ; i ; --i) { istat = INB(np, nc_istat); if (istat & SIP) { INW(np, nc_sist); } else if (istat & DIP) { if (INB(np, nc_dstat) & ABRT) break; } udelay(5); } OUTB(np, nc_istat, 0); if (!i) printf("%s: unable to abort current chip operation, " "ISTAT=0x%02x.\n", sym_name(np), istat); do_chip_reset: sym_chip_reset(np); } /* * Start reset process. * * The interrupt handler will reinitialize the chip. */ static void sym_start_reset(struct sym_hcb *np) { sym_reset_scsi_bus(np, 1); } int sym_reset_scsi_bus(struct sym_hcb *np, int enab_int) { u32 term; int retv = 0; sym_soft_reset(np); /* Soft reset the chip */ if (enab_int) OUTW(np, nc_sien, RST); /* * Enable Tolerant, reset IRQD if present and * properly set IRQ mode, prior to resetting the bus. */ OUTB(np, nc_stest3, TE); OUTB(np, nc_dcntl, (np->rv_dcntl & IRQM)); OUTB(np, nc_scntl1, CRST); INB(np, nc_mbox1); udelay(200); if (!SYM_SETUP_SCSI_BUS_CHECK) goto out; /* * Check for no terminators or SCSI bus shorts to ground. * Read SCSI data bus, data parity bits and control signals. * We are expecting RESET to be TRUE and other signals to be * FALSE. */ term = INB(np, nc_sstat0); term = ((term & 2) << 7) + ((term & 1) << 17); /* rst sdp0 */ term |= ((INB(np, nc_sstat2) & 0x01) << 26) | /* sdp1 */ ((INW(np, nc_sbdl) & 0xff) << 9) | /* d7-0 */ ((INW(np, nc_sbdl) & 0xff00) << 10) | /* d15-8 */ INB(np, nc_sbcl); /* req ack bsy sel atn msg cd io */ if (!np->maxwide) term &= 0x3ffff; if (term != (2<<7)) { printf("%s: suspicious SCSI data while resetting the BUS.\n", sym_name(np)); printf("%s: %sdp0,d7-0,rst,req,ack,bsy,sel,atn,msg,c/d,i/o = " "0x%lx, expecting 0x%lx\n", sym_name(np), (np->features & FE_WIDE) ? "dp1,d15-8," : "", (u_long)term, (u_long)(2<<7)); if (SYM_SETUP_SCSI_BUS_CHECK == 1) retv = 1; } out: OUTB(np, nc_scntl1, 0); return retv; } /* * Select SCSI clock frequency */ static void sym_selectclock(struct sym_hcb *np, u_char scntl3) { /* * If multiplier not present or not selected, leave here. */ if (np->multiplier <= 1) { OUTB(np, nc_scntl3, scntl3); return; } if (sym_verbose >= 2) printf ("%s: enabling clock multiplier\n", sym_name(np)); OUTB(np, nc_stest1, DBLEN); /* Enable clock multiplier */ /* * Wait for the LCKFRQ bit to be set if supported by the chip. * Otherwise wait 50 micro-seconds (at least). */ if (np->features & FE_LCKFRQ) { int i = 20; while (!(INB(np, nc_stest4) & LCKFRQ) && --i > 0) udelay(20); if (!i) printf("%s: the chip cannot lock the frequency\n", sym_name(np)); } else { INB(np, nc_mbox1); udelay(50+10); } OUTB(np, nc_stest3, HSC); /* Halt the scsi clock */ OUTB(np, nc_scntl3, scntl3); OUTB(np, nc_stest1, (DBLEN|DBLSEL));/* Select clock multiplier */ OUTB(np, nc_stest3, 0x00); /* Restart scsi clock */ } /* * Determine the chip's clock frequency. * * This is essential for the negotiation of the synchronous * transfer rate. * * Note: we have to return the correct value. * THERE IS NO SAFE DEFAULT VALUE. * * Most NCR/SYMBIOS boards are delivered with a 40 Mhz clock. * 53C860 and 53C875 rev. 1 support fast20 transfers but * do not have a clock doubler and so are provided with a * 80 MHz clock. All other fast20 boards incorporate a doubler * and so should be delivered with a 40 MHz clock. 
* The recent fast40 chips (895/896/895A/1010) use a 40 Mhz base * clock and provide a clock quadrupler (160 Mhz). */ /* * calculate SCSI clock frequency (in KHz) */ static unsigned getfreq (struct sym_hcb *np, int gen) { unsigned int ms = 0; unsigned int f; /* * Measure GEN timer delay in order * to calculate SCSI clock frequency * * This code will never execute too * many loop iterations (if DELAY is * reasonably correct). It could get * too low a delay (too high a freq.) * if the CPU is slow executing the * loop for some reason (an NMI, for * example). For this reason we will * if multiple measurements are to be * performed trust the higher delay * (lower frequency returned). */ OUTW(np, nc_sien, 0); /* mask all scsi interrupts */ INW(np, nc_sist); /* clear pending scsi interrupt */ OUTB(np, nc_dien, 0); /* mask all dma interrupts */ INW(np, nc_sist); /* another one, just to be sure :) */ /* * The C1010-33 core does not report GEN in SIST, * if this interrupt is masked in SIEN. * I don't know yet if the C1010-66 behaves the same way. */ if (np->features & FE_C10) { OUTW(np, nc_sien, GEN); OUTB(np, nc_istat1, SIRQD); } OUTB(np, nc_scntl3, 4); /* set pre-scaler to divide by 3 */ OUTB(np, nc_stime1, 0); /* disable general purpose timer */ OUTB(np, nc_stime1, gen); /* set to nominal delay of 1<<gen * 125us */ while (!(INW(np, nc_sist) & GEN) && ms++ < 100000) udelay(1000/4); /* count in 1/4 of ms */ OUTB(np, nc_stime1, 0); /* disable general purpose timer */ /* * Undo C1010-33 specific settings. */ if (np->features & FE_C10) { OUTW(np, nc_sien, 0); OUTB(np, nc_istat1, 0); } /* * set prescaler to divide by whatever 0 means * 0 ought to choose divide by 2, but appears * to set divide by 3.5 mode in my 53c810 ... */ OUTB(np, nc_scntl3, 0); /* * adjust for prescaler, and convert into KHz */ f = ms ? ((1 << gen) * (4340*4)) / ms : 0; /* * The C1010-33 result is biased by a factor * of 2/3 compared to earlier chips. */ if (np->features & FE_C10) f = (f * 2) / 3; if (sym_verbose >= 2) printf ("%s: Delay (GEN=%d): %u msec, %u KHz\n", sym_name(np), gen, ms/4, f); return f; } static unsigned sym_getfreq (struct sym_hcb *np) { u_int f1, f2; int gen = 8; getfreq (np, gen); /* throw away first result */ f1 = getfreq (np, gen); f2 = getfreq (np, gen); if (f1 > f2) f1 = f2; /* trust lower result */ return f1; } /* * Get/probe chip SCSI clock frequency */ static void sym_getclock (struct sym_hcb *np, int mult) { unsigned char scntl3 = np->sv_scntl3; unsigned char stest1 = np->sv_stest1; unsigned f1; np->multiplier = 1; f1 = 40000; /* * True with 875/895/896/895A with clock multiplier selected */ if (mult > 1 && (stest1 & (DBLEN+DBLSEL)) == DBLEN+DBLSEL) { if (sym_verbose >= 2) printf ("%s: clock multiplier found\n", sym_name(np)); np->multiplier = mult; } /* * If multiplier not found or scntl3 not 7,5,3, * reset chip and get frequency from general purpose timer. * Otherwise trust scntl3 BIOS setting. */ if (np->multiplier != mult || (scntl3 & 7) < 3 || !(scntl3 & 1)) { OUTB(np, nc_stest1, 0); /* make sure doubler is OFF */ f1 = sym_getfreq (np); if (sym_verbose) printf ("%s: chip clock is %uKHz\n", sym_name(np), f1); if (f1 < 45000) f1 = 40000; else if (f1 < 55000) f1 = 50000; else f1 = 80000; if (f1 < 80000 && mult > 1) { if (sym_verbose >= 2) printf ("%s: clock multiplier assumed\n", sym_name(np)); np->multiplier = mult; } } else { if ((scntl3 & 7) == 3) f1 = 40000; else if ((scntl3 & 7) == 5) f1 = 80000; else f1 = 160000; f1 /= np->multiplier; } /* * Compute controller synchronous parameters. 
*/ f1 *= np->multiplier; np->clock_khz = f1; } /* * Get/probe PCI clock frequency */ static int sym_getpciclock (struct sym_hcb *np) { int f = 0; /* * For now, we only need to know about the actual * PCI BUS clock frequency for C1010-66 chips. */ #if 1 if (np->features & FE_66MHZ) { #else if (1) { #endif OUTB(np, nc_stest1, SCLK); /* Use the PCI clock as SCSI clock */ f = sym_getfreq(np); OUTB(np, nc_stest1, 0); } np->pciclk_khz = f; return f; } /* * SYMBIOS chip clock divisor table. * * Divisors are multiplied by 10,000,000 in order to make * calculations more simple. */ #define _5M 5000000 static const u32 div_10M[] = {2*_5M, 3*_5M, 4*_5M, 6*_5M, 8*_5M, 12*_5M, 16*_5M}; /* * Get clock factor and sync divisor for a given * synchronous factor period. */ static int sym_getsync(struct sym_hcb *np, u_char dt, u_char sfac, u_char *divp, u_char *fakp) { u32 clk = np->clock_khz; /* SCSI clock frequency in kHz */ int div = np->clock_divn; /* Number of divisors supported */ u32 fak; /* Sync factor in sxfer */ u32 per; /* Period in tenths of ns */ u32 kpc; /* (per * clk) */ int ret; /* * Compute the synchronous period in tenths of nano-seconds */ if (dt && sfac <= 9) per = 125; else if (sfac <= 10) per = 250; else if (sfac == 11) per = 303; else if (sfac == 12) per = 500; else per = 40 * sfac; ret = per; kpc = per * clk; if (dt) kpc <<= 1; /* * For earliest C10 revision 0, we cannot use extra * clocks for the setting of the SCSI clocking. * Note that this limits the lowest sync data transfer * to 5 Mega-transfers per second and may result in * using higher clock divisors. */ #if 1 if ((np->features & (FE_C10|FE_U3EN)) == FE_C10) { /* * Look for the lowest clock divisor that allows an * output speed not faster than the period. */ while (div > 0) { --div; if (kpc > (div_10M[div] << 2)) { ++div; break; } } fak = 0; /* No extra clocks */ if (div == np->clock_divn) { /* Are we too fast ? */ ret = -1; } *divp = div; *fakp = fak; return ret; } #endif /* * Look for the greatest clock divisor that allows an * input speed faster than the period. */ while (--div > 0) if (kpc >= (div_10M[div] << 2)) break; /* * Calculate the lowest clock factor that allows an output * speed not faster than the period, and the max output speed. * If fak >= 1 we will set both XCLKH_ST and XCLKH_DT. * If fak >= 2 we will also set XCLKS_ST and XCLKS_DT. */ if (dt) { fak = (kpc - 1) / (div_10M[div] << 1) + 1 - 2; /* ret = ((2+fak)*div_10M[div])/np->clock_khz; */ } else { fak = (kpc - 1) / div_10M[div] + 1 - 4; /* ret = ((4+fak)*div_10M[div])/np->clock_khz; */ } /* * Check against our hardware limits, or bugs :). */ if (fak > 2) { fak = 2; ret = -1; } /* * Compute and return sync parameters. */ *divp = div; *fakp = fak; return ret; } /* * SYMBIOS chips allow burst lengths of 2, 4, 8, 16, 32, 64, * 128 transfers. All chips support at least 16 transfers * bursts. The 825A, 875 and 895 chips support bursts of up * to 128 transfers and the 895A and 896 support bursts of up * to 64 transfers. All other chips support up to 16 * transfers bursts. * * For PCI 32 bit data transfers each transfer is a DWORD. * It is a QUADWORD (8 bytes) for PCI 64 bit data transfers. * * We use log base 2 (burst length) as internal code, with * value 0 meaning "burst disabled". */ /* * Burst length from burst code. */ #define burst_length(bc) (!(bc))? 0 : 1 << (bc) /* * Burst code from io register bits. */ #define burst_code(dmode, ctest4, ctest5) \ (ctest4) & 0x80? 
0 : (((dmode) & 0xc0) >> 6) + ((ctest5) & 0x04) + 1 /* * Set initial io register bits from burst code. */ static inline void sym_init_burst(struct sym_hcb *np, u_char bc) { np->rv_ctest4 &= ~0x80; np->rv_dmode &= ~(0x3 << 6); np->rv_ctest5 &= ~0x4; if (!bc) { np->rv_ctest4 |= 0x80; } else { --bc; np->rv_dmode |= ((bc & 0x3) << 6); np->rv_ctest5 |= (bc & 0x4); } } /* * Save initial settings of some IO registers. * Assumed to have been set by BIOS. * We cannot reset the chip prior to reading the * IO registers, since informations will be lost. * Since the SCRIPTS processor may be running, this * is not safe on paper, but it seems to work quite * well. :) */ static void sym_save_initial_setting (struct sym_hcb *np) { np->sv_scntl0 = INB(np, nc_scntl0) & 0x0a; np->sv_scntl3 = INB(np, nc_scntl3) & 0x07; np->sv_dmode = INB(np, nc_dmode) & 0xce; np->sv_dcntl = INB(np, nc_dcntl) & 0xa8; np->sv_ctest3 = INB(np, nc_ctest3) & 0x01; np->sv_ctest4 = INB(np, nc_ctest4) & 0x80; np->sv_gpcntl = INB(np, nc_gpcntl); np->sv_stest1 = INB(np, nc_stest1); np->sv_stest2 = INB(np, nc_stest2) & 0x20; np->sv_stest4 = INB(np, nc_stest4); if (np->features & FE_C10) { /* Always large DMA fifo + ultra3 */ np->sv_scntl4 = INB(np, nc_scntl4); np->sv_ctest5 = INB(np, nc_ctest5) & 0x04; } else np->sv_ctest5 = INB(np, nc_ctest5) & 0x24; } /* * Set SCSI BUS mode. * - LVD capable chips (895/895A/896/1010) report the current BUS mode * through the STEST4 IO register. * - For previous generation chips (825/825A/875), the user has to tell us * how to check against HVD, since a 100% safe algorithm is not possible. */ static void sym_set_bus_mode(struct sym_hcb *np, struct sym_nvram *nvram) { if (np->scsi_mode) return; np->scsi_mode = SMODE_SE; if (np->features & (FE_ULTRA2|FE_ULTRA3)) np->scsi_mode = (np->sv_stest4 & SMODE); else if (np->features & FE_DIFF) { if (SYM_SETUP_SCSI_DIFF == 1) { if (np->sv_scntl3) { if (np->sv_stest2 & 0x20) np->scsi_mode = SMODE_HVD; } else if (nvram->type == SYM_SYMBIOS_NVRAM) { if (!(INB(np, nc_gpreg) & 0x08)) np->scsi_mode = SMODE_HVD; } } else if (SYM_SETUP_SCSI_DIFF == 2) np->scsi_mode = SMODE_HVD; } if (np->scsi_mode == SMODE_HVD) np->rv_stest2 |= 0x20; } /* * Prepare io register values used by sym_start_up() * according to selected and supported features. */ static int sym_prepare_setting(struct Scsi_Host *shost, struct sym_hcb *np, struct sym_nvram *nvram) { struct sym_data *sym_data = shost_priv(shost); struct pci_dev *pdev = sym_data->pdev; u_char burst_max; u32 period; int i; np->maxwide = (np->features & FE_WIDE) ? 1 : 0; /* * Guess the frequency of the chip's clock. */ if (np->features & (FE_ULTRA3 | FE_ULTRA2)) np->clock_khz = 160000; else if (np->features & FE_ULTRA) np->clock_khz = 80000; else np->clock_khz = 40000; /* * Get the clock multiplier factor. */ if (np->features & FE_QUAD) np->multiplier = 4; else if (np->features & FE_DBLR) np->multiplier = 2; else np->multiplier = 1; /* * Measure SCSI clock frequency for chips * it may vary from assumed one. */ if (np->features & FE_VARCLK) sym_getclock(np, np->multiplier); /* * Divisor to be used for async (timer pre-scaler). */ i = np->clock_divn - 1; while (--i >= 0) { if (10ul * SYM_CONF_MIN_ASYNC * np->clock_khz > div_10M[i]) { ++i; break; } } np->rv_scntl3 = i+1; /* * The C1010 uses hardwired divisors for async. * So, we just throw away, the async. divisor.:-) */ if (np->features & FE_C10) np->rv_scntl3 = 0; /* * Minimum synchronous period factor supported by the chip. * Btw, 'period' is in tenths of nanoseconds. 
*/ period = (4 * div_10M[0] + np->clock_khz - 1) / np->clock_khz; if (period <= 250) np->minsync = 10; else if (period <= 303) np->minsync = 11; else if (period <= 500) np->minsync = 12; else np->minsync = (period + 40 - 1) / 40; /* * Check against chip SCSI standard support (SCSI-2,ULTRA,ULTRA2). */ if (np->minsync < 25 && !(np->features & (FE_ULTRA|FE_ULTRA2|FE_ULTRA3))) np->minsync = 25; else if (np->minsync < 12 && !(np->features & (FE_ULTRA2|FE_ULTRA3))) np->minsync = 12; /* * Maximum synchronous period factor supported by the chip. */ period = div64_ul(11 * div_10M[np->clock_divn - 1], 4 * np->clock_khz); np->maxsync = period > 2540 ? 254 : period / 10; /* * If chip is a C1010, guess the sync limits in DT mode. */ if ((np->features & (FE_C10|FE_ULTRA3)) == (FE_C10|FE_ULTRA3)) { if (np->clock_khz == 160000) { np->minsync_dt = 9; np->maxsync_dt = 50; np->maxoffs_dt = nvram->type ? 62 : 31; } } /* * 64 bit addressing (895A/896/1010) ? */ if (np->features & FE_DAC) { if (!use_dac(np)) np->rv_ccntl1 |= (DDAC); else if (SYM_CONF_DMA_ADDRESSING_MODE == 1) np->rv_ccntl1 |= (XTIMOD | EXTIBMV); else if (SYM_CONF_DMA_ADDRESSING_MODE == 2) np->rv_ccntl1 |= (0 | EXTIBMV); } /* * Phase mismatch handled by SCRIPTS (895A/896/1010) ? */ if (np->features & FE_NOPM) np->rv_ccntl0 |= (ENPMJ); /* * C1010-33 Errata: Part Number:609-039638 (rev. 1) is fixed. * In dual channel mode, contention occurs if internal cycles * are used. Disable internal cycles. */ if (pdev->device == PCI_DEVICE_ID_LSI_53C1010_33 && pdev->revision < 0x1) np->rv_ccntl0 |= DILS; /* * Select burst length (dwords) */ burst_max = SYM_SETUP_BURST_ORDER; if (burst_max == 255) burst_max = burst_code(np->sv_dmode, np->sv_ctest4, np->sv_ctest5); if (burst_max > 7) burst_max = 7; if (burst_max > np->maxburst) burst_max = np->maxburst; /* * DEL 352 - 53C810 Rev x11 - Part Number 609-0392140 - ITEM 2. * This chip and the 860 Rev 1 may wrongly use PCI cache line * based transactions on LOAD/STORE instructions. So we have * to prevent these chips from using such PCI transactions in * this driver. The generic ncr driver that does not use * LOAD/STORE instructions does not need this work-around. */ if ((pdev->device == PCI_DEVICE_ID_NCR_53C810 && pdev->revision >= 0x10 && pdev->revision <= 0x11) || (pdev->device == PCI_DEVICE_ID_NCR_53C860 && pdev->revision <= 0x1)) np->features &= ~(FE_WRIE|FE_ERL|FE_ERMP); /* * Select all supported special features. * If we are using on-board RAM for scripts, prefetch (PFEN) * does not help, but burst op fetch (BOF) does. * Disabling PFEN makes sure BOF will be used. */ if (np->features & FE_ERL) np->rv_dmode |= ERL; /* Enable Read Line */ if (np->features & FE_BOF) np->rv_dmode |= BOF; /* Burst Opcode Fetch */ if (np->features & FE_ERMP) np->rv_dmode |= ERMP; /* Enable Read Multiple */ #if 1 if ((np->features & FE_PFEN) && !np->ram_ba) #else if (np->features & FE_PFEN) #endif np->rv_dcntl |= PFEN; /* Prefetch Enable */ if (np->features & FE_CLSE) np->rv_dcntl |= CLSE; /* Cache Line Size Enable */ if (np->features & FE_WRIE) np->rv_ctest3 |= WRIE; /* Write and Invalidate */ if (np->features & FE_DFS) np->rv_ctest5 |= DFS; /* Dma Fifo Size */ /* * Select some other */ np->rv_ctest4 |= MPEE; /* Master parity checking */ np->rv_scntl0 |= 0x0a; /* full arb., ena parity, par->ATN */ /* * Get parity checking, host ID and verbose mode from NVRAM */ np->myaddr = 255; np->scsi_mode = 0; sym_nvram_setup_host(shost, np, nvram); /* * Get SCSI addr of host adapter (set by bios?). 
*/ if (np->myaddr == 255) { np->myaddr = INB(np, nc_scid) & 0x07; if (!np->myaddr) np->myaddr = SYM_SETUP_HOST_ID; } /* * Prepare initial io register bits for burst length */ sym_init_burst(np, burst_max); sym_set_bus_mode(np, nvram); /* * Set LED support from SCRIPTS. * Ignore this feature for boards known to use a * specific GPIO wiring and for the 895A, 896 * and 1010 that drive the LED directly. */ if ((SYM_SETUP_SCSI_LED || (nvram->type == SYM_SYMBIOS_NVRAM || (nvram->type == SYM_TEKRAM_NVRAM && pdev->device == PCI_DEVICE_ID_NCR_53C895))) && !(np->features & FE_LEDC) && !(np->sv_gpcntl & 0x01)) np->features |= FE_LED0; /* * Set irq mode. */ switch(SYM_SETUP_IRQ_MODE & 3) { case 2: np->rv_dcntl |= IRQM; break; case 1: np->rv_dcntl |= (np->sv_dcntl & IRQM); break; default: break; } /* * Configure targets according to driver setup. * If NVRAM present get targets setup from NVRAM. */ for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) { struct sym_tcb *tp = &np->target[i]; tp->usrflags |= (SYM_DISC_ENABLED | SYM_TAGS_ENABLED); tp->usrtags = SYM_SETUP_MAX_TAG; tp->usr_width = np->maxwide; tp->usr_period = 9; sym_nvram_setup_target(tp, i, nvram); if (!tp->usrtags) tp->usrflags &= ~SYM_TAGS_ENABLED; } /* * Let user know about the settings. */ printf("%s: %s, ID %d, Fast-%d, %s, %s\n", sym_name(np), sym_nvram_type(nvram), np->myaddr, (np->features & FE_ULTRA3) ? 80 : (np->features & FE_ULTRA2) ? 40 : (np->features & FE_ULTRA) ? 20 : 10, sym_scsi_bus_mode(np->scsi_mode), (np->rv_scntl0 & 0xa) ? "parity checking" : "NO parity"); /* * Tell him more on demand. */ if (sym_verbose) { printf("%s: %s IRQ line driver%s\n", sym_name(np), np->rv_dcntl & IRQM ? "totem pole" : "open drain", np->ram_ba ? ", using on-chip SRAM" : ""); printf("%s: using %s firmware.\n", sym_name(np), np->fw_name); if (np->features & FE_NOPM) printf("%s: handling phase mismatch from SCRIPTS.\n", sym_name(np)); } /* * And still more. */ if (sym_verbose >= 2) { printf ("%s: initial SCNTL3/DMODE/DCNTL/CTEST3/4/5 = " "(hex) %02x/%02x/%02x/%02x/%02x/%02x\n", sym_name(np), np->sv_scntl3, np->sv_dmode, np->sv_dcntl, np->sv_ctest3, np->sv_ctest4, np->sv_ctest5); printf ("%s: final SCNTL3/DMODE/DCNTL/CTEST3/4/5 = " "(hex) %02x/%02x/%02x/%02x/%02x/%02x\n", sym_name(np), np->rv_scntl3, np->rv_dmode, np->rv_dcntl, np->rv_ctest3, np->rv_ctest4, np->rv_ctest5); } return 0; } /* * Test the pci bus snoop logic :-( * * Has to be called with interrupts disabled. */ #ifdef CONFIG_SCSI_SYM53C8XX_MMIO static int sym_regtest(struct sym_hcb *np) { register volatile u32 data; /* * chip registers may NOT be cached. * write 0xffffffff to a read only register area, * and try to read it back. */ data = 0xffffffff; OUTL(np, nc_dstat, data); data = INL(np, nc_dstat); #if 1 if (data == 0xffffffff) { #else if ((data & 0xe2f0fffd) != 0x02000080) { #endif printf ("CACHE TEST FAILED: reg dstat-sstat2 readback %x.\n", (unsigned) data); return 0x10; } return 0; } #else static inline int sym_regtest(struct sym_hcb *np) { return 0; } #endif static int sym_snooptest(struct sym_hcb *np) { u32 sym_rd, sym_wr, sym_bk, host_rd, host_wr, pc, dstat; int i, err; err = sym_regtest(np); if (err) return err; restart_test: /* * Enable Master Parity Checking as we intend * to enable it for normal operations. */ OUTB(np, nc_ctest4, (np->rv_ctest4 & MPEE)); /* * init */ pc = SCRIPTZ_BA(np, snooptest); host_wr = 1; sym_wr = 2; /* * Set memory and register. 
*/ np->scratch = cpu_to_scr(host_wr); OUTL(np, nc_temp, sym_wr); /* * Start script (exchange values) */ OUTL(np, nc_dsa, np->hcb_ba); OUTL_DSP(np, pc); /* * Wait 'til done (with timeout) */ for (i=0; i<SYM_SNOOP_TIMEOUT; i++) if (INB(np, nc_istat) & (INTF|SIP|DIP)) break; if (i>=SYM_SNOOP_TIMEOUT) { printf ("CACHE TEST FAILED: timeout.\n"); return (0x20); } /* * Check for fatal DMA errors. */ dstat = INB(np, nc_dstat); #if 1 /* Band aiding for broken hardwares that fail PCI parity */ if ((dstat & MDPE) && (np->rv_ctest4 & MPEE)) { printf ("%s: PCI DATA PARITY ERROR DETECTED - " "DISABLING MASTER DATA PARITY CHECKING.\n", sym_name(np)); np->rv_ctest4 &= ~MPEE; goto restart_test; } #endif if (dstat & (MDPE|BF|IID)) { printf ("CACHE TEST FAILED: DMA error (dstat=0x%02x).", dstat); return (0x80); } /* * Save termination position. */ pc = INL(np, nc_dsp); /* * Read memory and register. */ host_rd = scr_to_cpu(np->scratch); sym_rd = INL(np, nc_scratcha); sym_bk = INL(np, nc_temp); /* * Check termination position. */ if (pc != SCRIPTZ_BA(np, snoopend)+8) { printf ("CACHE TEST FAILED: script execution failed.\n"); printf ("start=%08lx, pc=%08lx, end=%08lx\n", (u_long) SCRIPTZ_BA(np, snooptest), (u_long) pc, (u_long) SCRIPTZ_BA(np, snoopend) +8); return (0x40); } /* * Show results. */ if (host_wr != sym_rd) { printf ("CACHE TEST FAILED: host wrote %d, chip read %d.\n", (int) host_wr, (int) sym_rd); err |= 1; } if (host_rd != sym_wr) { printf ("CACHE TEST FAILED: chip wrote %d, host read %d.\n", (int) sym_wr, (int) host_rd); err |= 2; } if (sym_bk != sym_wr) { printf ("CACHE TEST FAILED: chip wrote %d, read back %d.\n", (int) sym_wr, (int) sym_bk); err |= 4; } return err; } /* * log message for real hard errors * * sym0 targ 0?: ERROR (ds:si) (so-si-sd) (sx/s3/s4) @ name (dsp:dbc). * reg: r0 r1 r2 r3 r4 r5 r6 ..... rf. * * exception register: * ds: dstat * si: sist * * SCSI bus lines: * so: control lines as driven by chip. * si: control lines as seen by chip. * sd: scsi data lines as seen by chip. * * wide/fastmode: * sx: sxfer (see the manual) * s3: scntl3 (see the manual) * s4: scntl4 (see the manual) * * current script command: * dsp: script address (relative to start of script). * dbc: first word of script command. * * First 24 register of the chip: * r0..rf */ static void sym_log_hard_error(struct Scsi_Host *shost, u_short sist, u_char dstat) { struct sym_hcb *np = sym_get_hcb(shost); u32 dsp; int script_ofs; int script_size; char *script_name; u_char *script_base; int i; dsp = INL(np, nc_dsp); if (dsp > np->scripta_ba && dsp <= np->scripta_ba + np->scripta_sz) { script_ofs = dsp - np->scripta_ba; script_size = np->scripta_sz; script_base = (u_char *) np->scripta0; script_name = "scripta"; } else if (np->scriptb_ba < dsp && dsp <= np->scriptb_ba + np->scriptb_sz) { script_ofs = dsp - np->scriptb_ba; script_size = np->scriptb_sz; script_base = (u_char *) np->scriptb0; script_name = "scriptb"; } else { script_ofs = dsp; script_size = 0; script_base = NULL; script_name = "mem"; } printf ("%s:%d: ERROR (%x:%x) (%x-%x-%x) (%x/%x/%x) @ (%s %x:%08x).\n", sym_name(np), (unsigned)INB(np, nc_sdid)&0x0f, dstat, sist, (unsigned)INB(np, nc_socl), (unsigned)INB(np, nc_sbcl), (unsigned)INB(np, nc_sbdl), (unsigned)INB(np, nc_sxfer), (unsigned)INB(np, nc_scntl3), (np->features & FE_C10) ? 
(unsigned)INB(np, nc_scntl4) : 0, script_name, script_ofs, (unsigned)INL(np, nc_dbc)); if (((script_ofs & 3) == 0) && (unsigned)script_ofs < script_size) { printf ("%s: script cmd = %08x\n", sym_name(np), scr_to_cpu((int) *(u32 *)(script_base + script_ofs))); } printf("%s: regdump:", sym_name(np)); for (i = 0; i < 24; i++) printf(" %02x", (unsigned)INB_OFF(np, i)); printf(".\n"); /* * PCI BUS error. */ if (dstat & (MDPE|BF)) sym_log_bus_error(shost); } void sym_dump_registers(struct Scsi_Host *shost) { struct sym_hcb *np = sym_get_hcb(shost); u_short sist; u_char dstat; sist = INW(np, nc_sist); dstat = INB(np, nc_dstat); sym_log_hard_error(shost, sist, dstat); } static struct sym_chip sym_dev_table[] = { {PCI_DEVICE_ID_NCR_53C810, 0x0f, "810", 4, 8, 4, 64, FE_ERL} , #ifdef SYM_DEBUG_GENERIC_SUPPORT {PCI_DEVICE_ID_NCR_53C810, 0xff, "810a", 4, 8, 4, 1, FE_BOF} , #else {PCI_DEVICE_ID_NCR_53C810, 0xff, "810a", 4, 8, 4, 1, FE_CACHE_SET|FE_LDSTR|FE_PFEN|FE_BOF} , #endif {PCI_DEVICE_ID_NCR_53C815, 0xff, "815", 4, 8, 4, 64, FE_BOF|FE_ERL} , {PCI_DEVICE_ID_NCR_53C825, 0x0f, "825", 6, 8, 4, 64, FE_WIDE|FE_BOF|FE_ERL|FE_DIFF} , {PCI_DEVICE_ID_NCR_53C825, 0xff, "825a", 6, 8, 4, 2, FE_WIDE|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM|FE_DIFF} , {PCI_DEVICE_ID_NCR_53C860, 0xff, "860", 4, 8, 5, 1, FE_ULTRA|FE_CACHE_SET|FE_BOF|FE_LDSTR|FE_PFEN} , {PCI_DEVICE_ID_NCR_53C875, 0x01, "875", 6, 16, 5, 2, FE_WIDE|FE_ULTRA|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| FE_RAM|FE_DIFF|FE_VARCLK} , {PCI_DEVICE_ID_NCR_53C875, 0xff, "875", 6, 16, 5, 2, FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| FE_RAM|FE_DIFF|FE_VARCLK} , {PCI_DEVICE_ID_NCR_53C875J, 0xff, "875J", 6, 16, 5, 2, FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| FE_RAM|FE_DIFF|FE_VARCLK} , {PCI_DEVICE_ID_NCR_53C885, 0xff, "885", 6, 16, 5, 2, FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| FE_RAM|FE_DIFF|FE_VARCLK} , #ifdef SYM_DEBUG_GENERIC_SUPPORT {PCI_DEVICE_ID_NCR_53C895, 0xff, "895", 6, 31, 7, 2, FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS| FE_RAM|FE_LCKFRQ} , #else {PCI_DEVICE_ID_NCR_53C895, 0xff, "895", 6, 31, 7, 2, FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| FE_RAM|FE_LCKFRQ} , #endif {PCI_DEVICE_ID_NCR_53C896, 0xff, "896", 6, 31, 7, 4, FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_LCKFRQ} , {PCI_DEVICE_ID_LSI_53C895A, 0xff, "895a", 6, 31, 7, 4, FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| FE_RAM|FE_RAM8K|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_LCKFRQ} , {PCI_DEVICE_ID_LSI_53C875A, 0xff, "875a", 6, 31, 7, 4, FE_WIDE|FE_ULTRA|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| FE_RAM|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_LCKFRQ} , {PCI_DEVICE_ID_LSI_53C1010_33, 0x00, "1010-33", 6, 31, 7, 8, FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN| FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_CRC| FE_C10} , {PCI_DEVICE_ID_LSI_53C1010_33, 0xff, "1010-33", 6, 31, 7, 8, FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN| FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_CRC| FE_C10|FE_U3EN} , {PCI_DEVICE_ID_LSI_53C1010_66, 0xff, "1010-66", 6, 31, 7, 8, FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN| FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_66MHZ|FE_CRC| FE_C10|FE_U3EN} , {PCI_DEVICE_ID_LSI_53C1510, 0xff, "1510d", 6, 31, 7, 4, 
FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| FE_RAM|FE_IO256|FE_LEDC} }; #define sym_num_devs (ARRAY_SIZE(sym_dev_table)) /* * Look up the chip table. * * Return a pointer to the chip entry if found, * zero otherwise. */ struct sym_chip * sym_lookup_chip_table (u_short device_id, u_char revision) { struct sym_chip *chip; int i; for (i = 0; i < sym_num_devs; i++) { chip = &sym_dev_table[i]; if (device_id != chip->device_id) continue; if (revision > chip->revision_id) continue; return chip; } return NULL; } #if SYM_CONF_DMA_ADDRESSING_MODE == 2 /* * Lookup the 64 bit DMA segments map. * This is only used if the direct mapping * has been unsuccessful. */ int sym_lookup_dmap(struct sym_hcb *np, u32 h, int s) { int i; if (!use_dac(np)) goto weird; /* Look up existing mappings */ for (i = SYM_DMAP_SIZE-1; i > 0; i--) { if (h == np->dmap_bah[i]) return i; } /* If direct mapping is free, get it */ if (!np->dmap_bah[s]) goto new; /* Collision -> lookup free mappings */ for (s = SYM_DMAP_SIZE-1; s > 0; s--) { if (!np->dmap_bah[s]) goto new; } weird: panic("sym: ran out of 64 bit DMA segment registers"); return -1; new: np->dmap_bah[s] = h; np->dmap_dirty = 1; return s; } /* * Update IO registers scratch C..R so they will be * in sync. with queued CCB expectations. */ static void sym_update_dmap_regs(struct sym_hcb *np) { int o, i; if (!np->dmap_dirty) return; o = offsetof(struct sym_reg, nc_scrx[0]); for (i = 0; i < SYM_DMAP_SIZE; i++) { OUTL_OFF(np, o, np->dmap_bah[i]); o += 4; } np->dmap_dirty = 0; } #endif /* Enforce all the fiddly SPI rules and the chip limitations */ static void sym_check_goals(struct sym_hcb *np, struct scsi_target *starget, struct sym_trans *goal) { if (!spi_support_wide(starget)) goal->width = 0; if (!spi_support_sync(starget)) { goal->iu = 0; goal->dt = 0; goal->qas = 0; goal->offset = 0; return; } if (spi_support_dt(starget)) { if (spi_support_dt_only(starget)) goal->dt = 1; if (goal->offset == 0) goal->dt = 0; } else { goal->dt = 0; } /* Some targets fail to properly negotiate DT in SE mode */ if ((np->scsi_mode != SMODE_LVD) || !(np->features & FE_U3EN)) goal->dt = 0; if (goal->dt) { /* all DT transfers must be wide */ goal->width = 1; if (goal->offset > np->maxoffs_dt) goal->offset = np->maxoffs_dt; if (goal->period < np->minsync_dt) goal->period = np->minsync_dt; if (goal->period > np->maxsync_dt) goal->period = np->maxsync_dt; } else { goal->iu = goal->qas = 0; if (goal->offset > np->maxoffs) goal->offset = np->maxoffs; if (goal->period < np->minsync) goal->period = np->minsync; if (goal->period > np->maxsync) goal->period = np->maxsync; } } /* * Prepare the next negotiation message if needed. * * Fill in the part of message buffer that contains the * negotiation and the nego_status field of the CCB. * Returns the size of the message in bytes. */ static int sym_prepare_nego(struct sym_hcb *np, struct sym_ccb *cp, u_char *msgptr) { struct sym_tcb *tp = &np->target[cp->target]; struct scsi_target *starget = tp->starget; struct sym_trans *goal = &tp->tgoal; int msglen = 0; int nego; sym_check_goals(np, starget, goal); /* * Many devices implement PPR in a buggy way, so only use it if we * really want to. 
*/ if (goal->renego == NS_PPR || (goal->offset && (goal->iu || goal->dt || goal->qas || (goal->period < 0xa)))) { nego = NS_PPR; } else if (goal->renego == NS_WIDE || goal->width) { nego = NS_WIDE; } else if (goal->renego == NS_SYNC || goal->offset) { nego = NS_SYNC; } else { goal->check_nego = 0; nego = 0; } switch (nego) { case NS_SYNC: msglen += spi_populate_sync_msg(msgptr + msglen, goal->period, goal->offset); break; case NS_WIDE: msglen += spi_populate_width_msg(msgptr + msglen, goal->width); break; case NS_PPR: msglen += spi_populate_ppr_msg(msgptr + msglen, goal->period, goal->offset, goal->width, (goal->iu ? PPR_OPT_IU : 0) | (goal->dt ? PPR_OPT_DT : 0) | (goal->qas ? PPR_OPT_QAS : 0)); break; } cp->nego_status = nego; if (nego) { tp->nego_cp = cp; /* Keep track a nego will be performed */ if (DEBUG_FLAGS & DEBUG_NEGO) { sym_print_nego_msg(np, cp->target, nego == NS_SYNC ? "sync msgout" : nego == NS_WIDE ? "wide msgout" : "ppr msgout", msgptr); } } return msglen; } /* * Insert a job into the start queue. */ void sym_put_start_queue(struct sym_hcb *np, struct sym_ccb *cp) { u_short qidx; #ifdef SYM_CONF_IARB_SUPPORT /* * If the previously queued CCB is not yet done, * set the IARB hint. The SCRIPTS will go with IARB * for this job when starting the previous one. * We leave devices a chance to win arbitration by * not using more than 'iarb_max' consecutive * immediate arbitrations. */ if (np->last_cp && np->iarb_count < np->iarb_max) { np->last_cp->host_flags |= HF_HINT_IARB; ++np->iarb_count; } else np->iarb_count = 0; np->last_cp = cp; #endif #if SYM_CONF_DMA_ADDRESSING_MODE == 2 /* * Make SCRIPTS aware of the 64 bit DMA * segment registers not being up-to-date. */ if (np->dmap_dirty) cp->host_xflags |= HX_DMAP_DIRTY; #endif /* * Insert first the idle task and then our job. * The MBs should ensure proper ordering. */ qidx = np->squeueput + 2; if (qidx >= MAX_QUEUE*2) qidx = 0; np->squeue [qidx] = cpu_to_scr(np->idletask_ba); MEMORY_WRITE_BARRIER(); np->squeue [np->squeueput] = cpu_to_scr(cp->ccb_ba); np->squeueput = qidx; if (DEBUG_FLAGS & DEBUG_QUEUE) scmd_printk(KERN_DEBUG, cp->cmd, "queuepos=%d\n", np->squeueput); /* * Script processor may be waiting for reselect. * Wake it up. */ MEMORY_WRITE_BARRIER(); OUTB(np, nc_istat, SIGP|np->istat_sem); } #ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING /* * Start next ready-to-start CCBs. */ void sym_start_next_ccbs(struct sym_hcb *np, struct sym_lcb *lp, int maxn) { SYM_QUEHEAD *qp; struct sym_ccb *cp; /* * Paranoia, as usual. :-) */ assert(!lp->started_tags || !lp->started_no_tag); /* * Try to start as many commands as asked by caller. * Prevent from having both tagged and untagged * commands queued to the device at the same time. */ while (maxn--) { qp = sym_remque_head(&lp->waiting_ccbq); if (!qp) break; cp = sym_que_entry(qp, struct sym_ccb, link2_ccbq); if (cp->tag != NO_TAG) { if (lp->started_no_tag || lp->started_tags >= lp->started_max) { sym_insque_head(qp, &lp->waiting_ccbq); break; } lp->itlq_tbl[cp->tag] = cpu_to_scr(cp->ccb_ba); lp->head.resel_sa = cpu_to_scr(SCRIPTA_BA(np, resel_tag)); ++lp->started_tags; } else { if (lp->started_no_tag || lp->started_tags) { sym_insque_head(qp, &lp->waiting_ccbq); break; } lp->head.itl_task_sa = cpu_to_scr(cp->ccb_ba); lp->head.resel_sa = cpu_to_scr(SCRIPTA_BA(np, resel_no_tag)); ++lp->started_no_tag; } cp->started = 1; sym_insque_tail(qp, &lp->started_ccbq); sym_put_start_queue(np, cp); } } #endif /* SYM_OPT_HANDLE_DEVICE_QUEUEING */ /* * The chip may have completed jobs. Look at the DONE QUEUE. 
* * On paper, memory read barriers may be needed here to * prevent out of order LOADs by the CPU from having * prefetched stale data prior to DMA having occurred. */ static int sym_wakeup_done (struct sym_hcb *np) { struct sym_ccb *cp; int i, n; u32 dsa; n = 0; i = np->dqueueget; /* MEMORY_READ_BARRIER(); */ while (1) { dsa = scr_to_cpu(np->dqueue[i]); if (!dsa) break; np->dqueue[i] = 0; if ((i = i+2) >= MAX_QUEUE*2) i = 0; cp = sym_ccb_from_dsa(np, dsa); if (cp) { MEMORY_READ_BARRIER(); sym_complete_ok (np, cp); ++n; } else printf ("%s: bad DSA (%x) in done queue.\n", sym_name(np), (u_int) dsa); } np->dqueueget = i; return n; } /* * Complete all CCBs queued to the COMP queue. * * These CCBs are assumed: * - Not to be referenced either by devices or * SCRIPTS-related queues and datas. * - To have to be completed with an error condition * or requeued. * * The device queue freeze count is incremented * for each CCB that does not prevent this. * This function is called when all CCBs involved * in error handling/recovery have been reaped. */ static void sym_flush_comp_queue(struct sym_hcb *np, int cam_status) { SYM_QUEHEAD *qp; struct sym_ccb *cp; while ((qp = sym_remque_head(&np->comp_ccbq)) != NULL) { struct scsi_cmnd *cmd; cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq); /* Leave quiet CCBs waiting for resources */ if (cp->host_status == HS_WAIT) continue; cmd = cp->cmd; if (cam_status) sym_set_cam_status(cmd, cam_status); #ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING if (sym_get_cam_status(cmd) == DID_SOFT_ERROR) { struct sym_tcb *tp = &np->target[cp->target]; struct sym_lcb *lp = sym_lp(tp, cp->lun); if (lp) { sym_remque(&cp->link2_ccbq); sym_insque_tail(&cp->link2_ccbq, &lp->waiting_ccbq); if (cp->started) { if (cp->tag != NO_TAG) --lp->started_tags; else --lp->started_no_tag; } } cp->started = 0; continue; } #endif sym_free_ccb(np, cp); sym_xpt_done(np, cmd); } } /* * Complete all active CCBs with error. * Used on CHIP/SCSI RESET. */ static void sym_flush_busy_queue (struct sym_hcb *np, int cam_status) { /* * Move all active CCBs to the COMP queue * and flush this queue. */ sym_que_splice(&np->busy_ccbq, &np->comp_ccbq); sym_que_init(&np->busy_ccbq); sym_flush_comp_queue(np, cam_status); } /* * Start chip. * * 'reason' means: * 0: initialisation. * 1: SCSI BUS RESET delivered or received. * 2: SCSI BUS MODE changed. */ void sym_start_up(struct Scsi_Host *shost, int reason) { struct sym_data *sym_data = shost_priv(shost); struct pci_dev *pdev = sym_data->pdev; struct sym_hcb *np = sym_data->ncb; int i; u32 phys; /* * Reset chip if asked, otherwise just clear fifos. */ if (reason == 1) sym_soft_reset(np); else { OUTB(np, nc_stest3, TE|CSF); OUTONB(np, nc_ctest3, CLF); } /* * Clear Start Queue */ phys = np->squeue_ba; for (i = 0; i < MAX_QUEUE*2; i += 2) { np->squeue[i] = cpu_to_scr(np->idletask_ba); np->squeue[i+1] = cpu_to_scr(phys + (i+2)*4); } np->squeue[MAX_QUEUE*2-1] = cpu_to_scr(phys); /* * Start at first entry. */ np->squeueput = 0; /* * Clear Done Queue */ phys = np->dqueue_ba; for (i = 0; i < MAX_QUEUE*2; i += 2) { np->dqueue[i] = 0; np->dqueue[i+1] = cpu_to_scr(phys + (i+2)*4); } np->dqueue[MAX_QUEUE*2-1] = cpu_to_scr(phys); /* * Start at first entry. */ np->dqueueget = 0; /* * Install patches in scripts. * This also let point to first position the start * and done queue pointers used from SCRIPTS. */ np->fw_patch(shost); /* * Wakeup all pending jobs. */ sym_flush_busy_queue(np, DID_RESET); /* * Init chip. 
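 */

/*
 * A small user-space model of the DONE queue scan in sym_wakeup_done()
 * above: consume ring entries until a zero DSA is found, clearing each slot
 * so the chip can reuse it.  QUEUE_ENTRIES, the dqueue[] ring and the
 * printf() standing in for sym_complete_ok() are assumptions of this sketch.
 */
#include <stdio.h>
#include <stdint.h>

#define QUEUE_ENTRIES 8                 /* stand-in for MAX_QUEUE */

static uint32_t dqueue[QUEUE_ENTRIES * 2];      /* DSA, link, DSA, link, ... */
static int dqueueget;                           /* next slot to look at      */

/* Scan the done ring; return how many jobs were completed. */
static int wakeup_done(void)
{
        int i = dqueueget, n = 0;
        uint32_t dsa;

        while ((dsa = dqueue[i]) != 0) {
                dqueue[i] = 0;                  /* hand the slot back   */
                if ((i += 2) >= QUEUE_ENTRIES * 2)
                        i = 0;                  /* wrap around the ring */
                printf("complete job with DSA 0x%x\n", (unsigned)dsa);
                ++n;
        }
        dqueueget = i;
        return n;
}

int main(void)
{
        dqueue[0] = 0x1000;             /* two jobs posted by the "chip" */
        dqueue[2] = 0x2000;
        printf("completed %d, next get index %d\n", wakeup_done(), dqueueget);
        return 0;
}

/*
 * The driver also validates each DSA against its CCB list and issues a read
 * barrier per entry.  The register writes below take the chip out of reset
 * and reprogram it from the cached rv_* values.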
*/ OUTB(np, nc_istat, 0x00); /* Remove Reset, abort */ INB(np, nc_mbox1); udelay(2000); /* The 895 needs time for the bus mode to settle */ OUTB(np, nc_scntl0, np->rv_scntl0 | 0xc0); /* full arb., ena parity, par->ATN */ OUTB(np, nc_scntl1, 0x00); /* odd parity, and remove CRST!! */ sym_selectclock(np, np->rv_scntl3); /* Select SCSI clock */ OUTB(np, nc_scid , RRE|np->myaddr); /* Adapter SCSI address */ OUTW(np, nc_respid, 1ul<<np->myaddr); /* Id to respond to */ OUTB(np, nc_istat , SIGP ); /* Signal Process */ OUTB(np, nc_dmode , np->rv_dmode); /* Burst length, dma mode */ OUTB(np, nc_ctest5, np->rv_ctest5); /* Large fifo + large burst */ OUTB(np, nc_dcntl , NOCOM|np->rv_dcntl); /* Protect SFBR */ OUTB(np, nc_ctest3, np->rv_ctest3); /* Write and invalidate */ OUTB(np, nc_ctest4, np->rv_ctest4); /* Master parity checking */ /* Extended Sreq/Sack filtering not supported on the C10 */ if (np->features & FE_C10) OUTB(np, nc_stest2, np->rv_stest2); else OUTB(np, nc_stest2, EXT|np->rv_stest2); OUTB(np, nc_stest3, TE); /* TolerANT enable */ OUTB(np, nc_stime0, 0x0c); /* HTH disabled STO 0.25 sec */ /* * For now, disable AIP generation on C1010-66. */ if (pdev->device == PCI_DEVICE_ID_LSI_53C1010_66) OUTB(np, nc_aipcntl1, DISAIP); /* * C10101 rev. 0 errata. * Errant SGE's when in narrow. Write bits 4 & 5 of * STEST1 register to disable SGE. We probably should do * that from SCRIPTS for each selection/reselection, but * I just don't want. :) */ if (pdev->device == PCI_DEVICE_ID_LSI_53C1010_33 && pdev->revision < 1) OUTB(np, nc_stest1, INB(np, nc_stest1) | 0x30); /* * DEL 441 - 53C876 Rev 5 - Part Number 609-0392787/2788 - ITEM 2. * Disable overlapped arbitration for some dual function devices, * regardless revision id (kind of post-chip-design feature. ;-)) */ if (pdev->device == PCI_DEVICE_ID_NCR_53C875) OUTB(np, nc_ctest0, (1<<5)); else if (pdev->device == PCI_DEVICE_ID_NCR_53C896) np->rv_ccntl0 |= DPR; /* * Write CCNTL0/CCNTL1 for chips capable of 64 bit addressing * and/or hardware phase mismatch, since only such chips * seem to support those IO registers. */ if (np->features & (FE_DAC|FE_NOPM)) { OUTB(np, nc_ccntl0, np->rv_ccntl0); OUTB(np, nc_ccntl1, np->rv_ccntl1); } #if SYM_CONF_DMA_ADDRESSING_MODE == 2 /* * Set up scratch C and DRS IO registers to map the 32 bit * DMA address range our data structures are located in. */ if (use_dac(np)) { np->dmap_bah[0] = 0; /* ??? */ OUTL(np, nc_scrx[0], np->dmap_bah[0]); OUTL(np, nc_drs, np->dmap_bah[0]); } #endif /* * If phase mismatch handled by scripts (895A/896/1010), * set PM jump addresses. */ if (np->features & FE_NOPM) { OUTL(np, nc_pmjad1, SCRIPTB_BA(np, pm_handle)); OUTL(np, nc_pmjad2, SCRIPTB_BA(np, pm_handle)); } /* * Enable GPIO0 pin for writing if LED support from SCRIPTS. * Also set GPIO5 and clear GPIO6 if hardware LED control. */ if (np->features & FE_LED0) OUTB(np, nc_gpcntl, INB(np, nc_gpcntl) & ~0x01); else if (np->features & FE_LEDC) OUTB(np, nc_gpcntl, (INB(np, nc_gpcntl) & ~0x41) | 0x20); /* * enable ints */ OUTW(np, nc_sien , STO|HTH|MA|SGE|UDC|RST|PAR); OUTB(np, nc_dien , MDPE|BF|SSI|SIR|IID); /* * For 895/6 enable SBMC interrupt and save current SCSI bus mode. * Try to eat the spurious SBMC interrupt that may occur when * we reset the chip but not the SCSI BUS (at initialization). */ if (np->features & (FE_ULTRA2|FE_ULTRA3)) { OUTONW(np, nc_sien, SBMC); if (reason == 0) { INB(np, nc_mbox1); mdelay(100); INW(np, nc_sist); } np->scsi_mode = INB(np, nc_stest4) & SMODE; } /* * Fill in target structure. * Reinitialize usrsync. 
* Reinitialize usrwide. * Prepare sync negotiation according to actual SCSI bus mode. */ for (i=0;i<SYM_CONF_MAX_TARGET;i++) { struct sym_tcb *tp = &np->target[i]; tp->to_reset = 0; tp->head.sval = 0; tp->head.wval = np->rv_scntl3; tp->head.uval = 0; if (tp->lun0p) tp->lun0p->to_clear = 0; if (tp->lunmp) { int ln; for (ln = 1; ln < SYM_CONF_MAX_LUN; ln++) if (tp->lunmp[ln]) tp->lunmp[ln]->to_clear = 0; } } /* * Download SCSI SCRIPTS to on-chip RAM if present, * and start script processor. * We do the download preferently from the CPU. * For platforms that may not support PCI memory mapping, * we use simple SCRIPTS that performs MEMORY MOVEs. */ phys = SCRIPTA_BA(np, init); if (np->ram_ba) { if (sym_verbose >= 2) printf("%s: Downloading SCSI SCRIPTS.\n", sym_name(np)); memcpy_toio(np->s.ramaddr, np->scripta0, np->scripta_sz); if (np->features & FE_RAM8K) { memcpy_toio(np->s.ramaddr + 4096, np->scriptb0, np->scriptb_sz); phys = scr_to_cpu(np->scr_ram_seg); OUTL(np, nc_mmws, phys); OUTL(np, nc_mmrs, phys); OUTL(np, nc_sfs, phys); phys = SCRIPTB_BA(np, start64); } } np->istat_sem = 0; OUTL(np, nc_dsa, np->hcb_ba); OUTL_DSP(np, phys); /* * Notify the XPT about the RESET condition. */ if (reason != 0) sym_xpt_async_bus_reset(np); } /* * Switch trans mode for current job and its target. */ static void sym_settrans(struct sym_hcb *np, int target, u_char opts, u_char ofs, u_char per, u_char wide, u_char div, u_char fak) { SYM_QUEHEAD *qp; u_char sval, wval, uval; struct sym_tcb *tp = &np->target[target]; assert(target == (INB(np, nc_sdid) & 0x0f)); sval = tp->head.sval; wval = tp->head.wval; uval = tp->head.uval; #if 0 printf("XXXX sval=%x wval=%x uval=%x (%x)\n", sval, wval, uval, np->rv_scntl3); #endif /* * Set the offset. */ if (!(np->features & FE_C10)) sval = (sval & ~0x1f) | ofs; else sval = (sval & ~0x3f) | ofs; /* * Set the sync divisor and extra clock factor. */ if (ofs != 0) { wval = (wval & ~0x70) | ((div+1) << 4); if (!(np->features & FE_C10)) sval = (sval & ~0xe0) | (fak << 5); else { uval = uval & ~(XCLKH_ST|XCLKH_DT|XCLKS_ST|XCLKS_DT); if (fak >= 1) uval |= (XCLKH_ST|XCLKH_DT); if (fak >= 2) uval |= (XCLKS_ST|XCLKS_DT); } } /* * Set the bus width. */ wval = wval & ~EWS; if (wide != 0) wval |= EWS; /* * Set misc. ultra enable bits. */ if (np->features & FE_C10) { uval = uval & ~(U3EN|AIPCKEN); if (opts) { assert(np->features & FE_U3EN); uval |= U3EN; } } else { wval = wval & ~ULTRA; if (per <= 12) wval |= ULTRA; } /* * Stop there if sync parameters are unchanged. */ if (tp->head.sval == sval && tp->head.wval == wval && tp->head.uval == uval) return; tp->head.sval = sval; tp->head.wval = wval; tp->head.uval = uval; /* * Disable extended Sreq/Sack filtering if per < 50. * Not supported on the C1010. */ if (per < 50 && !(np->features & FE_C10)) OUTOFFB(np, nc_stest2, EXT); /* * set actual value and sync_status */ OUTB(np, nc_sxfer, tp->head.sval); OUTB(np, nc_scntl3, tp->head.wval); if (np->features & FE_C10) { OUTB(np, nc_scntl4, tp->head.uval); } /* * patch ALL busy ccbs of this target. 
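 */

/*
 * A sketch of how sym_settrans() above folds the negotiated parameters into
 * the per-target shadow of the sxfer/scntl3 registers (non-C10 path only).
 * The field masks are the ones used in the routine; EWS_BIT and ULTRA_BIT
 * are placeholder values chosen for this sketch, not the chip's real bit
 * positions.
 */
#include <stdio.h>

#define EWS_BIT         0x08    /* assumed: wide (16 bit) enable bit */
#define ULTRA_BIT       0x80    /* assumed: ultra timing enable bit  */

struct xfer_shadow {
        unsigned char sval;     /* shadow of nc_sxfer  */
        unsigned char wval;     /* shadow of nc_scntl3 */
};

static void settrans(struct xfer_shadow *x, unsigned char ofs,
                     unsigned char per, unsigned char wide,
                     unsigned char div, unsigned char fak)
{
        x->sval = (x->sval & ~0x1f) | ofs;                      /* offset   */
        if (ofs) {
                x->wval = (x->wval & ~0x70) | ((div + 1) << 4); /* divisor  */
                x->sval = (x->sval & ~0xe0) | (fak << 5);       /* xtra clk */
        }
        x->wval &= ~EWS_BIT;
        if (wide)
                x->wval |= EWS_BIT;
        x->wval &= ~ULTRA_BIT;
        if (per <= 12)
                x->wval |= ULTRA_BIT;
}

int main(void)
{
        struct xfer_shadow x = { 0, 0 };

        settrans(&x, 16, 12, 1, 2, 0);  /* wide, offset 16, period factor 12 */
        printf("sval=0x%02x wval=0x%02x\n", x.sval, x.wval);
        return 0;
}

/*
 * The driver only touches the hardware when the computed values differ from
 * the cached ones; as the comment above says, it then patches every busy CCB
 * of the target below so a reselection reloads the same settings.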
*/ FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) { struct sym_ccb *cp; cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); if (cp->target != target) continue; cp->phys.select.sel_scntl3 = tp->head.wval; cp->phys.select.sel_sxfer = tp->head.sval; if (np->features & FE_C10) { cp->phys.select.sel_scntl4 = tp->head.uval; } } } static void sym_announce_transfer_rate(struct sym_tcb *tp) { struct scsi_target *starget = tp->starget; if (tp->tprint.period != spi_period(starget) || tp->tprint.offset != spi_offset(starget) || tp->tprint.width != spi_width(starget) || tp->tprint.iu != spi_iu(starget) || tp->tprint.dt != spi_dt(starget) || tp->tprint.qas != spi_qas(starget) || !tp->tprint.check_nego) { tp->tprint.period = spi_period(starget); tp->tprint.offset = spi_offset(starget); tp->tprint.width = spi_width(starget); tp->tprint.iu = spi_iu(starget); tp->tprint.dt = spi_dt(starget); tp->tprint.qas = spi_qas(starget); tp->tprint.check_nego = 1; spi_display_xfer_agreement(starget); } } /* * We received a WDTR. * Let everything be aware of the changes. */ static void sym_setwide(struct sym_hcb *np, int target, u_char wide) { struct sym_tcb *tp = &np->target[target]; struct scsi_target *starget = tp->starget; sym_settrans(np, target, 0, 0, 0, wide, 0, 0); if (wide) tp->tgoal.renego = NS_WIDE; else tp->tgoal.renego = 0; tp->tgoal.check_nego = 0; tp->tgoal.width = wide; spi_offset(starget) = 0; spi_period(starget) = 0; spi_width(starget) = wide; spi_iu(starget) = 0; spi_dt(starget) = 0; spi_qas(starget) = 0; if (sym_verbose >= 3) sym_announce_transfer_rate(tp); } /* * We received a SDTR. * Let everything be aware of the changes. */ static void sym_setsync(struct sym_hcb *np, int target, u_char ofs, u_char per, u_char div, u_char fak) { struct sym_tcb *tp = &np->target[target]; struct scsi_target *starget = tp->starget; u_char wide = (tp->head.wval & EWS) ? BUS_16_BIT : BUS_8_BIT; sym_settrans(np, target, 0, ofs, per, wide, div, fak); if (wide) tp->tgoal.renego = NS_WIDE; else if (ofs) tp->tgoal.renego = NS_SYNC; else tp->tgoal.renego = 0; spi_period(starget) = per; spi_offset(starget) = ofs; spi_iu(starget) = spi_dt(starget) = spi_qas(starget) = 0; if (!tp->tgoal.dt && !tp->tgoal.iu && !tp->tgoal.qas) { tp->tgoal.period = per; tp->tgoal.offset = ofs; tp->tgoal.check_nego = 0; } sym_announce_transfer_rate(tp); } /* * We received a PPR. * Let everything be aware of the changes. */ static void sym_setpprot(struct sym_hcb *np, int target, u_char opts, u_char ofs, u_char per, u_char wide, u_char div, u_char fak) { struct sym_tcb *tp = &np->target[target]; struct scsi_target *starget = tp->starget; sym_settrans(np, target, opts, ofs, per, wide, div, fak); if (wide || ofs) tp->tgoal.renego = NS_PPR; else tp->tgoal.renego = 0; spi_width(starget) = tp->tgoal.width = wide; spi_period(starget) = tp->tgoal.period = per; spi_offset(starget) = tp->tgoal.offset = ofs; spi_iu(starget) = tp->tgoal.iu = !!(opts & PPR_OPT_IU); spi_dt(starget) = tp->tgoal.dt = !!(opts & PPR_OPT_DT); spi_qas(starget) = tp->tgoal.qas = !!(opts & PPR_OPT_QAS); tp->tgoal.check_nego = 0; sym_announce_transfer_rate(tp); } /* * generic recovery from scsi interrupt * * The doc says that when the chip gets an SCSI interrupt, * it tries to stop in an orderly fashion, by completing * an instruction fetch that had started or by flushing * the DMA fifo for a write to memory that was executing. * Such a fashion is not enough to know if the instruction * that was just before the current DSP value has been * executed or not. 
* * There are some small SCRIPTS sections that deal with * the start queue and the done queue that may break any * assomption from the C code if we are interrupted * inside, so we reset if this happens. Btw, since these * SCRIPTS sections are executed while the SCRIPTS hasn't * started SCSI operations, it is very unlikely to happen. * * All the driver data structures are supposed to be * allocated from the same 4 GB memory window, so there * is a 1 to 1 relationship between DSA and driver data * structures. Since we are careful :) to invalidate the * DSA when we complete a command or when the SCRIPTS * pushes a DSA into a queue, we can trust it when it * points to a CCB. */ static void sym_recover_scsi_int (struct sym_hcb *np, u_char hsts) { u32 dsp = INL(np, nc_dsp); u32 dsa = INL(np, nc_dsa); struct sym_ccb *cp = sym_ccb_from_dsa(np, dsa); /* * If we haven't been interrupted inside the SCRIPTS * critical pathes, we can safely restart the SCRIPTS * and trust the DSA value if it matches a CCB. */ if ((!(dsp > SCRIPTA_BA(np, getjob_begin) && dsp < SCRIPTA_BA(np, getjob_end) + 1)) && (!(dsp > SCRIPTA_BA(np, ungetjob) && dsp < SCRIPTA_BA(np, reselect) + 1)) && (!(dsp > SCRIPTB_BA(np, sel_for_abort) && dsp < SCRIPTB_BA(np, sel_for_abort_1) + 1)) && (!(dsp > SCRIPTA_BA(np, done) && dsp < SCRIPTA_BA(np, done_end) + 1))) { OUTB(np, nc_ctest3, np->rv_ctest3 | CLF); /* clear dma fifo */ OUTB(np, nc_stest3, TE|CSF); /* clear scsi fifo */ /* * If we have a CCB, let the SCRIPTS call us back for * the handling of the error with SCRATCHA filled with * STARTPOS. This way, we will be able to freeze the * device queue and requeue awaiting IOs. */ if (cp) { cp->host_status = hsts; OUTL_DSP(np, SCRIPTA_BA(np, complete_error)); } /* * Otherwise just restart the SCRIPTS. */ else { OUTL(np, nc_dsa, 0xffffff); OUTL_DSP(np, SCRIPTA_BA(np, start)); } } else goto reset_all; return; reset_all: sym_start_reset(np); } /* * chip exception handler for selection timeout */ static void sym_int_sto (struct sym_hcb *np) { u32 dsp = INL(np, nc_dsp); if (DEBUG_FLAGS & DEBUG_TINY) printf ("T"); if (dsp == SCRIPTA_BA(np, wf_sel_done) + 8) sym_recover_scsi_int(np, HS_SEL_TIMEOUT); else sym_start_reset(np); } /* * chip exception handler for unexpected disconnect */ static void sym_int_udc (struct sym_hcb *np) { printf ("%s: unexpected disconnect\n", sym_name(np)); sym_recover_scsi_int(np, HS_UNEXPECTED); } /* * chip exception handler for SCSI bus mode change * * spi2-r12 11.2.3 says a transceiver mode change must * generate a reset event and a device that detects a reset * event shall initiate a hard reset. It says also that a * device that detects a mode change shall set data transfer * mode to eight bit asynchronous, etc... * So, just reinitializing all except chip should be enough. */ static void sym_int_sbmc(struct Scsi_Host *shost) { struct sym_hcb *np = sym_get_hcb(shost); u_char scsi_mode = INB(np, nc_stest4) & SMODE; /* * Notify user. */ printf("%s: SCSI BUS mode change from %s to %s.\n", sym_name(np), sym_scsi_bus_mode(np->scsi_mode), sym_scsi_bus_mode(scsi_mode)); /* * Should suspend command processing for a few seconds and * reinitialize all except the chip. */ sym_start_up(shost, 2); } /* * chip exception handler for SCSI parity error. * * When the chip detects a SCSI parity error and is * currently executing a (CH)MOV instruction, it does * not interrupt immediately, but tries to finish the * transfer of the current scatter entry before * interrupting. 
The following situations may occur: * * - The complete scatter entry has been transferred * without the device having changed phase. * The chip will then interrupt with the DSP pointing * to the instruction that follows the MOV. * * - A phase mismatch occurs before the MOV finished * and phase errors are to be handled by the C code. * The chip will then interrupt with both PAR and MA * conditions set. * * - A phase mismatch occurs before the MOV finished and * phase errors are to be handled by SCRIPTS. * The chip will load the DSP with the phase mismatch * JUMP address and interrupt the host processor. */ static void sym_int_par (struct sym_hcb *np, u_short sist) { u_char hsts = INB(np, HS_PRT); u32 dsp = INL(np, nc_dsp); u32 dbc = INL(np, nc_dbc); u32 dsa = INL(np, nc_dsa); u_char sbcl = INB(np, nc_sbcl); u_char cmd = dbc >> 24; int phase = cmd & 7; struct sym_ccb *cp = sym_ccb_from_dsa(np, dsa); if (printk_ratelimit()) printf("%s: SCSI parity error detected: SCR1=%d DBC=%x SBCL=%x\n", sym_name(np), hsts, dbc, sbcl); /* * Check that the chip is connected to the SCSI BUS. */ if (!(INB(np, nc_scntl1) & ISCON)) { sym_recover_scsi_int(np, HS_UNEXPECTED); return; } /* * If the nexus is not clearly identified, reset the bus. * We will try to do better later. */ if (!cp) goto reset_all; /* * Check instruction was a MOV, direction was INPUT and * ATN is asserted. */ if ((cmd & 0xc0) || !(phase & 1) || !(sbcl & 0x8)) goto reset_all; /* * Keep track of the parity error. */ OUTONB(np, HF_PRT, HF_EXT_ERR); cp->xerr_status |= XE_PARITY_ERR; /* * Prepare the message to send to the device. */ np->msgout[0] = (phase == 7) ? M_PARITY : M_ID_ERROR; /* * If the old phase was DATA IN phase, we have to deal with * the 3 situations described above. * For other input phases (MSG IN and STATUS), the device * must resend the whole thing that failed parity checking * or signal error. So, jumping to dispatcher should be OK. */ if (phase == 1 || phase == 5) { /* Phase mismatch handled by SCRIPTS */ if (dsp == SCRIPTB_BA(np, pm_handle)) OUTL_DSP(np, dsp); /* Phase mismatch handled by the C code */ else if (sist & MA) sym_int_ma (np); /* No phase mismatch occurred */ else { sym_set_script_dp (np, cp, dsp); OUTL_DSP(np, SCRIPTA_BA(np, dispatch)); } } else if (phase == 7) /* We definitely cannot handle parity errors */ #if 1 /* in message-in phase due to the relection */ goto reset_all; /* path and various message anticipations. */ #else OUTL_DSP(np, SCRIPTA_BA(np, clrack)); #endif else OUTL_DSP(np, SCRIPTA_BA(np, dispatch)); return; reset_all: sym_start_reset(np); return; } /* * chip exception handler for phase errors. * * We have to construct a new transfer descriptor, * to transfer the rest of the current block. */ static void sym_int_ma (struct sym_hcb *np) { u32 dbc; u32 rest; u32 dsp; u32 dsa; u32 nxtdsp; u32 *vdsp; u32 oadr, olen; u32 *tblp; u32 newcmd; u_int delta; u_char cmd; u_char hflags, hflags0; struct sym_pmc *pm; struct sym_ccb *cp; dsp = INL(np, nc_dsp); dbc = INL(np, nc_dbc); dsa = INL(np, nc_dsa); cmd = dbc >> 24; rest = dbc & 0xffffff; delta = 0; /* * locate matching cp if any. */ cp = sym_ccb_from_dsa(np, dsa); /* * Donnot take into account dma fifo and various buffers in * INPUT phase since the chip flushes everything before * raising the MA interrupt for interrupted INPUT phases. * For DATA IN phase, we will check for the SWIDE later. 
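 */

/*
 * A condensed model of the decision made in sym_int_par() above: pick the
 * one byte message the initiator sends back after a parity error, depending
 * on whether the error hit MSG IN (phase 7) or another input phase.  The
 * message codes are the standard SCSI-2 values; the enum and main() exist
 * only for this sketch.
 */
#include <stdio.h>

#define M_ID_ERROR      0x05    /* INITIATOR DETECTED ERROR */
#define M_PARITY        0x09    /* MESSAGE PARITY ERROR     */

enum par_action { DO_RESET_BUS, DO_SEND_MSG };

/*
 * cmd is the high byte of DBC (the interrupted instruction), sbcl the SCSI
 * bus control lines; the phase is held in the low three bits of cmd.
 */
static enum par_action parity_response(unsigned char cmd, unsigned char sbcl,
                                       unsigned char *msg)
{
        int phase = cmd & 7;

        /* Must be a MOV, in an input phase, with ATN asserted; else reset. */
        if ((cmd & 0xc0) || !(phase & 1) || !(sbcl & 0x8))
                return DO_RESET_BUS;

        *msg = (phase == 7) ? M_PARITY : M_ID_ERROR;

        /* The driver also gives up when the error was in MSG IN itself. */
        return (phase == 7) ? DO_RESET_BUS : DO_SEND_MSG;
}

int main(void)
{
        unsigned char msg = 0;
        enum par_action a = parity_response(0x01 /* MOV, DATA IN */, 0x08, &msg);

        printf("action=%d msg=0x%02x\n", (int)a, msg);
        return 0;
}

/*
 * As explained in the note above, the fifo accounting just below only runs
 * when the interrupted phase was not a DATA IN phase.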
*/ if ((cmd & 7) != 1 && (cmd & 7) != 5) { u_char ss0, ss2; if (np->features & FE_DFBC) delta = INW(np, nc_dfbc); else { u32 dfifo; /* * Read DFIFO, CTEST[4-6] using 1 PCI bus ownership. */ dfifo = INL(np, nc_dfifo); /* * Calculate remaining bytes in DMA fifo. * (CTEST5 = dfifo >> 16) */ if (dfifo & (DFS << 16)) delta = ((((dfifo >> 8) & 0x300) | (dfifo & 0xff)) - rest) & 0x3ff; else delta = ((dfifo & 0xff) - rest) & 0x7f; } /* * The data in the dma fifo has not been transferred to * the target -> add the amount to the rest * and clear the data. * Check the sstat2 register in case of wide transfer. */ rest += delta; ss0 = INB(np, nc_sstat0); if (ss0 & OLF) rest++; if (!(np->features & FE_C10)) if (ss0 & ORF) rest++; if (cp && (cp->phys.select.sel_scntl3 & EWS)) { ss2 = INB(np, nc_sstat2); if (ss2 & OLF1) rest++; if (!(np->features & FE_C10)) if (ss2 & ORF1) rest++; } /* * Clear fifos. */ OUTB(np, nc_ctest3, np->rv_ctest3 | CLF); /* dma fifo */ OUTB(np, nc_stest3, TE|CSF); /* scsi fifo */ } /* * log the information */ if (DEBUG_FLAGS & (DEBUG_TINY|DEBUG_PHASE)) printf ("P%x%x RL=%d D=%d ", cmd&7, INB(np, nc_sbcl)&7, (unsigned) rest, (unsigned) delta); /* * try to find the interrupted script command, * and the address at which to continue. */ vdsp = NULL; nxtdsp = 0; if (dsp > np->scripta_ba && dsp <= np->scripta_ba + np->scripta_sz) { vdsp = (u32 *)((char*)np->scripta0 + (dsp-np->scripta_ba-8)); nxtdsp = dsp; } else if (dsp > np->scriptb_ba && dsp <= np->scriptb_ba + np->scriptb_sz) { vdsp = (u32 *)((char*)np->scriptb0 + (dsp-np->scriptb_ba-8)); nxtdsp = dsp; } /* * log the information */ if (DEBUG_FLAGS & DEBUG_PHASE) { printf ("\nCP=%p DSP=%x NXT=%x VDSP=%p CMD=%x ", cp, (unsigned)dsp, (unsigned)nxtdsp, vdsp, cmd); } if (!vdsp) { printf ("%s: interrupted SCRIPT address not found.\n", sym_name (np)); goto reset_all; } if (!cp) { printf ("%s: SCSI phase error fixup: CCB already dequeued.\n", sym_name (np)); goto reset_all; } /* * get old startaddress and old length. */ oadr = scr_to_cpu(vdsp[1]); if (cmd & 0x10) { /* Table indirect */ tblp = (u32 *) ((char*) &cp->phys + oadr); olen = scr_to_cpu(tblp[0]); oadr = scr_to_cpu(tblp[1]); } else { tblp = (u32 *) 0; olen = scr_to_cpu(vdsp[0]) & 0xffffff; } if (DEBUG_FLAGS & DEBUG_PHASE) { printf ("OCMD=%x\nTBLP=%p OLEN=%x OADR=%x\n", (unsigned) (scr_to_cpu(vdsp[0]) >> 24), tblp, (unsigned) olen, (unsigned) oadr); } /* * check cmd against assumed interrupted script command. * If dt data phase, the MOVE instruction hasn't bit 4 of * the phase. */ if (((cmd & 2) ? cmd : (cmd & ~4)) != (scr_to_cpu(vdsp[0]) >> 24)) { sym_print_addr(cp->cmd, "internal error: cmd=%02x != %02x=(vdsp[0] >> 24)\n", cmd, scr_to_cpu(vdsp[0]) >> 24); goto reset_all; } /* * if old phase not dataphase, leave here. */ if (cmd & 2) { sym_print_addr(cp->cmd, "phase change %x-%x %d@%08x resid=%d.\n", cmd&7, INB(np, nc_sbcl)&7, (unsigned)olen, (unsigned)oadr, (unsigned)rest); goto unexpected_phase; } /* * Choose the correct PM save area. * * Look at the PM_SAVE SCRIPT if you want to understand * this stuff. The equivalent code is implemented in * SCRIPTS for the 895A, 896 and 1010 that are able to * handle PM from the SCRIPTS processor. 
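 */

/*
 * The byte-count arithmetic above is easy to get wrong, so here it is in
 * isolation: given a combined DFIFO/CTEST5 snapshot and the residual count
 * from DBC, compute how many bytes are still sitting in the DMA fifo.  The
 * masks mirror the code above; LARGE_FIFO_BIT is only a placeholder for the
 * DFS flag, whose real position lives in the chip headers.
 */
#include <stdio.h>
#include <stdint.h>

#define LARGE_FIFO_BIT  0x20    /* assumed position of DFS in CTEST5 */

/*
 * dfifo holds the fifo byte counter in its low bits and CTEST5 in bits
 * 16..23, as read with one 32 bit access; rest is DBC & 0xffffff.
 */
static unsigned int fifo_delta(uint32_t dfifo, uint32_t rest)
{
        if (dfifo & (LARGE_FIFO_BIT << 16))     /* 10 bit fifo counter */
                return ((((dfifo >> 8) & 0x300) | (dfifo & 0xff)) - rest)
                        & 0x3ff;
        return ((dfifo & 0xff) - rest) & 0x7f;  /* 7 bit fifo counter  */
}

int main(void)
{
        /* Small fifo: counter reads 0x45 while DBC still owes 0x40 bytes. */
        printf("delta=%u\n", fifo_delta(0x00000045, 0x40));
        return 0;
}

/*
 * The delta is added back to the residual and the fifos are cleared, so the
 * saved data pointer describes exactly what reached the target.  The host
 * flags read below pick which of the two PM save areas holds the context,
 * as described right before this sketch.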
*/ hflags0 = INB(np, HF_PRT); hflags = hflags0; if (hflags & (HF_IN_PM0 | HF_IN_PM1 | HF_DP_SAVED)) { if (hflags & HF_IN_PM0) nxtdsp = scr_to_cpu(cp->phys.pm0.ret); else if (hflags & HF_IN_PM1) nxtdsp = scr_to_cpu(cp->phys.pm1.ret); if (hflags & HF_DP_SAVED) hflags ^= HF_ACT_PM; } if (!(hflags & HF_ACT_PM)) { pm = &cp->phys.pm0; newcmd = SCRIPTA_BA(np, pm0_data); } else { pm = &cp->phys.pm1; newcmd = SCRIPTA_BA(np, pm1_data); } hflags &= ~(HF_IN_PM0 | HF_IN_PM1 | HF_DP_SAVED); if (hflags != hflags0) OUTB(np, HF_PRT, hflags); /* * fillin the phase mismatch context */ pm->sg.addr = cpu_to_scr(oadr + olen - rest); pm->sg.size = cpu_to_scr(rest); pm->ret = cpu_to_scr(nxtdsp); /* * If we have a SWIDE, * - prepare the address to write the SWIDE from SCRIPTS, * - compute the SCRIPTS address to restart from, * - move current data pointer context by one byte. */ nxtdsp = SCRIPTA_BA(np, dispatch); if ((cmd & 7) == 1 && cp && (cp->phys.select.sel_scntl3 & EWS) && (INB(np, nc_scntl2) & WSR)) { u32 tmp; /* * Set up the table indirect for the MOVE * of the residual byte and adjust the data * pointer context. */ tmp = scr_to_cpu(pm->sg.addr); cp->phys.wresid.addr = cpu_to_scr(tmp); pm->sg.addr = cpu_to_scr(tmp + 1); tmp = scr_to_cpu(pm->sg.size); cp->phys.wresid.size = cpu_to_scr((tmp&0xff000000) | 1); pm->sg.size = cpu_to_scr(tmp - 1); /* * If only the residual byte is to be moved, * no PM context is needed. */ if ((tmp&0xffffff) == 1) newcmd = pm->ret; /* * Prepare the address of SCRIPTS that will * move the residual byte to memory. */ nxtdsp = SCRIPTB_BA(np, wsr_ma_helper); } if (DEBUG_FLAGS & DEBUG_PHASE) { sym_print_addr(cp->cmd, "PM %x %x %x / %x %x %x.\n", hflags0, hflags, newcmd, (unsigned)scr_to_cpu(pm->sg.addr), (unsigned)scr_to_cpu(pm->sg.size), (unsigned)scr_to_cpu(pm->ret)); } /* * Restart the SCRIPTS processor. */ sym_set_script_dp (np, cp, newcmd); OUTL_DSP(np, nxtdsp); return; /* * Unexpected phase changes that occurs when the current phase * is not a DATA IN or DATA OUT phase are due to error conditions. * Such event may only happen when the SCRIPTS is using a * multibyte SCSI MOVE. * * Phase change Some possible cause * * COMMAND --> MSG IN SCSI parity error detected by target. * COMMAND --> STATUS Bad command or refused by target. * MSG OUT --> MSG IN Message rejected by target. * MSG OUT --> COMMAND Bogus target that discards extended * negotiation messages. * * The code below does not care of the new phase and so * trusts the target. Why to annoy it ? * If the interrupted phase is COMMAND phase, we restart at * dispatcher. * If a target does not get all the messages after selection, * the code assumes blindly that the target discards extended * messages and clears the negotiation status. * If the target does not want all our response to negotiation, * we force a SIR_NEGO_PROTO interrupt (it is a hack that avoids * bloat for such a should_not_happen situation). * In all other situation, we reset the BUS. * Are these assumptions reasonable ? (Wait and see ...) */ unexpected_phase: dsp -= 8; nxtdsp = 0; switch (cmd & 7) { case 2: /* COMMAND phase */ nxtdsp = SCRIPTA_BA(np, dispatch); break; #if 0 case 3: /* STATUS phase */ nxtdsp = SCRIPTA_BA(np, dispatch); break; #endif case 6: /* MSG OUT phase */ /* * If the device may want to use untagged when we want * tagged, we prepare an IDENTIFY without disc. granted, * since we will not be able to handle reselect. * Otherwise, we just don't care. 
*/ if (dsp == SCRIPTA_BA(np, send_ident)) { if (cp->tag != NO_TAG && olen - rest <= 3) { cp->host_status = HS_BUSY; np->msgout[0] = IDENTIFY(0, cp->lun); nxtdsp = SCRIPTB_BA(np, ident_break_atn); } else nxtdsp = SCRIPTB_BA(np, ident_break); } else if (dsp == SCRIPTB_BA(np, send_wdtr) || dsp == SCRIPTB_BA(np, send_sdtr) || dsp == SCRIPTB_BA(np, send_ppr)) { nxtdsp = SCRIPTB_BA(np, nego_bad_phase); if (dsp == SCRIPTB_BA(np, send_ppr)) { struct scsi_device *dev = cp->cmd->device; dev->ppr = 0; } } break; #if 0 case 7: /* MSG IN phase */ nxtdsp = SCRIPTA_BA(np, clrack); break; #endif } if (nxtdsp) { OUTL_DSP(np, nxtdsp); return; } reset_all: sym_start_reset(np); } /* * chip interrupt handler * * In normal situations, interrupt conditions occur one at * a time. But when something bad happens on the SCSI BUS, * the chip may raise several interrupt flags before * stopping and interrupting the CPU. The additionnal * interrupt flags are stacked in some extra registers * after the SIP and/or DIP flag has been raised in the * ISTAT. After the CPU has read the interrupt condition * flag from SIST or DSTAT, the chip unstacks the other * interrupt flags and sets the corresponding bits in * SIST or DSTAT. Since the chip starts stacking once the * SIP or DIP flag is set, there is a small window of time * where the stacking does not occur. * * Typically, multiple interrupt conditions may happen in * the following situations: * * - SCSI parity error + Phase mismatch (PAR|MA) * When an parity error is detected in input phase * and the device switches to msg-in phase inside a * block MOV. * - SCSI parity error + Unexpected disconnect (PAR|UDC) * When a stupid device does not want to handle the * recovery of an SCSI parity error. * - Some combinations of STO, PAR, UDC, ... * When using non compliant SCSI stuff, when user is * doing non compliant hot tampering on the BUS, when * something really bad happens to a device, etc ... * * The heuristic suggested by SYMBIOS to handle * multiple interrupts is to try unstacking all * interrupts conditions and to handle them on some * priority based on error severity. * This will work when the unstacking has been * successful, but we cannot be 100 % sure of that, * since the CPU may have been faster to unstack than * the chip is able to stack. Hmmm ... But it seems that * such a situation is very unlikely to happen. * * If this happen, for example STO caught by the CPU * then UDC happenning before the CPU have restarted * the SCRIPTS, the driver may wrongly complete the * same command on UDC, since the SCRIPTS didn't restart * and the DSA still points to the same command. * We avoid this situation by setting the DSA to an * invalid value when the CCB is completed and before * restarting the SCRIPTS. * * Another issue is that we need some section of our * recovery procedures to be somehow uninterruptible but * the SCRIPTS processor does not provides such a * feature. For this reason, we handle recovery preferently * from the C code and check against some SCRIPTS critical * sections from the C code. * * Hopefully, the interrupt handling of the driver is now * able to resist to weird BUS error conditions, but donnot * ask me for any guarantee that it will never fail. :-) * Use at your own decision and risk. */ irqreturn_t sym_interrupt(struct Scsi_Host *shost) { struct sym_data *sym_data = shost_priv(shost); struct sym_hcb *np = sym_data->ncb; struct pci_dev *pdev = sym_data->pdev; u_char istat, istatc; u_char dstat; u_short sist; /* * interrupt on the fly ? 
* (SCRIPTS may still be running) * * A `dummy read' is needed to ensure that the * clear of the INTF flag reaches the device * and that posted writes are flushed to memory * before the scanning of the DONE queue. * Note that SCRIPTS also (dummy) read to memory * prior to deliver the INTF interrupt condition. */ istat = INB(np, nc_istat); if (istat & INTF) { OUTB(np, nc_istat, (istat & SIGP) | INTF | np->istat_sem); istat |= INB(np, nc_istat); /* DUMMY READ */ if (DEBUG_FLAGS & DEBUG_TINY) printf ("F "); sym_wakeup_done(np); } if (!(istat & (SIP|DIP))) return (istat & INTF) ? IRQ_HANDLED : IRQ_NONE; #if 0 /* We should never get this one */ if (istat & CABRT) OUTB(np, nc_istat, CABRT); #endif /* * PAR and MA interrupts may occur at the same time, * and we need to know of both in order to handle * this situation properly. We try to unstack SCSI * interrupts for that reason. BTW, I dislike a LOT * such a loop inside the interrupt routine. * Even if DMA interrupt stacking is very unlikely to * happen, we also try unstacking these ones, since * this has no performance impact. */ sist = 0; dstat = 0; istatc = istat; do { if (istatc & SIP) sist |= INW(np, nc_sist); if (istatc & DIP) dstat |= INB(np, nc_dstat); istatc = INB(np, nc_istat); istat |= istatc; /* Prevent deadlock waiting on a condition that may * never clear. */ if (unlikely(sist == 0xffff && dstat == 0xff)) { if (pci_channel_offline(pdev)) return IRQ_NONE; } } while (istatc & (SIP|DIP)); if (DEBUG_FLAGS & DEBUG_TINY) printf ("<%d|%x:%x|%x:%x>", (int)INB(np, nc_scr0), dstat,sist, (unsigned)INL(np, nc_dsp), (unsigned)INL(np, nc_dbc)); /* * On paper, a memory read barrier may be needed here to * prevent out of order LOADs by the CPU from having * prefetched stale data prior to DMA having occurred. * And since we are paranoid ... :) */ MEMORY_READ_BARRIER(); /* * First, interrupts we want to service cleanly. * * Phase mismatch (MA) is the most frequent interrupt * for chip earlier than the 896 and so we have to service * it as quickly as possible. * A SCSI parity error (PAR) may be combined with a phase * mismatch condition (MA). * Programmed interrupts (SIR) are used to call the C code * from SCRIPTS. * The single step interrupt (SSI) is not used in this * driver. */ if (!(sist & (STO|GEN|HTH|SGE|UDC|SBMC|RST)) && !(dstat & (MDPE|BF|ABRT|IID))) { if (sist & PAR) sym_int_par (np, sist); else if (sist & MA) sym_int_ma (np); else if (dstat & SIR) sym_int_sir(np); else if (dstat & SSI) OUTONB_STD(); else goto unknown_int; return IRQ_HANDLED; } /* * Now, interrupts that donnot happen in normal * situations and that we may need to recover from. * * On SCSI RESET (RST), we reset everything. * On SCSI BUS MODE CHANGE (SBMC), we complete all * active CCBs with RESET status, prepare all devices * for negotiating again and restart the SCRIPTS. * On STO and UDC, we complete the CCB with the corres- * ponding status and restart the SCRIPTS. */ if (sist & RST) { printf("%s: SCSI BUS reset detected.\n", sym_name(np)); sym_start_up(shost, 1); return IRQ_HANDLED; } OUTB(np, nc_ctest3, np->rv_ctest3 | CLF); /* clear dma fifo */ OUTB(np, nc_stest3, TE|CSF); /* clear scsi fifo */ if (!(sist & (GEN|HTH|SGE)) && !(dstat & (MDPE|BF|ABRT|IID))) { if (sist & SBMC) sym_int_sbmc(shost); else if (sist & STO) sym_int_sto (np); else if (sist & UDC) sym_int_udc (np); else goto unknown_int; return IRQ_HANDLED; } /* * Now, interrupts we are not able to recover cleanly. * * Log message for hard errors. * Reset everything. 
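 */

/*
 * The unstacking loop above, reduced to a user-space model: keep draining
 * SIST/DSTAT while ISTAT still reports stacked SCSI or DMA conditions,
 * OR-ing everything together so nothing is lost.  The SIP/DIP values and the
 * mock register reads are assumptions of this sketch.
 */
#include <stdio.h>

#define SIP 0x02                /* assumed: SCSI interrupt pending bit */
#define DIP 0x01                /* assumed: DMA interrupt pending bit  */

/* Mock registers: pretend one extra SCSI condition was stacked. */
static unsigned char mock_istat[] = { SIP, SIP, 0 };
static unsigned int mock_sist[] = { 0x0400, 0x0080 };
static int istat_idx, sist_idx;

static unsigned char read_istat(void) { return mock_istat[istat_idx++]; }
static unsigned int read_sist(void) { return mock_sist[sist_idx++]; }
static unsigned int read_dstat(void) { return 0; }

int main(void)
{
        unsigned char istat = read_istat(), istatc = istat;
        unsigned int sist = 0, dstat = 0;

        do {
                if (istatc & SIP)
                        sist |= read_sist();
                if (istatc & DIP)
                        dstat |= read_dstat();
                istatc = read_istat();  /* did more conditions stack up? */
                istat |= istatc;
        } while (istatc & (SIP | DIP));

        printf("sist=0x%04x dstat=0x%02x\n", sist, dstat);
        return 0;
}

/*
 * The driver adds an escape hatch for a surprise-removed device (all ones
 * from both registers) so this loop cannot spin forever.  Anything still
 * unhandled at the point below is a hard error: it is logged and the
 * controller is reset.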
*/ sym_log_hard_error(shost, sist, dstat); if ((sist & (GEN|HTH|SGE)) || (dstat & (MDPE|BF|ABRT|IID))) { sym_start_reset(np); return IRQ_HANDLED; } unknown_int: /* * We just miss the cause of the interrupt. :( * Print a message. The timeout will do the real work. */ printf( "%s: unknown interrupt(s) ignored, " "ISTAT=0x%x DSTAT=0x%x SIST=0x%x\n", sym_name(np), istat, dstat, sist); return IRQ_NONE; } /* * Dequeue from the START queue all CCBs that match * a given target/lun/task condition (-1 means all), * and move them from the BUSY queue to the COMP queue * with DID_SOFT_ERROR status condition. * This function is used during error handling/recovery. * It is called with SCRIPTS not running. */ static int sym_dequeue_from_squeue(struct sym_hcb *np, int i, int target, int lun, int task) { int j; struct sym_ccb *cp; /* * Make sure the starting index is within range. */ assert((i >= 0) && (i < 2*MAX_QUEUE)); /* * Walk until end of START queue and dequeue every job * that matches the target/lun/task condition. */ j = i; while (i != np->squeueput) { cp = sym_ccb_from_dsa(np, scr_to_cpu(np->squeue[i])); assert(cp); #ifdef SYM_CONF_IARB_SUPPORT /* Forget hints for IARB, they may be no longer relevant */ cp->host_flags &= ~HF_HINT_IARB; #endif if ((target == -1 || cp->target == target) && (lun == -1 || cp->lun == lun) && (task == -1 || cp->tag == task)) { #ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING sym_set_cam_status(cp->cmd, DID_SOFT_ERROR); #else sym_set_cam_status(cp->cmd, DID_REQUEUE); #endif sym_remque(&cp->link_ccbq); sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq); } else { if (i != j) np->squeue[j] = np->squeue[i]; if ((j += 2) >= MAX_QUEUE*2) j = 0; } if ((i += 2) >= MAX_QUEUE*2) i = 0; } if (i != j) /* Copy back the idle task if needed */ np->squeue[j] = np->squeue[i]; np->squeueput = j; /* Update our current start queue pointer */ return (i - j) / 2; } /* * chip handler for bad SCSI status condition * * In case of bad SCSI status, we unqueue all the tasks * currently queued to the controller but not yet started * and then restart the SCRIPTS processor immediately. * * QUEUE FULL and BUSY conditions are handled the same way. * Basically all the not yet started tasks are requeued in * device queue and the queue is frozen until a completion. * * For CHECK CONDITION and COMMAND TERMINATED status, we use * the CCB of the failed command to prepare a REQUEST SENSE * SCSI command and queue it to the controller queue. * * SCRATCHA is assumed to have been loaded with STARTPOS * before the SCRIPTS called the C code. */ static void sym_sir_bad_scsi_status(struct sym_hcb *np, int num, struct sym_ccb *cp) { u32 startp; u_char s_status = cp->ssss_status; u_char h_flags = cp->host_flags; int msglen; int i; /* * Compute the index of the next job to start from SCRIPTS. */ i = (INL(np, nc_scratcha) - np->squeue_ba) / 4; /* * The last CCB queued used for IARB hint may be * no longer relevant. Forget it. */ #ifdef SYM_CONF_IARB_SUPPORT if (np->last_cp) np->last_cp = 0; #endif /* * Now deal with the SCSI status. */ switch(s_status) { case S_BUSY: case S_QUEUE_FULL: if (sym_verbose >= 2) { sym_print_addr(cp->cmd, "%s\n", s_status == S_BUSY ? "BUSY" : "QUEUE FULL\n"); } fallthrough; default: /* S_INT, S_INT_COND_MET, S_CONFLICT */ sym_complete_error (np, cp); break; case S_TERMINATED: case S_CHECK_COND: /* * If we get an SCSI error when requesting sense, give up. 
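 */

/*
 * A user-space model of the ring compaction done by sym_dequeue_from_squeue()
 * above: walk the start queue from the chip's next-fetch position up to the
 * driver's put position, slide down the jobs we keep and pull out the ones
 * that match.  Entries sit in every other slot, mirroring the DSA/link pairs
 * of the real queue; the job values and match() rule are made up here.
 */
#include <stdio.h>

#define SLOTS 16                        /* stand-in for MAX_QUEUE*2 */

static unsigned int squeue[SLOTS];
static int squeueput;

static int match(unsigned int job) { return (job & 0xf0) == 0x30; }

/* i is where the chip would fetch next; returns the number of jobs removed. */
static int dequeue_matching(int i)
{
        int j = i, removed = 0;

        while (i != squeueput) {
                if (match(squeue[i])) {
                        printf("requeue job 0x%x\n", squeue[i]);
                        ++removed;      /* driver: move CCB to COMP queue */
                } else {
                        if (i != j)
                                squeue[j] = squeue[i];
                        if ((j += 2) >= SLOTS)
                                j = 0;
                }
                if ((i += 2) >= SLOTS)
                        i = 0;
        }
        if (i != j)                     /* carry the idle-task slot over */
                squeue[j] = squeue[i];
        squeueput = j;
        return removed;
}

int main(void)
{
        squeue[0] = 0x11; squeue[2] = 0x31; squeue[4] = 0x12; squeueput = 6;
        printf("removed=%d, new put=%d\n", dequeue_matching(0), squeueput);
        return 0;
}

/*
 * Because this only runs while SCRIPTS is stopped, the put pointer can be
 * rewritten safely once the surviving jobs are packed together.  As the note
 * above says, a failure while already auto-sensing is simply completed as an
 * error below.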
*/ if (h_flags & HF_SENSE) { sym_complete_error (np, cp); break; } /* * Dequeue all queued CCBs for that device not yet started, * and restart the SCRIPTS processor immediately. */ sym_dequeue_from_squeue(np, i, cp->target, cp->lun, -1); OUTL_DSP(np, SCRIPTA_BA(np, start)); /* * Save some info of the actual IO. * Compute the data residual. */ cp->sv_scsi_status = cp->ssss_status; cp->sv_xerr_status = cp->xerr_status; cp->sv_resid = sym_compute_residual(np, cp); /* * Prepare all needed data structures for * requesting sense data. */ cp->scsi_smsg2[0] = IDENTIFY(0, cp->lun); msglen = 1; /* * If we are currently using anything different from * async. 8 bit data transfers with that target, * start a negotiation, since the device may want * to report us a UNIT ATTENTION condition due to * a cause we currently ignore, and we donnot want * to be stuck with WIDE and/or SYNC data transfer. * * cp->nego_status is filled by sym_prepare_nego(). */ cp->nego_status = 0; msglen += sym_prepare_nego(np, cp, &cp->scsi_smsg2[msglen]); /* * Message table indirect structure. */ cp->phys.smsg.addr = CCB_BA(cp, scsi_smsg2); cp->phys.smsg.size = cpu_to_scr(msglen); /* * sense command */ cp->phys.cmd.addr = CCB_BA(cp, sensecmd); cp->phys.cmd.size = cpu_to_scr(6); /* * patch requested size into sense command */ cp->sensecmd[0] = REQUEST_SENSE; cp->sensecmd[1] = 0; if (cp->cmd->device->scsi_level <= SCSI_2 && cp->lun <= 7) cp->sensecmd[1] = cp->lun << 5; cp->sensecmd[4] = SYM_SNS_BBUF_LEN; cp->data_len = SYM_SNS_BBUF_LEN; /* * sense data */ memset(cp->sns_bbuf, 0, SYM_SNS_BBUF_LEN); cp->phys.sense.addr = CCB_BA(cp, sns_bbuf); cp->phys.sense.size = cpu_to_scr(SYM_SNS_BBUF_LEN); /* * requeue the command. */ startp = SCRIPTB_BA(np, sdata_in); cp->phys.head.savep = cpu_to_scr(startp); cp->phys.head.lastp = cpu_to_scr(startp); cp->startp = cpu_to_scr(startp); cp->goalp = cpu_to_scr(startp + 16); cp->host_xflags = 0; cp->host_status = cp->nego_status ? HS_NEGOTIATE : HS_BUSY; cp->ssss_status = S_ILLEGAL; cp->host_flags = (HF_SENSE|HF_DATA_IN); cp->xerr_status = 0; cp->extra_bytes = 0; cp->phys.head.go.start = cpu_to_scr(SCRIPTA_BA(np, select)); /* * Requeue the command. */ sym_put_start_queue(np, cp); /* * Give back to upper layer everything we have dequeued. */ sym_flush_comp_queue(np, 0); break; } } /* * After a device has accepted some management message * as BUS DEVICE RESET, ABORT TASK, etc ..., or when * a device signals a UNIT ATTENTION condition, some * tasks are thrown away by the device. We are required * to reflect that on our tasks list since the device * will never complete these tasks. * * This function move from the BUSY queue to the COMP * queue all disconnected CCBs for a given target that * match the following criteria: * - lun=-1 means any logical UNIT otherwise a given one. * - task=-1 means any task, otherwise a given one. */ int sym_clear_tasks(struct sym_hcb *np, int cam_status, int target, int lun, int task) { SYM_QUEHEAD qtmp, *qp; int i = 0; struct sym_ccb *cp; /* * Move the entire BUSY queue to our temporary queue. */ sym_que_init(&qtmp); sym_que_splice(&np->busy_ccbq, &qtmp); sym_que_init(&np->busy_ccbq); /* * Put all CCBs that matches our criteria into * the COMP queue and put back other ones into * the BUSY queue. 
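 */

/*
 * The 6 byte CDB built above is small enough to show on its own.  This
 * sketch assembles the same REQUEST SENSE command for a hypothetical LUN and
 * buffer size; the 0x03 opcode and the "LUN in bits 5..7 for SCSI-2 and
 * older" rule are standard SCSI, the remaining constants are made up.
 */
#include <stdio.h>

#define REQUEST_SENSE_OP        0x03
#define SNS_BUF_LEN             32      /* stand-in for SYM_SNS_BBUF_LEN */

static void build_request_sense(unsigned char cdb[6], int lun, int old_scsi)
{
        cdb[0] = REQUEST_SENSE_OP;
        cdb[1] = 0;
        if (old_scsi && lun <= 7)
                cdb[1] = lun << 5;      /* pre-SCSI-3 LUN addressing */
        cdb[2] = 0;
        cdb[3] = 0;
        cdb[4] = SNS_BUF_LEN;           /* allocation length */
        cdb[5] = 0;                     /* control byte      */
}

int main(void)
{
        unsigned char cdb[6];
        int i;

        build_request_sense(cdb, 2, 1);
        for (i = 0; i < 6; i++)
                printf("%02x ", cdb[i]);
        printf("\n");
        return 0;
}

/*
 * Around this CDB the driver also rebuilds the message table (IDENTIFY plus
 * a possible renegotiation) and points the data phase at its bounce buffer,
 * so the sense fetch runs through the normal SCRIPTS path.  The loop below
 * then sorts the temporary queue back into the COMP and BUSY queues.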
*/ while ((qp = sym_remque_head(&qtmp)) != NULL) { struct scsi_cmnd *cmd; cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); cmd = cp->cmd; if (cp->host_status != HS_DISCONNECT || cp->target != target || (lun != -1 && cp->lun != lun) || (task != -1 && (cp->tag != NO_TAG && cp->scsi_smsg[2] != task))) { sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq); continue; } sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq); /* Preserve the software timeout condition */ if (sym_get_cam_status(cmd) != DID_TIME_OUT) sym_set_cam_status(cmd, cam_status); ++i; #if 0 printf("XXXX TASK @%p CLEARED\n", cp); #endif } return i; } /* * chip handler for TASKS recovery * * We cannot safely abort a command, while the SCRIPTS * processor is running, since we just would be in race * with it. * * As long as we have tasks to abort, we keep the SEM * bit set in the ISTAT. When this bit is set, the * SCRIPTS processor interrupts (SIR_SCRIPT_STOPPED) * each time it enters the scheduler. * * If we have to reset a target, clear tasks of a unit, * or to perform the abort of a disconnected job, we * restart the SCRIPTS for selecting the target. Once * selected, the SCRIPTS interrupts (SIR_TARGET_SELECTED). * If it loses arbitration, the SCRIPTS will interrupt again * the next time it will enter its scheduler, and so on ... * * On SIR_TARGET_SELECTED, we scan for the more * appropriate thing to do: * * - If nothing, we just sent a M_ABORT message to the * target to get rid of the useless SCSI bus ownership. * According to the specs, no tasks shall be affected. * - If the target is to be reset, we send it a M_RESET * message. * - If a logical UNIT is to be cleared , we send the * IDENTIFY(lun) + M_ABORT. * - If an untagged task is to be aborted, we send the * IDENTIFY(lun) + M_ABORT. * - If a tagged task is to be aborted, we send the * IDENTIFY(lun) + task attributes + M_ABORT_TAG. * * Once our 'kiss of death' :) message has been accepted * by the target, the SCRIPTS interrupts again * (SIR_ABORT_SENT). On this interrupt, we complete * all the CCBs that should have been aborted by the * target according to our message. */ static void sym_sir_task_recovery(struct sym_hcb *np, int num) { SYM_QUEHEAD *qp; struct sym_ccb *cp; struct sym_tcb *tp = NULL; /* gcc isn't quite smart enough yet */ struct scsi_target *starget; int target=-1, lun=-1, task; int i, k; switch(num) { /* * The SCRIPTS processor stopped before starting * the next command in order to allow us to perform * some task recovery. */ case SIR_SCRIPT_STOPPED: /* * Do we have any target to reset or unit to clear ? */ for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) { tp = &np->target[i]; if (tp->to_reset || (tp->lun0p && tp->lun0p->to_clear)) { target = i; break; } if (!tp->lunmp) continue; for (k = 1 ; k < SYM_CONF_MAX_LUN ; k++) { if (tp->lunmp[k] && tp->lunmp[k]->to_clear) { target = i; break; } } if (target != -1) break; } /* * If not, walk the busy queue for any * disconnected CCB to be aborted. */ if (target == -1) { FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) { cp = sym_que_entry(qp,struct sym_ccb,link_ccbq); if (cp->host_status != HS_DISCONNECT) continue; if (cp->to_abort) { target = cp->target; break; } } } /* * If some target is to be selected, * prepare and start the selection. 
*/ if (target != -1) { tp = &np->target[target]; np->abrt_sel.sel_id = target; np->abrt_sel.sel_scntl3 = tp->head.wval; np->abrt_sel.sel_sxfer = tp->head.sval; OUTL(np, nc_dsa, np->hcb_ba); OUTL_DSP(np, SCRIPTB_BA(np, sel_for_abort)); return; } /* * Now look for a CCB to abort that haven't started yet. * Btw, the SCRIPTS processor is still stopped, so * we are not in race. */ i = 0; cp = NULL; FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) { cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); if (cp->host_status != HS_BUSY && cp->host_status != HS_NEGOTIATE) continue; if (!cp->to_abort) continue; #ifdef SYM_CONF_IARB_SUPPORT /* * If we are using IMMEDIATE ARBITRATION, we donnot * want to cancel the last queued CCB, since the * SCRIPTS may have anticipated the selection. */ if (cp == np->last_cp) { cp->to_abort = 0; continue; } #endif i = 1; /* Means we have found some */ break; } if (!i) { /* * We are done, so we donnot need * to synchronize with the SCRIPTS anylonger. * Remove the SEM flag from the ISTAT. */ np->istat_sem = 0; OUTB(np, nc_istat, SIGP); break; } /* * Compute index of next position in the start * queue the SCRIPTS intends to start and dequeue * all CCBs for that device that haven't been started. */ i = (INL(np, nc_scratcha) - np->squeue_ba) / 4; i = sym_dequeue_from_squeue(np, i, cp->target, cp->lun, -1); /* * Make sure at least our IO to abort has been dequeued. */ #ifndef SYM_OPT_HANDLE_DEVICE_QUEUEING assert(i && sym_get_cam_status(cp->cmd) == DID_SOFT_ERROR); #else sym_remque(&cp->link_ccbq); sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq); #endif /* * Keep track in cam status of the reason of the abort. */ if (cp->to_abort == 2) sym_set_cam_status(cp->cmd, DID_TIME_OUT); else sym_set_cam_status(cp->cmd, DID_ABORT); /* * Complete with error everything that we have dequeued. */ sym_flush_comp_queue(np, 0); break; /* * The SCRIPTS processor has selected a target * we may have some manual recovery to perform for. */ case SIR_TARGET_SELECTED: target = INB(np, nc_sdid) & 0xf; tp = &np->target[target]; np->abrt_tbl.addr = cpu_to_scr(vtobus(np->abrt_msg)); /* * If the target is to be reset, prepare a * M_RESET message and clear the to_reset flag * since we donnot expect this operation to fail. */ if (tp->to_reset) { np->abrt_msg[0] = M_RESET; np->abrt_tbl.size = 1; tp->to_reset = 0; break; } /* * Otherwise, look for some logical unit to be cleared. */ if (tp->lun0p && tp->lun0p->to_clear) lun = 0; else if (tp->lunmp) { for (k = 1 ; k < SYM_CONF_MAX_LUN ; k++) { if (tp->lunmp[k] && tp->lunmp[k]->to_clear) { lun = k; break; } } } /* * If a logical unit is to be cleared, prepare * an IDENTIFY(lun) + ABORT MESSAGE. */ if (lun != -1) { struct sym_lcb *lp = sym_lp(tp, lun); lp->to_clear = 0; /* We don't expect to fail here */ np->abrt_msg[0] = IDENTIFY(0, lun); np->abrt_msg[1] = M_ABORT; np->abrt_tbl.size = 2; break; } /* * Otherwise, look for some disconnected job to * abort for this target. */ i = 0; cp = NULL; FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) { cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); if (cp->host_status != HS_DISCONNECT) continue; if (cp->target != target) continue; if (!cp->to_abort) continue; i = 1; /* Means we have some */ break; } /* * If we have none, probably since the device has * completed the command before we won abitration, * send a M_ABORT message without IDENTIFY. * According to the specs, the device must just * disconnect the BUS and not abort any task. 
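 */

/*
 * The recovery handler ultimately just chooses one of a few fixed message
 * sequences.  This sketch builds the same tables for the cases handled above
 * and in the code that follows (target reset, clear unit or untagged abort,
 * tagged abort); the message byte values are the standard SCSI codes, while
 * the struct, the LUN width and main() are assumptions of the sketch.
 */
#include <stdio.h>

#define MSG_ABORT       0x06
#define MSG_BDR         0x0c    /* BUS DEVICE RESET */
#define MSG_ABORT_TAG   0x0d
#define MSG_IDENTIFY    0x80    /* IDENTIFY with disconnect not granted */

struct abort_msg {
        unsigned char buf[4];
        int len;
};

static void build_reset(struct abort_msg *m)
{
        m->buf[0] = MSG_BDR;
        m->len = 1;
}

/* tag < 0 means the task to abort is untagged. */
static void build_abort(struct abort_msg *m, int lun, int tag,
                        unsigned char tag_msg)
{
        m->buf[0] = MSG_IDENTIFY | (lun & 0x3f);
        if (tag < 0) {
                m->buf[1] = MSG_ABORT;
                m->len = 2;
        } else {
                m->buf[1] = tag_msg;            /* SIMPLE/ORDERED queue tag */
                m->buf[2] = (unsigned char)tag;
                m->buf[3] = MSG_ABORT_TAG;
                m->len = 4;
        }
}

int main(void)
{
        struct abort_msg m;
        int i;

        build_reset(&m);
        printf("reset: %02x\n", m.buf[0]);
        build_abort(&m, 3, 9, 0x20 /* SIMPLE queue tag */);
        printf("abort tag:");
        for (i = 0; i < m.len; i++)
                printf(" %02x", m.buf[i]);
        printf("\n");
        return 0;
}

/*
 * Once the target accepts the message, SIR_ABORT_SENT fires and the driver
 * uses these same bytes to decide which CCBs it must now complete itself.
 * The bare M_ABORT case described above is the first branch just below.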
*/ if (!i) { np->abrt_msg[0] = M_ABORT; np->abrt_tbl.size = 1; break; } /* * We have some task to abort. * Set the IDENTIFY(lun) */ np->abrt_msg[0] = IDENTIFY(0, cp->lun); /* * If we want to abort an untagged command, we * will send a IDENTIFY + M_ABORT. * Otherwise (tagged command), we will send * a IDENTITFY + task attributes + ABORT TAG. */ if (cp->tag == NO_TAG) { np->abrt_msg[1] = M_ABORT; np->abrt_tbl.size = 2; } else { np->abrt_msg[1] = cp->scsi_smsg[1]; np->abrt_msg[2] = cp->scsi_smsg[2]; np->abrt_msg[3] = M_ABORT_TAG; np->abrt_tbl.size = 4; } /* * Keep track of software timeout condition, since the * peripheral driver may not count retries on abort * conditions not due to timeout. */ if (cp->to_abort == 2) sym_set_cam_status(cp->cmd, DID_TIME_OUT); cp->to_abort = 0; /* We donnot expect to fail here */ break; /* * The target has accepted our message and switched * to BUS FREE phase as we expected. */ case SIR_ABORT_SENT: target = INB(np, nc_sdid) & 0xf; tp = &np->target[target]; starget = tp->starget; /* ** If we didn't abort anything, leave here. */ if (np->abrt_msg[0] == M_ABORT) break; /* * If we sent a M_RESET, then a hardware reset has * been performed by the target. * - Reset everything to async 8 bit * - Tell ourself to negotiate next time :-) * - Prepare to clear all disconnected CCBs for * this target from our task list (lun=task=-1) */ lun = -1; task = -1; if (np->abrt_msg[0] == M_RESET) { tp->head.sval = 0; tp->head.wval = np->rv_scntl3; tp->head.uval = 0; spi_period(starget) = 0; spi_offset(starget) = 0; spi_width(starget) = 0; spi_iu(starget) = 0; spi_dt(starget) = 0; spi_qas(starget) = 0; tp->tgoal.check_nego = 1; tp->tgoal.renego = 0; } /* * Otherwise, check for the LUN and TASK(s) * concerned by the cancelation. * If it is not ABORT_TAG then it is CLEAR_QUEUE * or an ABORT message :-) */ else { lun = np->abrt_msg[0] & 0x3f; if (np->abrt_msg[1] == M_ABORT_TAG) task = np->abrt_msg[2]; } /* * Complete all the CCBs the device should have * aborted due to our 'kiss of death' message. */ i = (INL(np, nc_scratcha) - np->squeue_ba) / 4; sym_dequeue_from_squeue(np, i, target, lun, -1); sym_clear_tasks(np, DID_ABORT, target, lun, task); sym_flush_comp_queue(np, 0); /* * If we sent a BDR, make upper layer aware of that. */ if (np->abrt_msg[0] == M_RESET) starget_printk(KERN_NOTICE, starget, "has been reset\n"); break; } /* * Print to the log the message we intend to send. */ if (num == SIR_TARGET_SELECTED) { dev_info(&tp->starget->dev, "control msgout:"); sym_printl_hex(np->abrt_msg, np->abrt_tbl.size); np->abrt_tbl.size = cpu_to_scr(np->abrt_tbl.size); } /* * Let the SCRIPTS processor continue. */ OUTONB_STD(); } /* * Gerard's alchemy:) that deals with the data * pointer for both MDP and the residual calculation. * * I didn't want to bloat the code by more than 200 * lines for the handling of both MDP and the residual. * This has been achieved by using a data pointer * representation consisting in an index in the data * array (dp_sg) and a negative offset (dp_ofs) that * have the following meaning: * * - dp_sg = SYM_CONF_MAX_SG * we are at the end of the data script. * - dp_sg < SYM_CONF_MAX_SG * dp_sg points to the next entry of the scatter array * we want to transfer. * - dp_ofs < 0 * dp_ofs represents the residual of bytes of the * previous entry scatter entry we will send first. * - dp_ofs = 0 * no residual to send first. 
* * The function sym_evaluate_dp() accepts an arbitray * offset (basically from the MDP message) and returns * the corresponding values of dp_sg and dp_ofs. */ static int sym_evaluate_dp(struct sym_hcb *np, struct sym_ccb *cp, u32 scr, int *ofs) { u32 dp_scr; int dp_ofs, dp_sg, dp_sgmin; int tmp; struct sym_pmc *pm; /* * Compute the resulted data pointer in term of a script * address within some DATA script and a signed byte offset. */ dp_scr = scr; dp_ofs = *ofs; if (dp_scr == SCRIPTA_BA(np, pm0_data)) pm = &cp->phys.pm0; else if (dp_scr == SCRIPTA_BA(np, pm1_data)) pm = &cp->phys.pm1; else pm = NULL; if (pm) { dp_scr = scr_to_cpu(pm->ret); dp_ofs -= scr_to_cpu(pm->sg.size) & 0x00ffffff; } /* * If we are auto-sensing, then we are done. */ if (cp->host_flags & HF_SENSE) { *ofs = dp_ofs; return 0; } /* * Deduce the index of the sg entry. * Keep track of the index of the first valid entry. * If result is dp_sg = SYM_CONF_MAX_SG, then we are at the * end of the data. */ tmp = scr_to_cpu(cp->goalp); dp_sg = SYM_CONF_MAX_SG; if (dp_scr != tmp) dp_sg -= (tmp - 8 - (int)dp_scr) / (2*4); dp_sgmin = SYM_CONF_MAX_SG - cp->segments; /* * Move to the sg entry the data pointer belongs to. * * If we are inside the data area, we expect result to be: * * Either, * dp_ofs = 0 and dp_sg is the index of the sg entry * the data pointer belongs to (or the end of the data) * Or, * dp_ofs < 0 and dp_sg is the index of the sg entry * the data pointer belongs to + 1. */ if (dp_ofs < 0) { int n; while (dp_sg > dp_sgmin) { --dp_sg; tmp = scr_to_cpu(cp->phys.data[dp_sg].size); n = dp_ofs + (tmp & 0xffffff); if (n > 0) { ++dp_sg; break; } dp_ofs = n; } } else if (dp_ofs > 0) { while (dp_sg < SYM_CONF_MAX_SG) { tmp = scr_to_cpu(cp->phys.data[dp_sg].size); dp_ofs -= (tmp & 0xffffff); ++dp_sg; if (dp_ofs <= 0) break; } } /* * Make sure the data pointer is inside the data area. * If not, return some error. */ if (dp_sg < dp_sgmin || (dp_sg == dp_sgmin && dp_ofs < 0)) goto out_err; else if (dp_sg > SYM_CONF_MAX_SG || (dp_sg == SYM_CONF_MAX_SG && dp_ofs > 0)) goto out_err; /* * Save the extreme pointer if needed. */ if (dp_sg > cp->ext_sg || (dp_sg == cp->ext_sg && dp_ofs > cp->ext_ofs)) { cp->ext_sg = dp_sg; cp->ext_ofs = dp_ofs; } /* * Return data. */ *ofs = dp_ofs; return dp_sg; out_err: return -1; } /* * chip handler for MODIFY DATA POINTER MESSAGE * * We also call this function on IGNORE WIDE RESIDUE * messages that do not match a SWIDE full condition. * Btw, we assume in that situation that such a message * is equivalent to a MODIFY DATA POINTER (offset=-1). */ static void sym_modify_dp(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb *cp, int ofs) { int dp_ofs = ofs; u32 dp_scr = sym_get_script_dp (np, cp); u32 dp_ret; u32 tmp; u_char hflags; int dp_sg; struct sym_pmc *pm; /* * Not supported for auto-sense. */ if (cp->host_flags & HF_SENSE) goto out_reject; /* * Apply our alchemy:) (see comments in sym_evaluate_dp()), * to the resulted data pointer. */ dp_sg = sym_evaluate_dp(np, cp, dp_scr, &dp_ofs); if (dp_sg < 0) goto out_reject; /* * And our alchemy:) allows to easily calculate the data * script address we want to return for the next data phase. */ dp_ret = cpu_to_scr(cp->goalp); dp_ret = dp_ret - 8 - (SYM_CONF_MAX_SG - dp_sg) * (2*4); /* * If offset / scatter entry is zero we donnot need * a context for the new current data pointer. */ if (dp_ofs == 0) { dp_scr = dp_ret; goto out_ok; } /* * Get a context for the new current data pointer. 
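 */

/*
 * The index/offset gymnastics above are easier to follow on a plain array.
 * This sketch normalises a (segment index, signed byte offset) pair the way
 * sym_evaluate_dp() does: a positive offset is pushed forward through the
 * following segments, a negative one is pulled back through the preceding
 * ones, until it is zero or a negative remainder of a single segment.  The
 * segment sizes and main() are invented for the sketch.
 */
#include <stdio.h>

/* Returns the new segment index and leaves the offset (<= 0) in *ofs. */
static int normalize_dp(const unsigned int *size, int nseg, int sg, int *ofs)
{
        int n;

        if (*ofs < 0) {
                while (sg > 0) {
                        --sg;
                        n = *ofs + (int)size[sg];
                        if (n > 0) {
                                ++sg;   /* lands inside segment sg - 1 */
                                break;
                        }
                        *ofs = n;
                }
        } else if (*ofs > 0) {
                while (sg < nseg) {
                        *ofs -= (int)size[sg];
                        ++sg;
                        if (*ofs <= 0)
                                break;
                }
        }
        return sg;      /* caller still has to range-check sg and *ofs */
}

int main(void)
{
        unsigned int size[] = { 512, 1024, 2048 };
        int ofs = 700;                  /* move 700 bytes forward from sg 0 */
        int sg = normalize_dp(size, 3, 0, &ofs);

        /* Prints sg=2 ofs=-836, i.e. 188 bytes into segment 1. */
        printf("sg=%d ofs=%d\n", sg, ofs);
        return 0;
}

/*
 * In the driver the resulting pair is then clipped against the first valid
 * entry and SYM_CONF_MAX_SG and remembered as the "extreme pointer" used by
 * the residual code.  Below, a PM context is picked for the new data pointer
 * much as in the phase mismatch handler.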
*/ hflags = INB(np, HF_PRT); if (hflags & HF_DP_SAVED) hflags ^= HF_ACT_PM; if (!(hflags & HF_ACT_PM)) { pm = &cp->phys.pm0; dp_scr = SCRIPTA_BA(np, pm0_data); } else { pm = &cp->phys.pm1; dp_scr = SCRIPTA_BA(np, pm1_data); } hflags &= ~(HF_DP_SAVED); OUTB(np, HF_PRT, hflags); /* * Set up the new current data pointer. * ofs < 0 there, and for the next data phase, we * want to transfer part of the data of the sg entry * corresponding to index dp_sg-1 prior to returning * to the main data script. */ pm->ret = cpu_to_scr(dp_ret); tmp = scr_to_cpu(cp->phys.data[dp_sg-1].addr); tmp += scr_to_cpu(cp->phys.data[dp_sg-1].size) + dp_ofs; pm->sg.addr = cpu_to_scr(tmp); pm->sg.size = cpu_to_scr(-dp_ofs); out_ok: sym_set_script_dp (np, cp, dp_scr); OUTL_DSP(np, SCRIPTA_BA(np, clrack)); return; out_reject: OUTL_DSP(np, SCRIPTB_BA(np, msg_bad)); } /* * chip calculation of the data residual. * * As I used to say, the requirement of data residual * in SCSI is broken, useless and cannot be achieved * without huge complexity. * But most OSes and even the official CAM require it. * When stupidity happens to be so widely spread inside * a community, it gets hard to convince. * * Anyway, I don't care, since I am not going to use * any software that considers this data residual as * a relevant information. :) */ int sym_compute_residual(struct sym_hcb *np, struct sym_ccb *cp) { int dp_sg, resid = 0; int dp_ofs = 0; /* * Check for some data lost or just thrown away. * We are not required to be quite accurate in this * situation. Btw, if we are odd for output and the * device claims some more data, it may well happen * than our residual be zero. :-) */ if (cp->xerr_status & (XE_EXTRA_DATA|XE_SODL_UNRUN|XE_SWIDE_OVRUN)) { if (cp->xerr_status & XE_EXTRA_DATA) resid -= cp->extra_bytes; if (cp->xerr_status & XE_SODL_UNRUN) ++resid; if (cp->xerr_status & XE_SWIDE_OVRUN) --resid; } /* * If all data has been transferred, * there is no residual. */ if (cp->phys.head.lastp == cp->goalp) return resid; /* * If no data transfer occurs, or if the data * pointer is weird, return full residual. */ if (cp->startp == cp->phys.head.lastp || sym_evaluate_dp(np, cp, scr_to_cpu(cp->phys.head.lastp), &dp_ofs) < 0) { return cp->data_len - cp->odd_byte_adjustment; } /* * If we were auto-sensing, then we are done. */ if (cp->host_flags & HF_SENSE) { return -dp_ofs; } /* * We are now full comfortable in the computation * of the data residual (2's complement). */ resid = -cp->ext_ofs; for (dp_sg = cp->ext_sg; dp_sg < SYM_CONF_MAX_SG; ++dp_sg) { u_int tmp = scr_to_cpu(cp->phys.data[dp_sg].size); resid += (tmp & 0xffffff); } resid -= cp->odd_byte_adjustment; /* * Hopefully, the result is not too wrong. */ return resid; } /* * Negotiation for WIDE and SYNCHRONOUS DATA TRANSFER. * * When we try to negotiate, we append the negotiation message * to the identify and (maybe) simple tag message. * The host status field is set to HS_NEGOTIATE to mark this * situation. * * If the target doesn't answer this message immediately * (as required by the standard), the SIR_NEGO_FAILED interrupt * will be raised eventually. * The handler removes the HS_NEGOTIATE status, and sets the * negotiated value to the default (async / nowide). * * If we receive a matching answer immediately, we check it * for validity, and set the values. * * If we receive a Reject message immediately, we assume the * negotiation has failed, and fall back to standard values. * * If we receive a negotiation message while not in HS_NEGOTIATE * state, it's a target initiated negotiation. 
We prepare a * (hopefully) valid answer, set our parameters, and send back * this answer to the target. * * If the target doesn't fetch the answer (no message out phase), * we assume the negotiation has failed, and fall back to default * settings (SIR_NEGO_PROTO interrupt). * * When we set the values, we adjust them in all ccbs belonging * to this target, in the controller's register, and in the "phys" * field of the controller's struct sym_hcb. */ /* * chip handler for SYNCHRONOUS DATA TRANSFER REQUEST (SDTR) message. */ static int sym_sync_nego_check(struct sym_hcb *np, int req, struct sym_ccb *cp) { int target = cp->target; u_char chg, ofs, per, fak, div; if (DEBUG_FLAGS & DEBUG_NEGO) { sym_print_nego_msg(np, target, "sync msgin", np->msgin); } /* * Get requested values. */ chg = 0; per = np->msgin[3]; ofs = np->msgin[4]; /* * Check values against our limits. */ if (ofs) { if (ofs > np->maxoffs) {chg = 1; ofs = np->maxoffs;} } if (ofs) { if (per < np->minsync) {chg = 1; per = np->minsync;} } /* * Get new chip synchronous parameters value. */ div = fak = 0; if (ofs && sym_getsync(np, 0, per, &div, &fak) < 0) goto reject_it; if (DEBUG_FLAGS & DEBUG_NEGO) { sym_print_addr(cp->cmd, "sdtr: ofs=%d per=%d div=%d fak=%d chg=%d.\n", ofs, per, div, fak, chg); } /* * If it was an answer we want to change, * then it isn't acceptable. Reject it. */ if (!req && chg) goto reject_it; /* * Apply new values. */ sym_setsync (np, target, ofs, per, div, fak); /* * It was an answer. We are done. */ if (!req) return 0; /* * It was a request. Prepare an answer message. */ spi_populate_sync_msg(np->msgout, per, ofs); if (DEBUG_FLAGS & DEBUG_NEGO) { sym_print_nego_msg(np, target, "sync msgout", np->msgout); } np->msgin [0] = M_NOOP; return 0; reject_it: sym_setsync (np, target, 0, 0, 0, 0); return -1; } static void sym_sync_nego(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb *cp) { int req = 1; int result; /* * Request or answer ? */ if (INB(np, HS_PRT) == HS_NEGOTIATE) { OUTB(np, HS_PRT, HS_BUSY); if (cp->nego_status && cp->nego_status != NS_SYNC) goto reject_it; req = 0; } /* * Check and apply new values. */ result = sym_sync_nego_check(np, req, cp); if (result) /* Not acceptable, reject it */ goto reject_it; if (req) { /* Was a request, send response. */ cp->nego_status = NS_SYNC; OUTL_DSP(np, SCRIPTB_BA(np, sdtr_resp)); } else /* Was a response, we are done. */ OUTL_DSP(np, SCRIPTA_BA(np, clrack)); return; reject_it: OUTL_DSP(np, SCRIPTB_BA(np, msg_bad)); } /* * chip handler for PARALLEL PROTOCOL REQUEST (PPR) message. */ static int sym_ppr_nego_check(struct sym_hcb *np, int req, int target) { struct sym_tcb *tp = &np->target[target]; unsigned char fak, div; int dt, chg = 0; unsigned char per = np->msgin[3]; unsigned char ofs = np->msgin[5]; unsigned char wide = np->msgin[6]; unsigned char opts = np->msgin[7] & PPR_OPT_MASK; if (DEBUG_FLAGS & DEBUG_NEGO) { sym_print_nego_msg(np, target, "ppr msgin", np->msgin); } /* * Check values against our limits. */ if (wide > np->maxwide) { chg = 1; wide = np->maxwide; } if (!wide || !(np->features & FE_U3EN)) opts = 0; if (opts != (np->msgin[7] & PPR_OPT_MASK)) chg = 1; dt = opts & PPR_OPT_DT; if (ofs) { unsigned char maxoffs = dt ? np->maxoffs_dt : np->maxoffs; if (ofs > maxoffs) { chg = 1; ofs = maxoffs; } } if (ofs) { unsigned char minsync = dt ? np->minsync_dt : np->minsync; if (per < minsync) { chg = 1; per = minsync; } } /* * Get new chip synchronous parameters value. 
*/ div = fak = 0; if (ofs && sym_getsync(np, dt, per, &div, &fak) < 0) goto reject_it; /* * If it was an answer we want to change, * then it isn't acceptable. Reject it. */ if (!req && chg) goto reject_it; /* * Apply new values. */ sym_setpprot(np, target, opts, ofs, per, wide, div, fak); /* * It was an answer. We are done. */ if (!req) return 0; /* * It was a request. Prepare an answer message. */ spi_populate_ppr_msg(np->msgout, per, ofs, wide, opts); if (DEBUG_FLAGS & DEBUG_NEGO) { sym_print_nego_msg(np, target, "ppr msgout", np->msgout); } np->msgin [0] = M_NOOP; return 0; reject_it: sym_setpprot (np, target, 0, 0, 0, 0, 0, 0); /* * If it is a device response that should result in * ST, we may want to try a legacy negotiation later. */ if (!req && !opts) { tp->tgoal.period = per; tp->tgoal.offset = ofs; tp->tgoal.width = wide; tp->tgoal.iu = tp->tgoal.dt = tp->tgoal.qas = 0; tp->tgoal.check_nego = 1; } return -1; } static void sym_ppr_nego(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb *cp) { int req = 1; int result; /* * Request or answer ? */ if (INB(np, HS_PRT) == HS_NEGOTIATE) { OUTB(np, HS_PRT, HS_BUSY); if (cp->nego_status && cp->nego_status != NS_PPR) goto reject_it; req = 0; } /* * Check and apply new values. */ result = sym_ppr_nego_check(np, req, cp->target); if (result) /* Not acceptable, reject it */ goto reject_it; if (req) { /* Was a request, send response. */ cp->nego_status = NS_PPR; OUTL_DSP(np, SCRIPTB_BA(np, ppr_resp)); } else /* Was a response, we are done. */ OUTL_DSP(np, SCRIPTA_BA(np, clrack)); return; reject_it: OUTL_DSP(np, SCRIPTB_BA(np, msg_bad)); } /* * chip handler for WIDE DATA TRANSFER REQUEST (WDTR) message. */ static int sym_wide_nego_check(struct sym_hcb *np, int req, struct sym_ccb *cp) { int target = cp->target; u_char chg, wide; if (DEBUG_FLAGS & DEBUG_NEGO) { sym_print_nego_msg(np, target, "wide msgin", np->msgin); } /* * Get requested values. */ chg = 0; wide = np->msgin[3]; /* * Check values against our limits. */ if (wide > np->maxwide) { chg = 1; wide = np->maxwide; } if (DEBUG_FLAGS & DEBUG_NEGO) { sym_print_addr(cp->cmd, "wdtr: wide=%d chg=%d.\n", wide, chg); } /* * If it was an answer we want to change, * then it isn't acceptable. Reject it. */ if (!req && chg) goto reject_it; /* * Apply new values. */ sym_setwide (np, target, wide); /* * It was an answer. We are done. */ if (!req) return 0; /* * It was a request. Prepare an answer message. */ spi_populate_width_msg(np->msgout, wide); np->msgin [0] = M_NOOP; if (DEBUG_FLAGS & DEBUG_NEGO) { sym_print_nego_msg(np, target, "wide msgout", np->msgout); } return 0; reject_it: return -1; } static void sym_wide_nego(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb *cp) { int req = 1; int result; /* * Request or answer ? */ if (INB(np, HS_PRT) == HS_NEGOTIATE) { OUTB(np, HS_PRT, HS_BUSY); if (cp->nego_status && cp->nego_status != NS_WIDE) goto reject_it; req = 0; } /* * Check and apply new values. */ result = sym_wide_nego_check(np, req, cp); if (result) /* Not acceptable, reject it */ goto reject_it; if (req) { /* Was a request, send response. */ cp->nego_status = NS_WIDE; OUTL_DSP(np, SCRIPTB_BA(np, wdtr_resp)); } else { /* Was a response. */ /* * Negotiate for SYNC immediately after WIDE response. * This allows to negotiate for both WIDE and SYNC on * a single SCSI command (Suggested by Justin Gibbs). 
*/ if (tp->tgoal.offset) { spi_populate_sync_msg(np->msgout, tp->tgoal.period, tp->tgoal.offset); if (DEBUG_FLAGS & DEBUG_NEGO) { sym_print_nego_msg(np, cp->target, "sync msgout", np->msgout); } cp->nego_status = NS_SYNC; OUTB(np, HS_PRT, HS_NEGOTIATE); OUTL_DSP(np, SCRIPTB_BA(np, sdtr_resp)); return; } else OUTL_DSP(np, SCRIPTA_BA(np, clrack)); } return; reject_it: OUTL_DSP(np, SCRIPTB_BA(np, msg_bad)); } /* * Reset DT, SYNC or WIDE to default settings. * * Called when a negotiation does not succeed either * on rejection or on protocol error. * * A target that understands a PPR message should never * reject it, and messing with it is very unlikely. * So, if a PPR makes problems, we may just want to * try a legacy negotiation later. */ static void sym_nego_default(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb *cp) { switch (cp->nego_status) { case NS_PPR: #if 0 sym_setpprot (np, cp->target, 0, 0, 0, 0, 0, 0); #else if (tp->tgoal.period < np->minsync) tp->tgoal.period = np->minsync; if (tp->tgoal.offset > np->maxoffs) tp->tgoal.offset = np->maxoffs; tp->tgoal.iu = tp->tgoal.dt = tp->tgoal.qas = 0; tp->tgoal.check_nego = 1; #endif break; case NS_SYNC: sym_setsync (np, cp->target, 0, 0, 0, 0); break; case NS_WIDE: sym_setwide (np, cp->target, 0); break; } np->msgin [0] = M_NOOP; np->msgout[0] = M_NOOP; cp->nego_status = 0; } /* * chip handler for MESSAGE REJECT received in response to * PPR, WIDE or SYNCHRONOUS negotiation. */ static void sym_nego_rejected(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb *cp) { sym_nego_default(np, tp, cp); OUTB(np, HS_PRT, HS_BUSY); } #define sym_printk(lvl, tp, cp, fmt, v...) do { \ if (cp) \ scmd_printk(lvl, cp->cmd, fmt, ##v); \ else \ starget_printk(lvl, tp->starget, fmt, ##v); \ } while (0) /* * chip exception handler for programmed interrupts. */ static void sym_int_sir(struct sym_hcb *np) { u_char num = INB(np, nc_dsps); u32 dsa = INL(np, nc_dsa); struct sym_ccb *cp = sym_ccb_from_dsa(np, dsa); u_char target = INB(np, nc_sdid) & 0x0f; struct sym_tcb *tp = &np->target[target]; int tmp; if (DEBUG_FLAGS & DEBUG_TINY) printf ("I#%d", num); switch (num) { #if SYM_CONF_DMA_ADDRESSING_MODE == 2 /* * SCRIPTS tell us that we may have to update * 64 bit DMA segment registers. */ case SIR_DMAP_DIRTY: sym_update_dmap_regs(np); goto out; #endif /* * Command has been completed with error condition * or has been auto-sensed. */ case SIR_COMPLETE_ERROR: sym_complete_error(np, cp); return; /* * The C code is currently trying to recover from something. * Typically, user want to abort some command. */ case SIR_SCRIPT_STOPPED: case SIR_TARGET_SELECTED: case SIR_ABORT_SENT: sym_sir_task_recovery(np, num); return; /* * The device didn't go to MSG OUT phase after having * been selected with ATN. We do not want to handle that. */ case SIR_SEL_ATN_NO_MSG_OUT: sym_printk(KERN_WARNING, tp, cp, "No MSG OUT phase after selection with ATN\n"); goto out_stuck; /* * The device didn't switch to MSG IN phase after * having reselected the initiator. */ case SIR_RESEL_NO_MSG_IN: sym_printk(KERN_WARNING, tp, cp, "No MSG IN phase after reselection\n"); goto out_stuck; /* * After reselection, the device sent a message that wasn't * an IDENTIFY. */ case SIR_RESEL_NO_IDENTIFY: sym_printk(KERN_WARNING, tp, cp, "No IDENTIFY after reselection\n"); goto out_stuck; /* * The device reselected a LUN we do not know about. */ case SIR_RESEL_BAD_LUN: np->msgout[0] = M_RESET; goto out; /* * The device reselected for an untagged nexus and we * haven't any. 
*/ case SIR_RESEL_BAD_I_T_L: np->msgout[0] = M_ABORT; goto out; /* * The device reselected for a tagged nexus that we do not have. */ case SIR_RESEL_BAD_I_T_L_Q: np->msgout[0] = M_ABORT_TAG; goto out; /* * The SCRIPTS let us know that the device has grabbed * our message and will abort the job. */ case SIR_RESEL_ABORTED: np->lastmsg = np->msgout[0]; np->msgout[0] = M_NOOP; sym_printk(KERN_WARNING, tp, cp, "message %x sent on bad reselection\n", np->lastmsg); goto out; /* * The SCRIPTS let us know that a message has been * successfully sent to the device. */ case SIR_MSG_OUT_DONE: np->lastmsg = np->msgout[0]; np->msgout[0] = M_NOOP; /* Should we really care of that */ if (np->lastmsg == M_PARITY || np->lastmsg == M_ID_ERROR) { if (cp) { cp->xerr_status &= ~XE_PARITY_ERR; if (!cp->xerr_status) OUTOFFB(np, HF_PRT, HF_EXT_ERR); } } goto out; /* * The device didn't send a GOOD SCSI status. * We may have some work to do prior to allow * the SCRIPTS processor to continue. */ case SIR_BAD_SCSI_STATUS: if (!cp) goto out; sym_sir_bad_scsi_status(np, num, cp); return; /* * We are asked by the SCRIPTS to prepare a * REJECT message. */ case SIR_REJECT_TO_SEND: sym_print_msg(cp, "M_REJECT to send for ", np->msgin); np->msgout[0] = M_REJECT; goto out; /* * We have been ODD at the end of a DATA IN * transfer and the device didn't send a * IGNORE WIDE RESIDUE message. * It is a data overrun condition. */ case SIR_SWIDE_OVERRUN: if (cp) { OUTONB(np, HF_PRT, HF_EXT_ERR); cp->xerr_status |= XE_SWIDE_OVRUN; } goto out; /* * We have been ODD at the end of a DATA OUT * transfer. * It is a data underrun condition. */ case SIR_SODL_UNDERRUN: if (cp) { OUTONB(np, HF_PRT, HF_EXT_ERR); cp->xerr_status |= XE_SODL_UNRUN; } goto out; /* * The device wants us to tranfer more data than * expected or in the wrong direction. * The number of extra bytes is in scratcha. * It is a data overrun condition. */ case SIR_DATA_OVERRUN: if (cp) { OUTONB(np, HF_PRT, HF_EXT_ERR); cp->xerr_status |= XE_EXTRA_DATA; cp->extra_bytes += INL(np, nc_scratcha); } goto out; /* * The device switched to an illegal phase (4/5). */ case SIR_BAD_PHASE: if (cp) { OUTONB(np, HF_PRT, HF_EXT_ERR); cp->xerr_status |= XE_BAD_PHASE; } goto out; /* * We received a message. */ case SIR_MSG_RECEIVED: if (!cp) goto out_stuck; switch (np->msgin [0]) { /* * We received an extended message. * We handle MODIFY DATA POINTER, SDTR, WDTR * and reject all other extended messages. */ case M_EXTENDED: switch (np->msgin [2]) { case M_X_MODIFY_DP: if (DEBUG_FLAGS & DEBUG_POINTER) sym_print_msg(cp, "extended msg ", np->msgin); tmp = (np->msgin[3]<<24) + (np->msgin[4]<<16) + (np->msgin[5]<<8) + (np->msgin[6]); sym_modify_dp(np, tp, cp, tmp); return; case M_X_SYNC_REQ: sym_sync_nego(np, tp, cp); return; case M_X_PPR_REQ: sym_ppr_nego(np, tp, cp); return; case M_X_WIDE_REQ: sym_wide_nego(np, tp, cp); return; default: goto out_reject; } break; /* * We received a 1/2 byte message not handled from SCRIPTS. * We are only expecting MESSAGE REJECT and IGNORE WIDE * RESIDUE messages that haven't been anticipated by * SCRIPTS on SWIDE full condition. Unanticipated IGNORE * WIDE RESIDUE messages are aliased as MODIFY DP (-1). 
*/ case M_IGN_RESIDUE: if (DEBUG_FLAGS & DEBUG_POINTER) sym_print_msg(cp, "1 or 2 byte ", np->msgin); if (cp->host_flags & HF_SENSE) OUTL_DSP(np, SCRIPTA_BA(np, clrack)); else sym_modify_dp(np, tp, cp, -1); return; case M_REJECT: if (INB(np, HS_PRT) == HS_NEGOTIATE) sym_nego_rejected(np, tp, cp); else { sym_print_addr(cp->cmd, "M_REJECT received (%x:%x).\n", scr_to_cpu(np->lastmsg), np->msgout[0]); } goto out_clrack; default: goto out_reject; } break; /* * We received an unknown message. * Ignore all MSG IN phases and reject it. */ case SIR_MSG_WEIRD: sym_print_msg(cp, "WEIRD message received", np->msgin); OUTL_DSP(np, SCRIPTB_BA(np, msg_weird)); return; /* * Negotiation failed. * Target does not send us the reply. * Remove the HS_NEGOTIATE status. */ case SIR_NEGO_FAILED: OUTB(np, HS_PRT, HS_BUSY); /* * Negotiation failed. * Target does not want answer message. */ fallthrough; case SIR_NEGO_PROTO: sym_nego_default(np, tp, cp); goto out; } out: OUTONB_STD(); return; out_reject: OUTL_DSP(np, SCRIPTB_BA(np, msg_bad)); return; out_clrack: OUTL_DSP(np, SCRIPTA_BA(np, clrack)); return; out_stuck: return; } /* * Acquire a control block */ struct sym_ccb *sym_get_ccb (struct sym_hcb *np, struct scsi_cmnd *cmd, u_char tag_order) { u_char tn = cmd->device->id; u_char ln = cmd->device->lun; struct sym_tcb *tp = &np->target[tn]; struct sym_lcb *lp = sym_lp(tp, ln); u_short tag = NO_TAG; SYM_QUEHEAD *qp; struct sym_ccb *cp = NULL; /* * Look for a free CCB */ if (sym_que_empty(&np->free_ccbq)) sym_alloc_ccb(np); qp = sym_remque_head(&np->free_ccbq); if (!qp) goto out; cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); { /* * If we have been asked for a tagged command. */ if (tag_order) { /* * Debugging purpose. */ #ifndef SYM_OPT_HANDLE_DEVICE_QUEUEING if (lp->busy_itl != 0) goto out_free; #endif /* * Allocate resources for tags if not yet. */ if (!lp->cb_tags) { sym_alloc_lcb_tags(np, tn, ln); if (!lp->cb_tags) goto out_free; } /* * Get a tag for this SCSI IO and set up * the CCB bus address for reselection, * and count it for this LUN. * Toggle reselect path to tagged. */ if (lp->busy_itlq < SYM_CONF_MAX_TASK) { tag = lp->cb_tags[lp->ia_tag]; if (++lp->ia_tag == SYM_CONF_MAX_TASK) lp->ia_tag = 0; ++lp->busy_itlq; #ifndef SYM_OPT_HANDLE_DEVICE_QUEUEING lp->itlq_tbl[tag] = cpu_to_scr(cp->ccb_ba); lp->head.resel_sa = cpu_to_scr(SCRIPTA_BA(np, resel_tag)); #endif #ifdef SYM_OPT_LIMIT_COMMAND_REORDERING cp->tags_si = lp->tags_si; ++lp->tags_sum[cp->tags_si]; ++lp->tags_since; #endif } else goto out_free; } /* * This command will not be tagged. * If we already have either a tagged or untagged * one, refuse to overlap this untagged one. */ else { /* * Debugging purpose. */ #ifndef SYM_OPT_HANDLE_DEVICE_QUEUEING if (lp->busy_itl != 0 || lp->busy_itlq != 0) goto out_free; #endif /* * Count this nexus for this LUN. * Set up the CCB bus address for reselection. * Toggle reselect path to untagged. */ ++lp->busy_itl; #ifndef SYM_OPT_HANDLE_DEVICE_QUEUEING if (lp->busy_itl == 1) { lp->head.itl_task_sa = cpu_to_scr(cp->ccb_ba); lp->head.resel_sa = cpu_to_scr(SCRIPTA_BA(np, resel_no_tag)); } else goto out_free; #endif } } /* * Put the CCB into the busy queue. 
*/ sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq); #ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING if (lp) { sym_remque(&cp->link2_ccbq); sym_insque_tail(&cp->link2_ccbq, &lp->waiting_ccbq); } #endif cp->to_abort = 0; cp->odd_byte_adjustment = 0; cp->tag = tag; cp->order = tag_order; cp->target = tn; cp->lun = ln; if (DEBUG_FLAGS & DEBUG_TAGS) { sym_print_addr(cmd, "ccb @%p using tag %d.\n", cp, tag); } out: return cp; out_free: sym_insque_head(&cp->link_ccbq, &np->free_ccbq); return NULL; } /* * Release one control block */ void sym_free_ccb (struct sym_hcb *np, struct sym_ccb *cp) { struct sym_tcb *tp = &np->target[cp->target]; struct sym_lcb *lp = sym_lp(tp, cp->lun); if (DEBUG_FLAGS & DEBUG_TAGS) { sym_print_addr(cp->cmd, "ccb @%p freeing tag %d.\n", cp, cp->tag); } /* * If an LCB is available, */ if (lp) { /* * If tagged, release the tag and set the reselect path. */ if (cp->tag != NO_TAG) { #ifdef SYM_OPT_LIMIT_COMMAND_REORDERING --lp->tags_sum[cp->tags_si]; #endif /* * Free the tag value. */ lp->cb_tags[lp->if_tag] = cp->tag; if (++lp->if_tag == SYM_CONF_MAX_TASK) lp->if_tag = 0; /* * Make the reselect path invalid, * and uncount this CCB. */ lp->itlq_tbl[cp->tag] = cpu_to_scr(np->bad_itlq_ba); --lp->busy_itlq; } else { /* Untagged */ /* * Make the reselect path invalid, * and uncount this CCB. */ lp->head.itl_task_sa = cpu_to_scr(np->bad_itl_ba); --lp->busy_itl; } /* * If no JOB is active, make the LUN reselect path invalid. */ if (lp->busy_itlq == 0 && lp->busy_itl == 0) lp->head.resel_sa = cpu_to_scr(SCRIPTB_BA(np, resel_bad_lun)); } /* * We do not queue more than one CCB per target * with negotiation at any time. If this CCB was * used for negotiation, clear this info in the tcb. */ if (cp == tp->nego_cp) tp->nego_cp = NULL; #ifdef SYM_CONF_IARB_SUPPORT /* * If we just completed the last queued CCB, * clear this info that is no longer relevant. */ if (cp == np->last_cp) np->last_cp = 0; #endif /* * Make this CCB available. */ cp->cmd = NULL; cp->host_status = HS_IDLE; sym_remque(&cp->link_ccbq); sym_insque_head(&cp->link_ccbq, &np->free_ccbq); #ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING if (lp) { sym_remque(&cp->link2_ccbq); sym_insque_tail(&cp->link2_ccbq, &np->dummy_ccbq); if (cp->started) { if (cp->tag != NO_TAG) --lp->started_tags; else --lp->started_no_tag; } } cp->started = 0; #endif } /* * Allocate a CCB from memory and initialize its fixed part. */ static struct sym_ccb *sym_alloc_ccb(struct sym_hcb *np) { struct sym_ccb *cp = NULL; int hcode; /* * Prevent allocating more CCBs than we can * queue to the controller. */ if (np->actccbs >= SYM_CONF_MAX_START) return NULL; /* * Allocate memory for this CCB. */ cp = sym_calloc_dma(sizeof(struct sym_ccb), "CCB"); if (!cp) goto out_free; /* * Count it. */ np->actccbs++; /* * Compute the bus address of this ccb. */ cp->ccb_ba = vtobus(cp); /* * Insert this ccb into the hashed list. */ hcode = CCB_HASH_CODE(cp->ccb_ba); cp->link_ccbh = np->ccbh[hcode]; np->ccbh[hcode] = cp; /* * Initialize the start and restart actions. */ cp->phys.head.go.start = cpu_to_scr(SCRIPTA_BA(np, idle)); cp->phys.head.go.restart = cpu_to_scr(SCRIPTB_BA(np, bad_i_t_l)); /* * Initialize some other fields. */ cp->phys.smsg_ext.addr = cpu_to_scr(HCB_BA(np, msgin[2])); /* * Chain into free ccb queue. */ sym_insque_head(&cp->link_ccbq, &np->free_ccbq); /* * Chain into optional lists.
*/ #ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING sym_insque_head(&cp->link2_ccbq, &np->dummy_ccbq); #endif return cp; out_free: if (cp) sym_mfree_dma(cp, sizeof(*cp), "CCB"); return NULL; } /* * Look up a CCB from a DSA value. */ static struct sym_ccb *sym_ccb_from_dsa(struct sym_hcb *np, u32 dsa) { int hcode; struct sym_ccb *cp; hcode = CCB_HASH_CODE(dsa); cp = np->ccbh[hcode]; while (cp) { if (cp->ccb_ba == dsa) break; cp = cp->link_ccbh; } return cp; } /* * Target control block initialisation. * Nothing important to do at the moment. */ static void sym_init_tcb (struct sym_hcb *np, u_char tn) { #if 0 /* Hmmm... this checking looks paranoid. */ /* * Check some alignments required by the chip. */ assert (((offsetof(struct sym_reg, nc_sxfer) ^ offsetof(struct sym_tcb, head.sval)) &3) == 0); assert (((offsetof(struct sym_reg, nc_scntl3) ^ offsetof(struct sym_tcb, head.wval)) &3) == 0); #endif } /* * Lun control block allocation and initialization. */ struct sym_lcb *sym_alloc_lcb (struct sym_hcb *np, u_char tn, u_char ln) { struct sym_tcb *tp = &np->target[tn]; struct sym_lcb *lp = NULL; /* * Initialize the target control block if not yet. */ sym_init_tcb (np, tn); /* * Allocate the LCB bus address array. * Compute the bus address of this table. */ if (ln && !tp->luntbl) { tp->luntbl = sym_calloc_dma(256, "LUNTBL"); if (!tp->luntbl) goto fail; memset32(tp->luntbl, cpu_to_scr(vtobus(&np->badlun_sa)), 64); tp->head.luntbl_sa = cpu_to_scr(vtobus(tp->luntbl)); } /* * Allocate the table of pointers for LUN(s) > 0, if needed. */ if (ln && !tp->lunmp) { tp->lunmp = kcalloc(SYM_CONF_MAX_LUN, sizeof(struct sym_lcb *), GFP_ATOMIC); if (!tp->lunmp) goto fail; } /* * Allocate the lcb. * Make it available to the chip. */ lp = sym_calloc_dma(sizeof(struct sym_lcb), "LCB"); if (!lp) goto fail; if (ln) { tp->lunmp[ln] = lp; tp->luntbl[ln] = cpu_to_scr(vtobus(lp)); } else { tp->lun0p = lp; tp->head.lun0_sa = cpu_to_scr(vtobus(lp)); } tp->nlcb++; /* * Let the itl task point to error handling. */ lp->head.itl_task_sa = cpu_to_scr(np->bad_itl_ba); /* * Set the reselect pattern to our default. :) */ lp->head.resel_sa = cpu_to_scr(SCRIPTB_BA(np, resel_bad_lun)); /* * Set user capabilities. */ lp->user_flags = tp->usrflags & (SYM_DISC_ENABLED | SYM_TAGS_ENABLED); #ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING /* * Initialize device queueing. */ sym_que_init(&lp->waiting_ccbq); sym_que_init(&lp->started_ccbq); lp->started_max = SYM_CONF_MAX_TASK; lp->started_limit = SYM_CONF_MAX_TASK; #endif fail: return lp; } /* * Allocate LCB resources for tagged command queuing. */ static void sym_alloc_lcb_tags (struct sym_hcb *np, u_char tn, u_char ln) { struct sym_tcb *tp = &np->target[tn]; struct sym_lcb *lp = sym_lp(tp, ln); int i; /* * Allocate the task table and and the tag allocation * circular buffer. We want both or none. */ lp->itlq_tbl = sym_calloc_dma(SYM_CONF_MAX_TASK*4, "ITLQ_TBL"); if (!lp->itlq_tbl) goto fail; lp->cb_tags = kcalloc(SYM_CONF_MAX_TASK, 1, GFP_ATOMIC); if (!lp->cb_tags) { sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK*4, "ITLQ_TBL"); lp->itlq_tbl = NULL; goto fail; } /* * Initialize the task table with invalid entries. */ memset32(lp->itlq_tbl, cpu_to_scr(np->notask_ba), SYM_CONF_MAX_TASK); /* * Fill up the tag buffer with tag numbers. */ for (i = 0 ; i < SYM_CONF_MAX_TASK ; i++) lp->cb_tags[i] = i; /* * Make the task table available to SCRIPTS, * And accept tagged commands now. */ lp->head.itlq_tbl_sa = cpu_to_scr(vtobus(lp->itlq_tbl)); return; fail: return; } /* * Lun control block deallocation. 
Returns the number of valid remaining LCBs * for the target. */ int sym_free_lcb(struct sym_hcb *np, u_char tn, u_char ln) { struct sym_tcb *tp = &np->target[tn]; struct sym_lcb *lp = sym_lp(tp, ln); tp->nlcb--; if (ln) { if (!tp->nlcb) { kfree(tp->lunmp); sym_mfree_dma(tp->luntbl, 256, "LUNTBL"); tp->lunmp = NULL; tp->luntbl = NULL; tp->head.luntbl_sa = cpu_to_scr(vtobus(np->badluntbl)); } else { tp->luntbl[ln] = cpu_to_scr(vtobus(&np->badlun_sa)); tp->lunmp[ln] = NULL; } } else { tp->lun0p = NULL; tp->head.lun0_sa = cpu_to_scr(vtobus(&np->badlun_sa)); } if (lp->itlq_tbl) { sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK*4, "ITLQ_TBL"); kfree(lp->cb_tags); } sym_mfree_dma(lp, sizeof(*lp), "LCB"); return tp->nlcb; } /* * Queue a SCSI IO to the controller. */ int sym_queue_scsiio(struct sym_hcb *np, struct scsi_cmnd *cmd, struct sym_ccb *cp) { struct scsi_device *sdev = cmd->device; struct sym_tcb *tp; struct sym_lcb *lp; u_char *msgptr; u_int msglen; int can_disconnect; /* * Keep track of the IO in our CCB. */ cp->cmd = cmd; /* * Retrieve the target descriptor. */ tp = &np->target[cp->target]; /* * Retrieve the lun descriptor. */ lp = sym_lp(tp, sdev->lun); can_disconnect = (cp->tag != NO_TAG) || (lp && (lp->curr_flags & SYM_DISC_ENABLED)); msgptr = cp->scsi_smsg; msglen = 0; msgptr[msglen++] = IDENTIFY(can_disconnect, sdev->lun); /* * Build the tag message if present. */ if (cp->tag != NO_TAG) { u_char order = cp->order; switch(order) { case M_ORDERED_TAG: break; case M_HEAD_TAG: break; default: order = M_SIMPLE_TAG; } #ifdef SYM_OPT_LIMIT_COMMAND_REORDERING /* * Avoid too much reordering of SCSI commands. * The algorithm tries to prevent completion of any * tagged command from being delayed against more * than 3 times the max number of queued commands. */ if (lp && lp->tags_since > 3*SYM_CONF_MAX_TAG) { lp->tags_si = !(lp->tags_si); if (lp->tags_sum[lp->tags_si]) { order = M_ORDERED_TAG; if ((DEBUG_FLAGS & DEBUG_TAGS)||sym_verbose>1) { sym_print_addr(cmd, "ordered tag forced.\n"); } } lp->tags_since = 0; } #endif msgptr[msglen++] = order; /* * For less than 128 tags, actual tags are numbered * 1,3,5,..2*MAXTAGS+1,since we may have to deal * with devices that have problems with #TAG 0 or too * great #TAG numbers. For more tags (up to 256), * we use directly our tag number. */ #if SYM_CONF_MAX_TASK > (512/4) msgptr[msglen++] = cp->tag; #else msgptr[msglen++] = (cp->tag << 1) + 1; #endif } /* * Build a negotiation message if needed. * (nego_status is filled by sym_prepare_nego()) * * Always negotiate on INQUIRY and REQUEST SENSE. * */ cp->nego_status = 0; if ((tp->tgoal.check_nego || cmd->cmnd[0] == INQUIRY || cmd->cmnd[0] == REQUEST_SENSE) && !tp->nego_cp && lp) { msglen += sym_prepare_nego(np, cp, msgptr + msglen); } /* * Startqueue */ cp->phys.head.go.start = cpu_to_scr(SCRIPTA_BA(np, select)); cp->phys.head.go.restart = cpu_to_scr(SCRIPTA_BA(np, resel_dsa)); /* * select */ cp->phys.select.sel_id = cp->target; cp->phys.select.sel_scntl3 = tp->head.wval; cp->phys.select.sel_sxfer = tp->head.sval; cp->phys.select.sel_scntl4 = tp->head.uval; /* * message */ cp->phys.smsg.addr = CCB_BA(cp, scsi_smsg); cp->phys.smsg.size = cpu_to_scr(msglen); /* * status */ cp->host_xflags = 0; cp->host_status = cp->nego_status ? HS_NEGOTIATE : HS_BUSY; cp->ssss_status = S_ILLEGAL; cp->xerr_status = 0; cp->host_flags = 0; cp->extra_bytes = 0; /* * extreme data pointer. 
* shall be positive, so -1 is lower than lowest.:) */ cp->ext_sg = -1; cp->ext_ofs = 0; /* * Build the CDB and DATA descriptor block * and start the IO. */ return sym_setup_data_and_start(np, cmd, cp); } /* * Reset a SCSI target (all LUNs of this target). */ int sym_reset_scsi_target(struct sym_hcb *np, int target) { struct sym_tcb *tp; if (target == np->myaddr || (u_int)target >= SYM_CONF_MAX_TARGET) return -1; tp = &np->target[target]; tp->to_reset = 1; np->istat_sem = SEM; OUTB(np, nc_istat, SIGP|SEM); return 0; } /* * Abort a SCSI IO. */ static int sym_abort_ccb(struct sym_hcb *np, struct sym_ccb *cp, int timed_out) { /* * Check that the IO is active. */ if (!cp || !cp->host_status || cp->host_status == HS_WAIT) return -1; /* * If a previous abort didn't succeed in time, * perform a BUS reset. */ if (cp->to_abort) { sym_reset_scsi_bus(np, 1); return 0; } /* * Mark the CCB for abort and allow time for. */ cp->to_abort = timed_out ? 2 : 1; /* * Tell the SCRIPTS processor to stop and synchronize with us. */ np->istat_sem = SEM; OUTB(np, nc_istat, SIGP|SEM); return 0; } int sym_abort_scsiio(struct sym_hcb *np, struct scsi_cmnd *cmd, int timed_out) { struct sym_ccb *cp; SYM_QUEHEAD *qp; /* * Look up our CCB control block. */ cp = NULL; FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) { struct sym_ccb *cp2 = sym_que_entry(qp, struct sym_ccb, link_ccbq); if (cp2->cmd == cmd) { cp = cp2; break; } } return sym_abort_ccb(np, cp, timed_out); } /* * Complete execution of a SCSI command with extended * error, SCSI status error, or having been auto-sensed. * * The SCRIPTS processor is not running there, so we * can safely access IO registers and remove JOBs from * the START queue. * SCRATCHA is assumed to have been loaded with STARTPOS * before the SCRIPTS called the C code. */ void sym_complete_error(struct sym_hcb *np, struct sym_ccb *cp) { struct scsi_device *sdev; struct scsi_cmnd *cmd; #ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING struct sym_tcb *tp; struct sym_lcb *lp; #endif int resid; int i; /* * Paranoid check. :) */ if (!cp || !cp->cmd) return; cmd = cp->cmd; sdev = cmd->device; if (DEBUG_FLAGS & (DEBUG_TINY|DEBUG_RESULT)) { dev_info(&sdev->sdev_gendev, "CCB=%p STAT=%x/%x/%x\n", cp, cp->host_status, cp->ssss_status, cp->host_flags); } #ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING /* * Get target and lun pointers. */ tp = &np->target[cp->target]; lp = sym_lp(tp, sdev->lun); #endif /* * Check for extended errors. */ if (cp->xerr_status) { if (sym_verbose) sym_print_xerr(cmd, cp->xerr_status); if (cp->host_status == HS_COMPLETE) cp->host_status = HS_COMP_ERR; } /* * Calculate the residual. */ resid = sym_compute_residual(np, cp); if (!SYM_SETUP_RESIDUAL_SUPPORT) {/* If user does not want residuals */ resid = 0; /* throw them away. :) */ cp->sv_resid = 0; } #ifdef DEBUG_2_0_X if (resid) printf("XXXX RESID= %d - 0x%x\n", resid, resid); #endif /* * Dequeue all queued CCBs for that device * not yet started by SCRIPTS. */ i = (INL(np, nc_scratcha) - np->squeue_ba) / 4; i = sym_dequeue_from_squeue(np, i, cp->target, sdev->lun, -1); /* * Restart the SCRIPTS processor. */ OUTL_DSP(np, SCRIPTA_BA(np, start)); #ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING if (cp->host_status == HS_COMPLETE && cp->ssss_status == S_QUEUE_FULL) { if (!lp || lp->started_tags - i < 2) goto weirdness; /* * Decrease queue depth as needed. */ lp->started_max = lp->started_tags - i - 1; lp->num_sgood = 0; if (sym_verbose >= 2) { sym_print_addr(cmd, " queue depth is now %d\n", lp->started_max); } /* * Repair the CCB. 
*/ cp->host_status = HS_BUSY; cp->ssss_status = S_ILLEGAL; /* * Let's requeue it to the device. */ sym_set_cam_status(cmd, DID_SOFT_ERROR); goto finish; } weirdness: #endif /* * Build result in CAM ccb. */ sym_set_cam_result_error(np, cp, resid); #ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING finish: #endif /* * Add this one to the COMP queue. */ sym_remque(&cp->link_ccbq); sym_insque_head(&cp->link_ccbq, &np->comp_ccbq); /* * Complete all those commands with either error * or requeue condition. */ sym_flush_comp_queue(np, 0); #ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING /* * Do not start more than one command after an error. */ sym_start_next_ccbs(np, lp, 1); #endif } /* * Complete execution of a successful SCSI command. * * Only successful commands go to the DONE queue, * since we need to have the SCRIPTS processor * stopped on any error condition. * The SCRIPTS processor is running while we are * completing successful commands. */ void sym_complete_ok (struct sym_hcb *np, struct sym_ccb *cp) { #ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING struct sym_tcb *tp; struct sym_lcb *lp; #endif struct scsi_cmnd *cmd; int resid; /* * Paranoid check. :) */ if (!cp || !cp->cmd) return; assert (cp->host_status == HS_COMPLETE); /* * Get the user command. */ cmd = cp->cmd; #ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING /* * Get target and lun pointers. */ tp = &np->target[cp->target]; lp = sym_lp(tp, cp->lun); #endif /* * If all data have been transferred, given that no * extended error occurred, there is no residual. */ resid = 0; if (cp->phys.head.lastp != cp->goalp) resid = sym_compute_residual(np, cp); /* * Wrong transfer residuals may be worse than just always * returning zero. The user can disable this feature in * sym53c8xx.h. Residual support is enabled by default. */ if (!SYM_SETUP_RESIDUAL_SUPPORT) resid = 0; #ifdef DEBUG_2_0_X if (resid) printf("XXXX RESID= %d - 0x%x\n", resid, resid); #endif /* * Build result in CAM ccb. */ sym_set_cam_result_ok(cp, cmd, resid); #ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING /* * If the max number of started ccbs has been reduced, * increase it again once 200 good statuses have been received. */ if (lp && lp->started_max < lp->started_limit) { ++lp->num_sgood; if (lp->num_sgood >= 200) { lp->num_sgood = 0; ++lp->started_max; if (sym_verbose >= 2) { sym_print_addr(cmd, " queue depth is now %d\n", lp->started_max); } } } #endif /* * Free our CCB. */ sym_free_ccb (np, cp); #ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING /* * Requeue a couple of waiting scsi commands. */ if (!sym_que_empty(&lp->waiting_ccbq)) sym_start_next_ccbs(np, lp, 2); #endif /* * Complete the command. */ sym_xpt_done(np, cmd); } /* * Soft-attach the controller. */ int sym_hcb_attach(struct Scsi_Host *shost, struct sym_fw *fw, struct sym_nvram *nvram) { struct sym_hcb *np = sym_get_hcb(shost); int i; /* * Get some info about the firmware. */ np->scripta_sz = fw->a_size; np->scriptb_sz = fw->b_size; np->scriptz_sz = fw->z_size; np->fw_setup = fw->setup; np->fw_patch = fw->patch; np->fw_name = fw->name; /* * Save the settings of some IO registers, so we will * be able to probe specific implementations. */ sym_save_initial_setting (np); /* * Reset the chip now, since it has been reported * that SCSI clock calibration may not work properly * if the chip is currently active. */ sym_chip_reset(np); /* * Prepare controller and device settings, according * to chip features, user set-up and driver set-up. */ sym_prepare_setting(shost, np, nvram); /* * Check the PCI clock frequency.
* Must be performed after prepare_setting since it destroys * STEST1 that is used to probe for the clock doubler. */ i = sym_getpciclock(np); if (i > 37000 && !(np->features & FE_66MHZ)) printf("%s: PCI BUS clock seems too high: %u KHz.\n", sym_name(np), i); /* * Allocate the start queue. */ np->squeue = sym_calloc_dma(sizeof(u32)*(MAX_QUEUE*2),"SQUEUE"); if (!np->squeue) goto attach_failed; np->squeue_ba = vtobus(np->squeue); /* * Allocate the done queue. */ np->dqueue = sym_calloc_dma(sizeof(u32)*(MAX_QUEUE*2),"DQUEUE"); if (!np->dqueue) goto attach_failed; np->dqueue_ba = vtobus(np->dqueue); /* * Allocate the target bus address array. */ np->targtbl = sym_calloc_dma(256, "TARGTBL"); if (!np->targtbl) goto attach_failed; np->targtbl_ba = vtobus(np->targtbl); /* * Allocate SCRIPTS areas. */ np->scripta0 = sym_calloc_dma(np->scripta_sz, "SCRIPTA0"); np->scriptb0 = sym_calloc_dma(np->scriptb_sz, "SCRIPTB0"); np->scriptz0 = sym_calloc_dma(np->scriptz_sz, "SCRIPTZ0"); if (!np->scripta0 || !np->scriptb0 || !np->scriptz0) goto attach_failed; /* * Allocate the array of lists of CCBs hashed by DSA. */ np->ccbh = kcalloc(CCB_HASH_SIZE, sizeof(*np->ccbh), GFP_KERNEL); if (!np->ccbh) goto attach_failed; /* * Initialyze the CCB free and busy queues. */ sym_que_init(&np->free_ccbq); sym_que_init(&np->busy_ccbq); sym_que_init(&np->comp_ccbq); /* * Initialization for optional handling * of device queueing. */ #ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING sym_que_init(&np->dummy_ccbq); #endif /* * Allocate some CCB. We need at least ONE. */ if (!sym_alloc_ccb(np)) goto attach_failed; /* * Calculate BUS addresses where we are going * to load the SCRIPTS. */ np->scripta_ba = vtobus(np->scripta0); np->scriptb_ba = vtobus(np->scriptb0); np->scriptz_ba = vtobus(np->scriptz0); if (np->ram_ba) { np->scripta_ba = np->ram_ba; if (np->features & FE_RAM8K) { np->scriptb_ba = np->scripta_ba + 4096; #if 0 /* May get useful for 64 BIT PCI addressing */ np->scr_ram_seg = cpu_to_scr(np->scripta_ba >> 32); #endif } } /* * Copy scripts to controller instance. */ memcpy(np->scripta0, fw->a_base, np->scripta_sz); memcpy(np->scriptb0, fw->b_base, np->scriptb_sz); memcpy(np->scriptz0, fw->z_base, np->scriptz_sz); /* * Setup variable parts in scripts and compute * scripts bus addresses used from the C code. */ np->fw_setup(np, fw); /* * Bind SCRIPTS with physical addresses usable by the * SCRIPTS processor (as seen from the BUS = BUS addresses). */ sym_fw_bind_script(np, (u32 *) np->scripta0, np->scripta_sz); sym_fw_bind_script(np, (u32 *) np->scriptb0, np->scriptb_sz); sym_fw_bind_script(np, (u32 *) np->scriptz0, np->scriptz_sz); #ifdef SYM_CONF_IARB_SUPPORT /* * If user wants IARB to be set when we win arbitration * and have other jobs, compute the max number of consecutive * settings of IARB hints before we leave devices a chance to * arbitrate for reselection. */ #ifdef SYM_SETUP_IARB_MAX np->iarb_max = SYM_SETUP_IARB_MAX; #else np->iarb_max = 4; #endif #endif /* * Prepare the idle and invalid task actions. 
*/ np->idletask.start = cpu_to_scr(SCRIPTA_BA(np, idle)); np->idletask.restart = cpu_to_scr(SCRIPTB_BA(np, bad_i_t_l)); np->idletask_ba = vtobus(&np->idletask); np->notask.start = cpu_to_scr(SCRIPTA_BA(np, idle)); np->notask.restart = cpu_to_scr(SCRIPTB_BA(np, bad_i_t_l)); np->notask_ba = vtobus(&np->notask); np->bad_itl.start = cpu_to_scr(SCRIPTA_BA(np, idle)); np->bad_itl.restart = cpu_to_scr(SCRIPTB_BA(np, bad_i_t_l)); np->bad_itl_ba = vtobus(&np->bad_itl); np->bad_itlq.start = cpu_to_scr(SCRIPTA_BA(np, idle)); np->bad_itlq.restart = cpu_to_scr(SCRIPTB_BA(np,bad_i_t_l_q)); np->bad_itlq_ba = vtobus(&np->bad_itlq); /* * Allocate and prepare the lun JUMP table that is used * for a target prior the probing of devices (bad lun table). * A private table will be allocated for the target on the * first INQUIRY response received. */ np->badluntbl = sym_calloc_dma(256, "BADLUNTBL"); if (!np->badluntbl) goto attach_failed; np->badlun_sa = cpu_to_scr(SCRIPTB_BA(np, resel_bad_lun)); memset32(np->badluntbl, cpu_to_scr(vtobus(&np->badlun_sa)), 64); /* * Prepare the bus address array that contains the bus * address of each target control block. * For now, assume all logical units are wrong. :) */ for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) { np->targtbl[i] = cpu_to_scr(vtobus(&np->target[i])); np->target[i].head.luntbl_sa = cpu_to_scr(vtobus(np->badluntbl)); np->target[i].head.lun0_sa = cpu_to_scr(vtobus(&np->badlun_sa)); } /* * Now check the cache handling of the pci chipset. */ if (sym_snooptest (np)) { printf("%s: CACHE INCORRECTLY CONFIGURED.\n", sym_name(np)); goto attach_failed; } /* * Sigh! we are done. */ return 0; attach_failed: return -ENXIO; } /* * Free everything that has been allocated for this device. */ void sym_hcb_free(struct sym_hcb *np) { SYM_QUEHEAD *qp; struct sym_ccb *cp; struct sym_tcb *tp; int target; if (np->scriptz0) sym_mfree_dma(np->scriptz0, np->scriptz_sz, "SCRIPTZ0"); if (np->scriptb0) sym_mfree_dma(np->scriptb0, np->scriptb_sz, "SCRIPTB0"); if (np->scripta0) sym_mfree_dma(np->scripta0, np->scripta_sz, "SCRIPTA0"); if (np->squeue) sym_mfree_dma(np->squeue, sizeof(u32)*(MAX_QUEUE*2), "SQUEUE"); if (np->dqueue) sym_mfree_dma(np->dqueue, sizeof(u32)*(MAX_QUEUE*2), "DQUEUE"); if (np->actccbs) { while ((qp = sym_remque_head(&np->free_ccbq)) != NULL) { cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); sym_mfree_dma(cp, sizeof(*cp), "CCB"); } } kfree(np->ccbh); if (np->badluntbl) sym_mfree_dma(np->badluntbl, 256,"BADLUNTBL"); for (target = 0; target < SYM_CONF_MAX_TARGET ; target++) { tp = &np->target[target]; if (tp->luntbl) sym_mfree_dma(tp->luntbl, 256, "LUNTBL"); #if SYM_CONF_MAX_LUN > 1 kfree(tp->lunmp); #endif } if (np->targtbl) sym_mfree_dma(np->targtbl, 256, "TARGTBL"); }
linux-master
drivers/scsi/sym53c8xx_2/sym_hipd.c
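The tag handling in sym_get_ccb()/sym_free_ccb() above treats lp->cb_tags as a circular buffer of free tag numbers: ia_tag is the allocation index, if_tag the free index, and busy_itlq the in-use count; when fewer than 128 tags are configured, the tag byte placed in the tag message is encoded as (tag << 1) + 1 so that tag 0 and overly large tag numbers are never sent. The following is a minimal standalone sketch of that ring, not driver code; the names tag_ring, TAG_RING_SIZE, tag_alloc and tag_free are illustrative stand-ins for fields kept inside struct sym_lcb.

#include <stdio.h>

#define TAG_RING_SIZE 64  /* stands in for SYM_CONF_MAX_TASK */

struct tag_ring {
	unsigned char tags[TAG_RING_SIZE];  /* pool of free tag numbers */
	unsigned int ia_tag;                /* next slot to allocate from */
	unsigned int if_tag;                /* next slot to free into */
	unsigned int busy;                  /* tags currently in use */
};

static void tag_ring_init(struct tag_ring *r)
{
	unsigned int i;

	for (i = 0; i < TAG_RING_SIZE; i++)
		r->tags[i] = (unsigned char)i;
	r->ia_tag = r->if_tag = r->busy = 0;
}

/* Hand out the next free tag, or return -1 when all tags are busy. */
static int tag_alloc(struct tag_ring *r)
{
	int tag;

	if (r->busy >= TAG_RING_SIZE)
		return -1;
	tag = r->tags[r->ia_tag];
	if (++r->ia_tag == TAG_RING_SIZE)
		r->ia_tag = 0;
	r->busy++;
	return tag;
}

/* Put a tag back at the tail of the ring. */
static void tag_free(struct tag_ring *r, unsigned char tag)
{
	r->tags[r->if_tag] = tag;
	if (++r->if_tag == TAG_RING_SIZE)
		r->if_tag = 0;
	r->busy--;
}

int main(void)
{
	struct tag_ring r;
	int t;

	tag_ring_init(&r);
	t = tag_alloc(&r);
	/* With fewer than 128 tags the driver sends (tag << 1) + 1 on the
	 * wire, so tag number 0 is never used in a tag message. */
	printf("allocated tag %d, tag message byte %d\n", t, (t << 1) + 1);
	tag_free(&r, (unsigned char)t);
	return 0;
}

Because freed tags are appended at if_tag while allocations advance ia_tag, a just-freed tag is only reissued after every other free tag has been handed out, which spreads reuse of any given tag number over time.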
// SPDX-License-Identifier: GPL-2.0-or-later /* * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family * of PCI-SCSI IO processors. * * Copyright (C) 1999-2001 Gerard Roudier <[email protected]> * * This driver is derived from the Linux sym53c8xx driver. * Copyright (C) 1998-2000 Gerard Roudier * * The sym53c8xx driver is derived from the ncr53c8xx driver that had been * a port of the FreeBSD ncr driver to Linux-1.2.13. * * The original ncr driver has been written for 386bsd and FreeBSD by * Wolfgang Stanglmeier <[email protected]> * Stefan Esser <[email protected]> * Copyright (C) 1994 Wolfgang Stanglmeier * * Other major contributions: * * NVRAM detection and reading. * Copyright (C) 1997 Richard Waltham <[email protected]> * *----------------------------------------------------------------------------- */ #include "sym_glue.h" /* * Macros used for all firmwares. */ #define SYM_GEN_A(s, label) ((short) offsetof(s, label)), #define SYM_GEN_B(s, label) ((short) offsetof(s, label)), #define SYM_GEN_Z(s, label) ((short) offsetof(s, label)), #define PADDR_A(label) SYM_GEN_PADDR_A(struct SYM_FWA_SCR, label) #define PADDR_B(label) SYM_GEN_PADDR_B(struct SYM_FWB_SCR, label) #if SYM_CONF_GENERIC_SUPPORT /* * Allocate firmware #1 script area. */ #define SYM_FWA_SCR sym_fw1a_scr #define SYM_FWB_SCR sym_fw1b_scr #define SYM_FWZ_SCR sym_fw1z_scr #include "sym_fw1.h" static struct sym_fwa_ofs sym_fw1a_ofs = { SYM_GEN_FW_A(struct SYM_FWA_SCR) }; static struct sym_fwb_ofs sym_fw1b_ofs = { SYM_GEN_FW_B(struct SYM_FWB_SCR) }; static struct sym_fwz_ofs sym_fw1z_ofs = { SYM_GEN_FW_Z(struct SYM_FWZ_SCR) }; #undef SYM_FWA_SCR #undef SYM_FWB_SCR #undef SYM_FWZ_SCR #endif /* SYM_CONF_GENERIC_SUPPORT */ /* * Allocate firmware #2 script area. */ #define SYM_FWA_SCR sym_fw2a_scr #define SYM_FWB_SCR sym_fw2b_scr #define SYM_FWZ_SCR sym_fw2z_scr #include "sym_fw2.h" static struct sym_fwa_ofs sym_fw2a_ofs = { SYM_GEN_FW_A(struct SYM_FWA_SCR) }; static struct sym_fwb_ofs sym_fw2b_ofs = { SYM_GEN_FW_B(struct SYM_FWB_SCR) SYM_GEN_B(struct SYM_FWB_SCR, start64) SYM_GEN_B(struct SYM_FWB_SCR, pm_handle) }; static struct sym_fwz_ofs sym_fw2z_ofs = { SYM_GEN_FW_Z(struct SYM_FWZ_SCR) }; #undef SYM_FWA_SCR #undef SYM_FWB_SCR #undef SYM_FWZ_SCR #undef SYM_GEN_A #undef SYM_GEN_B #undef SYM_GEN_Z #undef PADDR_A #undef PADDR_B #if SYM_CONF_GENERIC_SUPPORT /* * Patch routine for firmware #1. */ static void sym_fw1_patch(struct Scsi_Host *shost) { struct sym_hcb *np = sym_get_hcb(shost); struct sym_fw1a_scr *scripta0; struct sym_fw1b_scr *scriptb0; scripta0 = (struct sym_fw1a_scr *) np->scripta0; scriptb0 = (struct sym_fw1b_scr *) np->scriptb0; /* * Remove LED support if not needed. */ if (!(np->features & FE_LED0)) { scripta0->idle[0] = cpu_to_scr(SCR_NO_OP); scripta0->reselected[0] = cpu_to_scr(SCR_NO_OP); scripta0->start[0] = cpu_to_scr(SCR_NO_OP); } #ifdef SYM_CONF_IARB_SUPPORT /* * If user does not want to use IMMEDIATE ARBITRATION * when we are reselected while attempting to arbitrate, * patch the SCRIPTS accordingly with a SCRIPT NO_OP. */ if (!SYM_CONF_SET_IARB_ON_ARB_LOST) scripta0->ungetjob[0] = cpu_to_scr(SCR_NO_OP); #endif /* * Patch some data in SCRIPTS. * - start and done queue initial bus address. * - target bus address table bus address. */ scriptb0->startpos[0] = cpu_to_scr(np->squeue_ba); scriptb0->done_pos[0] = cpu_to_scr(np->dqueue_ba); scriptb0->targtbl[0] = cpu_to_scr(np->targtbl_ba); } #endif /* SYM_CONF_GENERIC_SUPPORT */ /* * Patch routine for firmware #2. 
*/ static void sym_fw2_patch(struct Scsi_Host *shost) { struct sym_data *sym_data = shost_priv(shost); struct pci_dev *pdev = sym_data->pdev; struct sym_hcb *np = sym_data->ncb; struct sym_fw2a_scr *scripta0; struct sym_fw2b_scr *scriptb0; scripta0 = (struct sym_fw2a_scr *) np->scripta0; scriptb0 = (struct sym_fw2b_scr *) np->scriptb0; /* * Remove LED support if not needed. */ if (!(np->features & FE_LED0)) { scripta0->idle[0] = cpu_to_scr(SCR_NO_OP); scripta0->reselected[0] = cpu_to_scr(SCR_NO_OP); scripta0->start[0] = cpu_to_scr(SCR_NO_OP); } #if SYM_CONF_DMA_ADDRESSING_MODE == 2 /* * Remove useless 64 bit DMA specific SCRIPTS, * when this feature is not available. */ if (!use_dac(np)) { scripta0->is_dmap_dirty[0] = cpu_to_scr(SCR_NO_OP); scripta0->is_dmap_dirty[1] = 0; scripta0->is_dmap_dirty[2] = cpu_to_scr(SCR_NO_OP); scripta0->is_dmap_dirty[3] = 0; } #endif #ifdef SYM_CONF_IARB_SUPPORT /* * If user does not want to use IMMEDIATE ARBITRATION * when we are reselected while attempting to arbitrate, * patch the SCRIPTS accordingly with a SCRIPT NO_OP. */ if (!SYM_CONF_SET_IARB_ON_ARB_LOST) scripta0->ungetjob[0] = cpu_to_scr(SCR_NO_OP); #endif /* * Patch some variable in SCRIPTS. * - start and done queue initial bus address. * - target bus address table bus address. */ scriptb0->startpos[0] = cpu_to_scr(np->squeue_ba); scriptb0->done_pos[0] = cpu_to_scr(np->dqueue_ba); scriptb0->targtbl[0] = cpu_to_scr(np->targtbl_ba); /* * Remove the load of SCNTL4 on reselection if not a C10. */ if (!(np->features & FE_C10)) { scripta0->resel_scntl4[0] = cpu_to_scr(SCR_NO_OP); scripta0->resel_scntl4[1] = cpu_to_scr(0); } /* * Remove a couple of work-arounds specific to C1010 if * they are not desirable. See `sym_fw2.h' for more details. */ if (!(pdev->device == PCI_DEVICE_ID_LSI_53C1010_66 && pdev->revision < 0x1 && np->pciclk_khz < 60000)) { scripta0->datao_phase[0] = cpu_to_scr(SCR_NO_OP); scripta0->datao_phase[1] = cpu_to_scr(0); } if (!(pdev->device == PCI_DEVICE_ID_LSI_53C1010_33 /* && pdev->revision < 0xff */)) { scripta0->sel_done[0] = cpu_to_scr(SCR_NO_OP); scripta0->sel_done[1] = cpu_to_scr(0); } /* * Patch some other variables in SCRIPTS. * These ones are loaded by the SCRIPTS processor. */ scriptb0->pm0_data_addr[0] = cpu_to_scr(np->scripta_ba + offsetof(struct sym_fw2a_scr, pm0_data)); scriptb0->pm1_data_addr[0] = cpu_to_scr(np->scripta_ba + offsetof(struct sym_fw2a_scr, pm1_data)); } /* * Fill the data area in scripts. * To be done for all firmwares. */ static void sym_fw_fill_data (u32 *in, u32 *out) { int i; for (i = 0; i < SYM_CONF_MAX_SG; i++) { *in++ = SCR_CHMOV_TBL ^ SCR_DATA_IN; *in++ = offsetof (struct sym_dsb, data[i]); *out++ = SCR_CHMOV_TBL ^ SCR_DATA_OUT; *out++ = offsetof (struct sym_dsb, data[i]); } } /* * Setup useful script bus addresses. * To be done for all firmwares. */ static void sym_fw_setup_bus_addresses(struct sym_hcb *np, struct sym_fw *fw) { u32 *pa; u_short *po; int i; /* * Build the bus address table for script A * from the script A offset table. */ po = (u_short *) fw->a_ofs; pa = (u32 *) &np->fwa_bas; for (i = 0 ; i < sizeof(np->fwa_bas)/sizeof(u32) ; i++) pa[i] = np->scripta_ba + po[i]; /* * Same for script B. */ po = (u_short *) fw->b_ofs; pa = (u32 *) &np->fwb_bas; for (i = 0 ; i < sizeof(np->fwb_bas)/sizeof(u32) ; i++) pa[i] = np->scriptb_ba + po[i]; /* * Same for script Z. 
*/ po = (u_short *) fw->z_ofs; pa = (u32 *) &np->fwz_bas; for (i = 0 ; i < sizeof(np->fwz_bas)/sizeof(u32) ; i++) pa[i] = np->scriptz_ba + po[i]; } #if SYM_CONF_GENERIC_SUPPORT /* * Setup routine for firmware #1. */ static void sym_fw1_setup(struct sym_hcb *np, struct sym_fw *fw) { struct sym_fw1a_scr *scripta0; scripta0 = (struct sym_fw1a_scr *) np->scripta0; /* * Fill variable parts in scripts. */ sym_fw_fill_data(scripta0->data_in, scripta0->data_out); /* * Setup bus addresses used from the C code.. */ sym_fw_setup_bus_addresses(np, fw); } #endif /* SYM_CONF_GENERIC_SUPPORT */ /* * Setup routine for firmware #2. */ static void sym_fw2_setup(struct sym_hcb *np, struct sym_fw *fw) { struct sym_fw2a_scr *scripta0; scripta0 = (struct sym_fw2a_scr *) np->scripta0; /* * Fill variable parts in scripts. */ sym_fw_fill_data(scripta0->data_in, scripta0->data_out); /* * Setup bus addresses used from the C code.. */ sym_fw_setup_bus_addresses(np, fw); } /* * Allocate firmware descriptors. */ #if SYM_CONF_GENERIC_SUPPORT static struct sym_fw sym_fw1 = SYM_FW_ENTRY(sym_fw1, "NCR-generic"); #endif /* SYM_CONF_GENERIC_SUPPORT */ static struct sym_fw sym_fw2 = SYM_FW_ENTRY(sym_fw2, "LOAD/STORE-based"); /* * Find the most appropriate firmware for a chip. */ struct sym_fw * sym_find_firmware(struct sym_chip *chip) { if (chip->features & FE_LDSTR) return &sym_fw2; #if SYM_CONF_GENERIC_SUPPORT else if (!(chip->features & (FE_PFEN|FE_NOPM|FE_DAC))) return &sym_fw1; #endif else return NULL; } /* * Bind a script to physical addresses. */ void sym_fw_bind_script(struct sym_hcb *np, u32 *start, int len) { u32 opcode, new, old, tmp1, tmp2; u32 *end, *cur; int relocs; cur = start; end = start + len/4; while (cur < end) { opcode = *cur; /* * If we forget to change the length * in scripts, a field will be * padded with 0. This is an illegal * command. */ if (opcode == 0) { printf ("%s: ERROR0 IN SCRIPT at %d.\n", sym_name(np), (int) (cur-start)); ++cur; continue; } /* * We use the bogus value 0xf00ff00f ;-) * to reserve data area in SCRIPTS. */ if (opcode == SCR_DATA_ZERO) { *cur++ = 0; continue; } if (DEBUG_FLAGS & DEBUG_SCRIPT) printf ("%d: <%x>\n", (int) (cur-start), (unsigned)opcode); /* * We don't have to decode ALL commands */ switch (opcode >> 28) { case 0xf: /* * LOAD / STORE DSA relative, don't relocate. */ relocs = 0; break; case 0xe: /* * LOAD / STORE absolute. */ relocs = 1; break; case 0xc: /* * COPY has TWO arguments. */ relocs = 2; tmp1 = cur[1]; tmp2 = cur[2]; if ((tmp1 ^ tmp2) & 3) { printf ("%s: ERROR1 IN SCRIPT at %d.\n", sym_name(np), (int) (cur-start)); } /* * If PREFETCH feature not enabled, remove * the NO FLUSH bit if present. 
*/ if ((opcode & SCR_NO_FLUSH) && !(np->features & FE_PFEN)) { opcode = (opcode & ~SCR_NO_FLUSH); } break; case 0x0: /* * MOVE/CHMOV (absolute address) */ if (!(np->features & FE_WIDE)) opcode = (opcode | OPC_MOVE); relocs = 1; break; case 0x1: /* * MOVE/CHMOV (table indirect) */ if (!(np->features & FE_WIDE)) opcode = (opcode | OPC_MOVE); relocs = 0; break; #ifdef SYM_CONF_TARGET_ROLE_SUPPORT case 0x2: /* * MOVE/CHMOV in target role (absolute address) */ opcode &= ~0x20000000; if (!(np->features & FE_WIDE)) opcode = (opcode & ~OPC_TCHMOVE); relocs = 1; break; case 0x3: /* * MOVE/CHMOV in target role (table indirect) */ opcode &= ~0x20000000; if (!(np->features & FE_WIDE)) opcode = (opcode & ~OPC_TCHMOVE); relocs = 0; break; #endif case 0x8: /* * JUMP / CALL * don't relocate if relative :-) */ if (opcode & 0x00800000) relocs = 0; else if ((opcode & 0xf8400000) == 0x80400000)/*JUMP64*/ relocs = 2; else relocs = 1; break; case 0x4: case 0x5: case 0x6: case 0x7: relocs = 1; break; default: relocs = 0; break; } /* * Scriptify:) the opcode. */ *cur++ = cpu_to_scr(opcode); /* * If no relocation, assume 1 argument * and just scriptize:) it. */ if (!relocs) { *cur = cpu_to_scr(*cur); ++cur; continue; } /* * Otherwise performs all needed relocations. */ while (relocs--) { old = *cur; switch (old & RELOC_MASK) { case RELOC_REGISTER: new = (old & ~RELOC_MASK) + np->mmio_ba; break; case RELOC_LABEL_A: new = (old & ~RELOC_MASK) + np->scripta_ba; break; case RELOC_LABEL_B: new = (old & ~RELOC_MASK) + np->scriptb_ba; break; case RELOC_SOFTC: new = (old & ~RELOC_MASK) + np->hcb_ba; break; case 0: /* * Don't relocate a 0 address. * They are mostly used for patched or * script self-modified areas. */ if (old == 0) { new = old; break; } fallthrough; default: new = 0; panic("sym_fw_bind_script: " "weird relocation %x\n", old); break; } *cur++ = cpu_to_scr(new); } } }
linux-master
drivers/scsi/sym53c8xx_2/sym_fw.c
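sym_fw_bind_script() above first decodes each SCRIPTS opcode to learn how many argument words follow it, then rewrites every argument carrying a RELOC_* marker (selected with RELOC_MASK) into an absolute bus address: the chip register block, script A, script B, or the host control block. The sketch below isolates only that relocation step; the RELOC_*_DEMO tag values, the demo_bases structure and the bind_word() helper are made-up stand-ins, and the real routine additionally handles opcode-specific argument counts, wide/narrow patching and stripping of the NO FLUSH bit.

#include <stdio.h>
#include <stdint.h>

/* Illustrative relocation tags; the driver's real RELOC_* values differ. */
#define RELOC_MASK_DEMO     0xf0000000u
#define RELOC_REG_DEMO      0x10000000u  /* chip register block */
#define RELOC_LABEL_A_DEMO  0x20000000u  /* label inside script A */
#define RELOC_LABEL_B_DEMO  0x30000000u  /* label inside script B */

struct demo_bases {
	uint32_t mmio_ba;     /* bus address of the register block */
	uint32_t scripta_ba;  /* bus address of script A */
	uint32_t scriptb_ba;  /* bus address of script B */
};

/* Rewrite one tagged argument word into an absolute bus address. */
static uint32_t bind_word(uint32_t word, const struct demo_bases *b)
{
	uint32_t ofs = word & ~RELOC_MASK_DEMO;

	switch (word & RELOC_MASK_DEMO) {
	case RELOC_REG_DEMO:
		return b->mmio_ba + ofs;
	case RELOC_LABEL_A_DEMO:
		return b->scripta_ba + ofs;
	case RELOC_LABEL_B_DEMO:
		return b->scriptb_ba + ofs;
	default:
		return word;  /* untagged words pass through unchanged */
	}
}

int main(void)
{
	struct demo_bases b = { 0xfebf0000u, 0x10000000u, 0x10001000u };
	uint32_t script[] = {
		RELOC_LABEL_A_DEMO | 0x40,  /* jump target inside script A */
		RELOC_REG_DEMO | 0x14,      /* offset of a chip register */
		0x00001234u,                /* plain immediate, left alone */
	};
	unsigned int i;

	for (i = 0; i < sizeof(script) / sizeof(script[0]); i++)
		printf("0x%08x -> 0x%08x\n", (unsigned int)script[i],
		       (unsigned int)bind_word(script[i], &b));
	return 0;
}

Keeping the firmware image position independent and binding it once at attach time is what lets the same SCRIPTS be run either from host memory or from on-chip RAM (np->ram_ba) without being rebuilt.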
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (c) 2015 Linaro Ltd. * Copyright (c) 2015 Hisilicon Limited. */ #include "hisi_sas.h" #define DRV_NAME "hisi_sas" #define DEV_IS_GONE(dev) \ ((!dev) || (dev->dev_type == SAS_PHY_UNUSED)) static int hisi_sas_softreset_ata_disk(struct domain_device *device); static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func, void *funcdata); static void hisi_sas_release_task(struct hisi_hba *hisi_hba, struct domain_device *device); static void hisi_sas_dev_gone(struct domain_device *device); struct hisi_sas_internal_abort_data { bool rst_ha_timeout; /* reset the HA for timeout */ }; u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction) { switch (fis->command) { case ATA_CMD_FPDMA_WRITE: case ATA_CMD_FPDMA_READ: case ATA_CMD_FPDMA_RECV: case ATA_CMD_FPDMA_SEND: case ATA_CMD_NCQ_NON_DATA: return HISI_SAS_SATA_PROTOCOL_FPDMA; case ATA_CMD_DOWNLOAD_MICRO: case ATA_CMD_ID_ATA: case ATA_CMD_PMP_READ: case ATA_CMD_READ_LOG_EXT: case ATA_CMD_PIO_READ: case ATA_CMD_PIO_READ_EXT: case ATA_CMD_PMP_WRITE: case ATA_CMD_WRITE_LOG_EXT: case ATA_CMD_PIO_WRITE: case ATA_CMD_PIO_WRITE_EXT: return HISI_SAS_SATA_PROTOCOL_PIO; case ATA_CMD_DSM: case ATA_CMD_DOWNLOAD_MICRO_DMA: case ATA_CMD_PMP_READ_DMA: case ATA_CMD_PMP_WRITE_DMA: case ATA_CMD_READ: case ATA_CMD_READ_EXT: case ATA_CMD_READ_LOG_DMA_EXT: case ATA_CMD_READ_STREAM_DMA_EXT: case ATA_CMD_TRUSTED_RCV_DMA: case ATA_CMD_TRUSTED_SND_DMA: case ATA_CMD_WRITE: case ATA_CMD_WRITE_EXT: case ATA_CMD_WRITE_FUA_EXT: case ATA_CMD_WRITE_QUEUED: case ATA_CMD_WRITE_LOG_DMA_EXT: case ATA_CMD_WRITE_STREAM_DMA_EXT: case ATA_CMD_ZAC_MGMT_IN: return HISI_SAS_SATA_PROTOCOL_DMA; case ATA_CMD_CHK_POWER: case ATA_CMD_DEV_RESET: case ATA_CMD_EDD: case ATA_CMD_FLUSH: case ATA_CMD_FLUSH_EXT: case ATA_CMD_VERIFY: case ATA_CMD_VERIFY_EXT: case ATA_CMD_SET_FEATURES: case ATA_CMD_STANDBY: case ATA_CMD_STANDBYNOW1: case ATA_CMD_ZAC_MGMT_OUT: return HISI_SAS_SATA_PROTOCOL_NONDATA; case ATA_CMD_SET_MAX: switch (fis->features) { case ATA_SET_MAX_PASSWD: case ATA_SET_MAX_LOCK: return HISI_SAS_SATA_PROTOCOL_PIO; case ATA_SET_MAX_PASSWD_DMA: case ATA_SET_MAX_UNLOCK_DMA: return HISI_SAS_SATA_PROTOCOL_DMA; default: return HISI_SAS_SATA_PROTOCOL_NONDATA; } default: { if (direction == DMA_NONE) return HISI_SAS_SATA_PROTOCOL_NONDATA; return HISI_SAS_SATA_PROTOCOL_PIO; } } } EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol); void hisi_sas_sata_done(struct sas_task *task, struct hisi_sas_slot *slot) { struct task_status_struct *ts = &task->task_status; struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf; struct hisi_sas_status_buffer *status_buf = hisi_sas_status_buf_addr_mem(slot); u8 *iu = &status_buf->iu[0]; struct dev_to_host_fis *d2h = (struct dev_to_host_fis *)iu; resp->frame_len = sizeof(struct dev_to_host_fis); memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis)); ts->buf_valid_size = sizeof(*resp); } EXPORT_SYMBOL_GPL(hisi_sas_sata_done); /* * This function assumes linkrate mask fits in 8 bits, which it * does for all HW versions supported. 
*/ u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max) { u8 rate = 0; int i; max -= SAS_LINK_RATE_1_5_GBPS; for (i = 0; i <= max; i++) rate |= 1 << (i * 2); return rate; } EXPORT_SYMBOL_GPL(hisi_sas_get_prog_phy_linkrate_mask); static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device) { return device->port->ha->lldd_ha; } struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port) { return container_of(sas_port, struct hisi_sas_port, sas_port); } EXPORT_SYMBOL_GPL(to_hisi_sas_port); void hisi_sas_stop_phys(struct hisi_hba *hisi_hba) { int phy_no; for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) hisi_sas_phy_enable(hisi_hba, phy_no, 0); } EXPORT_SYMBOL_GPL(hisi_sas_stop_phys); static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx) { void *bitmap = hisi_hba->slot_index_tags; __clear_bit(slot_idx, bitmap); } static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx) { if (hisi_hba->hw->slot_index_alloc || slot_idx < HISI_SAS_RESERVED_IPTT) { spin_lock(&hisi_hba->lock); hisi_sas_slot_index_clear(hisi_hba, slot_idx); spin_unlock(&hisi_hba->lock); } } static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx) { void *bitmap = hisi_hba->slot_index_tags; __set_bit(slot_idx, bitmap); } static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, struct request *rq) { int index; void *bitmap = hisi_hba->slot_index_tags; if (rq) return rq->tag + HISI_SAS_RESERVED_IPTT; spin_lock(&hisi_hba->lock); index = find_next_zero_bit(bitmap, HISI_SAS_RESERVED_IPTT, hisi_hba->last_slot_index + 1); if (index >= HISI_SAS_RESERVED_IPTT) { index = find_next_zero_bit(bitmap, HISI_SAS_RESERVED_IPTT, 0); if (index >= HISI_SAS_RESERVED_IPTT) { spin_unlock(&hisi_hba->lock); return -SAS_QUEUE_FULL; } } hisi_sas_slot_index_set(hisi_hba, index); hisi_hba->last_slot_index = index; spin_unlock(&hisi_hba->lock); return index; } void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task, struct hisi_sas_slot *slot, bool need_lock) { int device_id = slot->device_id; struct hisi_sas_device *sas_dev = &hisi_hba->devices[device_id]; if (task) { struct device *dev = hisi_hba->dev; if (!task->lldd_task) return; task->lldd_task = NULL; if (!sas_protocol_ata(task->task_proto)) { if (slot->n_elem) { if (task->task_proto & SAS_PROTOCOL_SSP) dma_unmap_sg(dev, task->scatter, task->num_scatter, task->data_dir); else dma_unmap_sg(dev, &task->smp_task.smp_req, 1, DMA_TO_DEVICE); } if (slot->n_elem_dif) { struct sas_ssp_task *ssp_task = &task->ssp_task; struct scsi_cmnd *scsi_cmnd = ssp_task->cmd; dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd), scsi_prot_sg_count(scsi_cmnd), task->data_dir); } } } if (need_lock) { spin_lock(&sas_dev->lock); list_del_init(&slot->entry); spin_unlock(&sas_dev->lock); } else { list_del_init(&slot->entry); } memset(slot, 0, offsetof(struct hisi_sas_slot, buf)); hisi_sas_slot_index_free(hisi_hba, slot->idx); } EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free); static void hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot) { hisi_hba->hw->prep_smp(hisi_hba, slot); } static void hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot) { hisi_hba->hw->prep_ssp(hisi_hba, slot); } static void hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot) { hisi_hba->hw->prep_stp(hisi_hba, slot); } static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot) { hisi_hba->hw->prep_abort(hisi_hba, slot); } static void 
hisi_sas_dma_unmap(struct hisi_hba *hisi_hba, struct sas_task *task, int n_elem) { struct device *dev = hisi_hba->dev; if (!sas_protocol_ata(task->task_proto) && n_elem) { if (task->num_scatter) { dma_unmap_sg(dev, task->scatter, task->num_scatter, task->data_dir); } else if (task->task_proto & SAS_PROTOCOL_SMP) { dma_unmap_sg(dev, &task->smp_task.smp_req, 1, DMA_TO_DEVICE); } } } static int hisi_sas_dma_map(struct hisi_hba *hisi_hba, struct sas_task *task, int *n_elem) { struct device *dev = hisi_hba->dev; int rc; if (sas_protocol_ata(task->task_proto)) { *n_elem = task->num_scatter; } else { unsigned int req_len; if (task->num_scatter) { *n_elem = dma_map_sg(dev, task->scatter, task->num_scatter, task->data_dir); if (!*n_elem) { rc = -ENOMEM; goto prep_out; } } else if (task->task_proto & SAS_PROTOCOL_SMP) { *n_elem = dma_map_sg(dev, &task->smp_task.smp_req, 1, DMA_TO_DEVICE); if (!*n_elem) { rc = -ENOMEM; goto prep_out; } req_len = sg_dma_len(&task->smp_task.smp_req); if (req_len & 0x3) { rc = -EINVAL; goto err_out_dma_unmap; } } } if (*n_elem > HISI_SAS_SGE_PAGE_CNT) { dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT\n", *n_elem); rc = -EINVAL; goto err_out_dma_unmap; } return 0; err_out_dma_unmap: /* It would be better to call dma_unmap_sg() here, but it's messy */ hisi_sas_dma_unmap(hisi_hba, task, *n_elem); prep_out: return rc; } static void hisi_sas_dif_dma_unmap(struct hisi_hba *hisi_hba, struct sas_task *task, int n_elem_dif) { struct device *dev = hisi_hba->dev; if (n_elem_dif) { struct sas_ssp_task *ssp_task = &task->ssp_task; struct scsi_cmnd *scsi_cmnd = ssp_task->cmd; dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd), scsi_prot_sg_count(scsi_cmnd), task->data_dir); } } static int hisi_sas_dif_dma_map(struct hisi_hba *hisi_hba, int *n_elem_dif, struct sas_task *task) { struct device *dev = hisi_hba->dev; struct sas_ssp_task *ssp_task; struct scsi_cmnd *scsi_cmnd; int rc; if (task->num_scatter) { ssp_task = &task->ssp_task; scsi_cmnd = ssp_task->cmd; if (scsi_prot_sg_count(scsi_cmnd)) { *n_elem_dif = dma_map_sg(dev, scsi_prot_sglist(scsi_cmnd), scsi_prot_sg_count(scsi_cmnd), task->data_dir); if (!*n_elem_dif) return -ENOMEM; if (*n_elem_dif > HISI_SAS_SGE_DIF_PAGE_CNT) { dev_err(dev, "task prep: n_elem_dif(%d) too large\n", *n_elem_dif); rc = -EINVAL; goto err_out_dif_dma_unmap; } } } return 0; err_out_dif_dma_unmap: dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd), scsi_prot_sg_count(scsi_cmnd), task->data_dir); return rc; } static void hisi_sas_task_deliver(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot, struct hisi_sas_dq *dq, struct hisi_sas_device *sas_dev) { struct hisi_sas_cmd_hdr *cmd_hdr_base; int dlvry_queue_slot, dlvry_queue; struct sas_task *task = slot->task; int wr_q_index; spin_lock(&dq->lock); wr_q_index = dq->wr_point; dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS; list_add_tail(&slot->delivery, &dq->list); spin_unlock(&dq->lock); spin_lock(&sas_dev->lock); list_add_tail(&slot->entry, &sas_dev->list); spin_unlock(&sas_dev->lock); dlvry_queue = dq->id; dlvry_queue_slot = wr_q_index; slot->device_id = sas_dev->device_id; slot->dlvry_queue = dlvry_queue; slot->dlvry_queue_slot = dlvry_queue_slot; cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue]; slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot]; task->lldd_task = slot; memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr)); memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ); memset(hisi_sas_status_buf_addr_mem(slot), 0, sizeof(struct hisi_sas_err_record)); switch 
(task->task_proto) { case SAS_PROTOCOL_SMP: hisi_sas_task_prep_smp(hisi_hba, slot); break; case SAS_PROTOCOL_SSP: hisi_sas_task_prep_ssp(hisi_hba, slot); break; case SAS_PROTOCOL_SATA: case SAS_PROTOCOL_STP: case SAS_PROTOCOL_STP_ALL: hisi_sas_task_prep_ata(hisi_hba, slot); break; case SAS_PROTOCOL_INTERNAL_ABORT: hisi_sas_task_prep_abort(hisi_hba, slot); break; default: return; } /* Make slot memories observable before marking as ready */ smp_wmb(); WRITE_ONCE(slot->ready, 1); spin_lock(&dq->lock); hisi_hba->hw->start_delivery(dq); spin_unlock(&dq->lock); } static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags) { int n_elem = 0, n_elem_dif = 0; struct domain_device *device = task->dev; struct asd_sas_port *sas_port = device->port; struct hisi_sas_device *sas_dev = device->lldd_dev; bool internal_abort = sas_is_internal_abort(task); struct hisi_sas_dq *dq = NULL; struct hisi_sas_port *port; struct hisi_hba *hisi_hba; struct hisi_sas_slot *slot; struct request *rq = NULL; struct device *dev; int rc; if (!sas_port) { struct task_status_struct *ts = &task->task_status; ts->resp = SAS_TASK_UNDELIVERED; ts->stat = SAS_PHY_DOWN; /* * libsas will use dev->port, should * not call task_done for sata */ if (device->dev_type != SAS_SATA_DEV && !internal_abort) task->task_done(task); return -ECOMM; } hisi_hba = dev_to_hisi_hba(device); dev = hisi_hba->dev; switch (task->task_proto) { case SAS_PROTOCOL_SSP: case SAS_PROTOCOL_SMP: case SAS_PROTOCOL_SATA: case SAS_PROTOCOL_STP: case SAS_PROTOCOL_STP_ALL: if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) { if (!gfpflags_allow_blocking(gfp_flags)) return -EINVAL; down(&hisi_hba->sem); up(&hisi_hba->sem); } if (DEV_IS_GONE(sas_dev)) { if (sas_dev) dev_info(dev, "task prep: device %d not ready\n", sas_dev->device_id); else dev_info(dev, "task prep: device %016llx not ready\n", SAS_ADDR(device->sas_addr)); return -ECOMM; } port = to_hisi_sas_port(sas_port); if (!port->port_attached) { dev_info(dev, "task prep: %s port%d not attach device\n", dev_is_sata(device) ? 
"SATA/STP" : "SAS", device->port->id); return -ECOMM; } rq = sas_task_find_rq(task); if (rq) { unsigned int dq_index; u32 blk_tag; blk_tag = blk_mq_unique_tag(rq); dq_index = blk_mq_unique_tag_to_hwq(blk_tag); dq = &hisi_hba->dq[dq_index]; } else { int queue; if (hisi_hba->iopoll_q_cnt) { /* * Use interrupt queue (queue 0) to deliver and complete * internal IOs of libsas or libata when there is at least * one iopoll queue */ queue = 0; } else { struct Scsi_Host *shost = hisi_hba->shost; struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT]; queue = qmap->mq_map[raw_smp_processor_id()]; } dq = &hisi_hba->dq[queue]; } break; case SAS_PROTOCOL_INTERNAL_ABORT: if (!hisi_hba->hw->prep_abort) return TMF_RESP_FUNC_FAILED; if (test_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags)) return -EIO; hisi_hba = dev_to_hisi_hba(device); if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) return -EINVAL; port = to_hisi_sas_port(sas_port); dq = &hisi_hba->dq[task->abort_task.qid]; break; default: dev_err(hisi_hba->dev, "task prep: unknown/unsupported proto (0x%x)\n", task->task_proto); return -EINVAL; } rc = hisi_sas_dma_map(hisi_hba, task, &n_elem); if (rc < 0) goto prep_out; if (!sas_protocol_ata(task->task_proto)) { rc = hisi_sas_dif_dma_map(hisi_hba, &n_elem_dif, task); if (rc < 0) goto err_out_dma_unmap; } if (!internal_abort && hisi_hba->hw->slot_index_alloc) rc = hisi_hba->hw->slot_index_alloc(hisi_hba, device); else rc = hisi_sas_slot_index_alloc(hisi_hba, rq); if (rc < 0) goto err_out_dif_dma_unmap; slot = &hisi_hba->slot_info[rc]; slot->n_elem = n_elem; slot->n_elem_dif = n_elem_dif; slot->task = task; slot->port = port; slot->tmf = task->tmf; slot->is_internal = !!task->tmf || internal_abort; /* protect task_prep and start_delivery sequence */ hisi_sas_task_deliver(hisi_hba, slot, dq, sas_dev); return 0; err_out_dif_dma_unmap: if (!sas_protocol_ata(task->task_proto)) hisi_sas_dif_dma_unmap(hisi_hba, task, n_elem_dif); err_out_dma_unmap: hisi_sas_dma_unmap(hisi_hba, task, n_elem); prep_out: dev_err(dev, "task exec: failed[%d]!\n", rc); return rc; } static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no, gfp_t gfp_flags) { struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; struct asd_sas_phy *sas_phy = &phy->sas_phy; if (!phy->phy_attached) return; sas_notify_phy_event(sas_phy, PHYE_OOB_DONE, gfp_flags); if (sas_phy->phy) { struct sas_phy *sphy = sas_phy->phy; sphy->negotiated_linkrate = sas_phy->linkrate; sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS; sphy->maximum_linkrate_hw = hisi_hba->hw->phy_get_max_linkrate(); if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) sphy->minimum_linkrate = phy->minimum_linkrate; if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) sphy->maximum_linkrate = phy->maximum_linkrate; } if (phy->phy_type & PORT_TYPE_SAS) { struct sas_identify_frame *id; id = (struct sas_identify_frame *)phy->frame_rcvd; id->dev_type = phy->identify.device_type; id->initiator_bits = SAS_PROTOCOL_ALL; id->target_bits = phy->identify.target_port_protocols; } else if (phy->phy_type & PORT_TYPE_SATA) { /* Nothing */ } sas_phy->frame_rcvd_size = phy->frame_rcvd_size; sas_notify_port_event(sas_phy, PORTE_BYTES_DMAED, gfp_flags); } static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device) { struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); struct hisi_sas_device *sas_dev = NULL; int last = hisi_hba->last_dev_id; int first = (hisi_hba->last_dev_id + 1) % HISI_SAS_MAX_DEVICES; int i; spin_lock(&hisi_hba->lock); for (i = 
first; i != last; i %= HISI_SAS_MAX_DEVICES) { if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) { int queue = i % hisi_hba->queue_count; struct hisi_sas_dq *dq = &hisi_hba->dq[queue]; hisi_hba->devices[i].device_id = i; sas_dev = &hisi_hba->devices[i]; sas_dev->dev_status = HISI_SAS_DEV_INIT; sas_dev->dev_type = device->dev_type; sas_dev->hisi_hba = hisi_hba; sas_dev->sas_device = device; sas_dev->dq = dq; spin_lock_init(&sas_dev->lock); INIT_LIST_HEAD(&hisi_hba->devices[i].list); break; } i++; } hisi_hba->last_dev_id = i; spin_unlock(&hisi_hba->lock); return sas_dev; } static void hisi_sas_sync_poll_cq(struct hisi_sas_cq *cq) { /* make sure CQ entries being processed are processed to completion */ spin_lock(&cq->poll_lock); spin_unlock(&cq->poll_lock); } static bool hisi_sas_queue_is_poll(struct hisi_sas_cq *cq) { struct hisi_hba *hisi_hba = cq->hisi_hba; if (cq->id < hisi_hba->queue_count - hisi_hba->iopoll_q_cnt) return false; return true; } static void hisi_sas_sync_cq(struct hisi_sas_cq *cq) { if (hisi_sas_queue_is_poll(cq)) hisi_sas_sync_poll_cq(cq); else synchronize_irq(cq->irq_no); } void hisi_sas_sync_poll_cqs(struct hisi_hba *hisi_hba) { int i; for (i = 0; i < hisi_hba->queue_count; i++) { struct hisi_sas_cq *cq = &hisi_hba->cq[i]; if (hisi_sas_queue_is_poll(cq)) hisi_sas_sync_poll_cq(cq); } } EXPORT_SYMBOL_GPL(hisi_sas_sync_poll_cqs); void hisi_sas_sync_cqs(struct hisi_hba *hisi_hba) { int i; for (i = 0; i < hisi_hba->queue_count; i++) { struct hisi_sas_cq *cq = &hisi_hba->cq[i]; hisi_sas_sync_cq(cq); } } EXPORT_SYMBOL_GPL(hisi_sas_sync_cqs); static void hisi_sas_tmf_aborted(struct sas_task *task) { struct hisi_sas_slot *slot = task->lldd_task; struct domain_device *device = task->dev; struct hisi_sas_device *sas_dev = device->lldd_dev; struct hisi_hba *hisi_hba = sas_dev->hisi_hba; if (slot) { struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue]; /* * sync irq or poll queue to avoid free'ing task * before using task in IO completion */ hisi_sas_sync_cq(cq); slot->task = NULL; } } #define HISI_SAS_DISK_RECOVER_CNT 3 static int hisi_sas_init_device(struct domain_device *device) { int rc = TMF_RESP_FUNC_COMPLETE; struct scsi_lun lun; int retry = HISI_SAS_DISK_RECOVER_CNT; struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); switch (device->dev_type) { case SAS_END_DEVICE: int_to_scsilun(0, &lun); while (retry-- > 0) { rc = sas_abort_task_set(device, lun.scsi_lun); if (rc == TMF_RESP_FUNC_COMPLETE) { hisi_sas_release_task(hisi_hba, device); break; } } break; case SAS_SATA_DEV: case SAS_SATA_PM: case SAS_SATA_PM_PORT: case SAS_SATA_PENDING: /* * If an expander is swapped when a SATA disk is attached then * we should issue a hard reset to clear previous affiliation * of STP target port, see SPL (chapter 6.19.4). * * However we don't need to issue a hard reset here for these * reasons: * a. When probing the device, libsas/libata already issues a * hard reset in sas_probe_sata() -> ata_port_probe(). * Note that in hisi_sas_debug_I_T_nexus_reset() we take care * to issue a hard reset by checking the dev status (== INIT). * b. When resetting the controller, this is simply unnecessary. 
*/ while (retry-- > 0) { rc = hisi_sas_softreset_ata_disk(device); if (!rc) break; } break; default: break; } return rc; } int hisi_sas_slave_alloc(struct scsi_device *sdev) { struct domain_device *ddev = sdev_to_domain_dev(sdev); struct hisi_sas_device *sas_dev = ddev->lldd_dev; int rc; rc = sas_slave_alloc(sdev); if (rc) return rc; rc = hisi_sas_init_device(ddev); if (rc) return rc; sas_dev->dev_status = HISI_SAS_DEV_NORMAL; return 0; } EXPORT_SYMBOL_GPL(hisi_sas_slave_alloc); static int hisi_sas_dev_found(struct domain_device *device) { struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); struct domain_device *parent_dev = device->parent; struct hisi_sas_device *sas_dev; struct device *dev = hisi_hba->dev; int rc; if (hisi_hba->hw->alloc_dev) sas_dev = hisi_hba->hw->alloc_dev(device); else sas_dev = hisi_sas_alloc_dev(device); if (!sas_dev) { dev_err(dev, "fail alloc dev: max support %d devices\n", HISI_SAS_MAX_DEVICES); return -EINVAL; } device->lldd_dev = sas_dev; hisi_hba->hw->setup_itct(hisi_hba, sas_dev); if (parent_dev && dev_is_expander(parent_dev->dev_type)) { int phy_no; phy_no = sas_find_attached_phy_id(&parent_dev->ex_dev, device); if (phy_no < 0) { dev_info(dev, "dev found: no attached " "dev:%016llx at ex:%016llx\n", SAS_ADDR(device->sas_addr), SAS_ADDR(parent_dev->sas_addr)); rc = phy_no; goto err_out; } } dev_info(dev, "dev[%d:%x] found\n", sas_dev->device_id, sas_dev->dev_type); return 0; err_out: hisi_sas_dev_gone(device); return rc; } int hisi_sas_slave_configure(struct scsi_device *sdev) { struct domain_device *dev = sdev_to_domain_dev(sdev); int ret = sas_slave_configure(sdev); if (ret) return ret; if (!dev_is_sata(dev)) sas_change_queue_depth(sdev, 64); return 0; } EXPORT_SYMBOL_GPL(hisi_sas_slave_configure); void hisi_sas_scan_start(struct Scsi_Host *shost) { struct hisi_hba *hisi_hba = shost_priv(shost); hisi_hba->hw->phys_init(hisi_hba); } EXPORT_SYMBOL_GPL(hisi_sas_scan_start); int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time) { struct hisi_hba *hisi_hba = shost_priv(shost); struct sas_ha_struct *sha = &hisi_hba->sha; /* Wait for PHY up interrupt to occur */ if (time < HZ) return 0; sas_drain_work(sha); return 1; } EXPORT_SYMBOL_GPL(hisi_sas_scan_finished); static void hisi_sas_phyup_work_common(struct work_struct *work, enum hisi_sas_phy_event event) { struct hisi_sas_phy *phy = container_of(work, typeof(*phy), works[event]); struct hisi_hba *hisi_hba = phy->hisi_hba; struct asd_sas_phy *sas_phy = &phy->sas_phy; int phy_no = sas_phy->id; phy->wait_phyup_cnt = 0; if (phy->identify.target_port_protocols == SAS_PROTOCOL_SSP) hisi_hba->hw->sl_notify_ssp(hisi_hba, phy_no); hisi_sas_bytes_dmaed(hisi_hba, phy_no, GFP_KERNEL); } static void hisi_sas_phyup_work(struct work_struct *work) { hisi_sas_phyup_work_common(work, HISI_PHYE_PHY_UP); } static void hisi_sas_linkreset_work(struct work_struct *work) { struct hisi_sas_phy *phy = container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]); struct asd_sas_phy *sas_phy = &phy->sas_phy; hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL); } static void hisi_sas_phyup_pm_work(struct work_struct *work) { struct hisi_sas_phy *phy = container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP_PM]); struct hisi_hba *hisi_hba = phy->hisi_hba; struct device *dev = hisi_hba->dev; hisi_sas_phyup_work_common(work, HISI_PHYE_PHY_UP_PM); pm_runtime_put_sync(dev); } static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = { [HISI_PHYE_PHY_UP] = hisi_sas_phyup_work, [HISI_PHYE_LINK_RESET] = 
hisi_sas_linkreset_work, [HISI_PHYE_PHY_UP_PM] = hisi_sas_phyup_pm_work, }; bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy, enum hisi_sas_phy_event event) { struct hisi_hba *hisi_hba = phy->hisi_hba; if (WARN_ON(event >= HISI_PHYES_NUM)) return false; return queue_work(hisi_hba->wq, &phy->works[event]); } EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event); static void hisi_sas_wait_phyup_timedout(struct timer_list *t) { struct hisi_sas_phy *phy = from_timer(phy, t, timer); struct hisi_hba *hisi_hba = phy->hisi_hba; struct device *dev = hisi_hba->dev; int phy_no = phy->sas_phy.id; dev_warn(dev, "phy%d wait phyup timeout, issuing link reset\n", phy_no); hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET); } #define HISI_SAS_WAIT_PHYUP_RETRIES 10 void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no) { struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; struct device *dev = hisi_hba->dev; unsigned long flags; dev_dbg(dev, "phy%d OOB ready\n", phy_no); spin_lock_irqsave(&phy->lock, flags); if (phy->phy_attached) { spin_unlock_irqrestore(&phy->lock, flags); return; } if (!timer_pending(&phy->timer)) { if (phy->wait_phyup_cnt < HISI_SAS_WAIT_PHYUP_RETRIES) { phy->wait_phyup_cnt++; phy->timer.expires = jiffies + HISI_SAS_WAIT_PHYUP_TIMEOUT; add_timer(&phy->timer); spin_unlock_irqrestore(&phy->lock, flags); return; } dev_warn(dev, "phy%d failed to come up %d times, giving up\n", phy_no, phy->wait_phyup_cnt); phy->wait_phyup_cnt = 0; } spin_unlock_irqrestore(&phy->lock, flags); } EXPORT_SYMBOL_GPL(hisi_sas_phy_oob_ready); static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no) { struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; struct asd_sas_phy *sas_phy = &phy->sas_phy; int i; phy->hisi_hba = hisi_hba; phy->port = NULL; phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS; phy->maximum_linkrate = hisi_hba->hw->phy_get_max_linkrate(); sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 
1 : 0; sas_phy->iproto = SAS_PROTOCOL_ALL; sas_phy->tproto = 0; sas_phy->role = PHY_ROLE_INITIATOR; sas_phy->oob_mode = OOB_NOT_CONNECTED; sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN; sas_phy->id = phy_no; sas_phy->sas_addr = &hisi_hba->sas_addr[0]; sas_phy->frame_rcvd = &phy->frame_rcvd[0]; sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata; sas_phy->lldd_phy = phy; for (i = 0; i < HISI_PHYES_NUM; i++) INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]); spin_lock_init(&phy->lock); timer_setup(&phy->timer, hisi_sas_wait_phyup_timedout, 0); } /* Wrapper to ensure we track hisi_sas_phy.enable properly */ void hisi_sas_phy_enable(struct hisi_hba *hisi_hba, int phy_no, int enable) { struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; struct asd_sas_phy *aphy = &phy->sas_phy; struct sas_phy *sphy = aphy->phy; unsigned long flags; spin_lock_irqsave(&phy->lock, flags); if (enable) { /* We may have been enabled already; if so, don't touch */ if (!phy->enable) sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN; hisi_hba->hw->phy_start(hisi_hba, phy_no); } else { sphy->negotiated_linkrate = SAS_PHY_DISABLED; hisi_hba->hw->phy_disable(hisi_hba, phy_no); } phy->enable = enable; spin_unlock_irqrestore(&phy->lock, flags); } EXPORT_SYMBOL_GPL(hisi_sas_phy_enable); static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy) { struct hisi_sas_phy *phy = sas_phy->lldd_phy; struct asd_sas_port *sas_port = sas_phy->port; struct hisi_sas_port *port; if (!sas_port) return; port = to_hisi_sas_port(sas_port); port->port_attached = 1; port->id = phy->port_id; phy->port = port; sas_port->lldd_port = port; } static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task, struct hisi_sas_slot *slot, bool need_lock) { if (task) { unsigned long flags; struct task_status_struct *ts; ts = &task->task_status; ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_ABORTED_TASK; spin_lock_irqsave(&task->task_state_lock, flags); task->task_state_flags &= ~SAS_TASK_STATE_PENDING; if (!slot->is_internal && task->task_proto != SAS_PROTOCOL_SMP) task->task_state_flags |= SAS_TASK_STATE_DONE; spin_unlock_irqrestore(&task->task_state_lock, flags); } hisi_sas_slot_task_free(hisi_hba, task, slot, need_lock); } static void hisi_sas_release_task(struct hisi_hba *hisi_hba, struct domain_device *device) { struct hisi_sas_slot *slot, *slot2; struct hisi_sas_device *sas_dev = device->lldd_dev; spin_lock(&sas_dev->lock); list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry) hisi_sas_do_release_task(hisi_hba, slot->task, slot, false); spin_unlock(&sas_dev->lock); } void hisi_sas_release_tasks(struct hisi_hba *hisi_hba) { struct hisi_sas_device *sas_dev; struct domain_device *device; int i; for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { sas_dev = &hisi_hba->devices[i]; device = sas_dev->sas_device; if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device) continue; hisi_sas_release_task(hisi_hba, device); } } EXPORT_SYMBOL_GPL(hisi_sas_release_tasks); static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba, struct domain_device *device) { if (hisi_hba->hw->dereg_device) hisi_hba->hw->dereg_device(hisi_hba, device); } static int hisi_sas_internal_task_abort_dev(struct hisi_sas_device *sas_dev, bool rst_ha_timeout) { struct hisi_sas_internal_abort_data data = { rst_ha_timeout }; struct domain_device *device = sas_dev->sas_device; struct hisi_hba *hisi_hba = sas_dev->hisi_hba; int i, rc; for (i = 0; i < hisi_hba->cq_nvecs; i++) { struct hisi_sas_cq *cq = &hisi_hba->cq[i]; const struct cpumask *mask = 
cq->irq_mask; if (mask && !cpumask_intersects(cpu_online_mask, mask)) continue; rc = sas_execute_internal_abort_dev(device, i, &data); if (rc) return rc; } return 0; } static void hisi_sas_dev_gone(struct domain_device *device) { struct hisi_sas_device *sas_dev = device->lldd_dev; struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); struct device *dev = hisi_hba->dev; int ret = 0; dev_info(dev, "dev[%d:%x] is gone\n", sas_dev->device_id, sas_dev->dev_type); down(&hisi_hba->sem); if (!test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) { hisi_sas_internal_task_abort_dev(sas_dev, true); hisi_sas_dereg_device(hisi_hba, device); ret = hisi_hba->hw->clear_itct(hisi_hba, sas_dev); device->lldd_dev = NULL; } if (hisi_hba->hw->free_device) hisi_hba->hw->free_device(sas_dev); /* Don't mark it as SAS_PHY_UNUSED if failed to clear ITCT */ if (!ret) sas_dev->dev_type = SAS_PHY_UNUSED; sas_dev->sas_device = NULL; up(&hisi_hba->sem); } static int hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no, struct sas_phy_linkrates *r) { struct sas_phy_linkrates _r; struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; struct asd_sas_phy *sas_phy = &phy->sas_phy; enum sas_linkrate min, max; if (r->minimum_linkrate > SAS_LINK_RATE_1_5_GBPS) return -EINVAL; if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) { max = sas_phy->phy->maximum_linkrate; min = r->minimum_linkrate; } else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) { max = r->maximum_linkrate; min = sas_phy->phy->minimum_linkrate; } else return -EINVAL; _r.maximum_linkrate = max; _r.minimum_linkrate = min; sas_phy->phy->maximum_linkrate = max; sas_phy->phy->minimum_linkrate = min; hisi_sas_phy_enable(hisi_hba, phy_no, 0); msleep(100); hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r); hisi_sas_phy_enable(hisi_hba, phy_no, 1); return 0; } static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func, void *funcdata) { struct hisi_sas_phy *phy = container_of(sas_phy, struct hisi_sas_phy, sas_phy); struct sas_ha_struct *sas_ha = sas_phy->ha; struct hisi_hba *hisi_hba = sas_ha->lldd_ha; struct device *dev = hisi_hba->dev; DECLARE_COMPLETION_ONSTACK(completion); int phy_no = sas_phy->id; u8 sts = phy->phy_attached; int ret = 0; down(&hisi_hba->sem); phy->reset_completion = &completion; switch (func) { case PHY_FUNC_HARD_RESET: hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no); break; case PHY_FUNC_LINK_RESET: hisi_sas_phy_enable(hisi_hba, phy_no, 0); msleep(100); hisi_sas_phy_enable(hisi_hba, phy_no, 1); break; case PHY_FUNC_DISABLE: hisi_sas_phy_enable(hisi_hba, phy_no, 0); goto out; case PHY_FUNC_SET_LINK_RATE: ret = hisi_sas_phy_set_linkrate(hisi_hba, phy_no, funcdata); break; case PHY_FUNC_GET_EVENTS: if (hisi_hba->hw->get_events) { hisi_hba->hw->get_events(hisi_hba, phy_no); goto out; } fallthrough; case PHY_FUNC_RELEASE_SPINUP_HOLD: default: ret = -EOPNOTSUPP; goto out; } if (sts && !wait_for_completion_timeout(&completion, HISI_SAS_WAIT_PHYUP_TIMEOUT)) { dev_warn(dev, "phy%d wait phyup timed out for func %d\n", phy_no, func); if (phy->in_reset) ret = -ETIMEDOUT; } out: phy->reset_completion = NULL; up(&hisi_hba->sem); return ret; } static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev, bool reset, int pmp, u8 *fis) { struct ata_taskfile tf; ata_tf_init(dev, &tf); if (reset) tf.ctl |= ATA_SRST; else tf.ctl &= ~ATA_SRST; tf.command = ATA_CMD_DEV_RESET; ata_tf_to_fis(&tf, pmp, 0, fis); } static int hisi_sas_softreset_ata_disk(struct domain_device *device) { u8 fis[20] = {0}; struct ata_port *ap = 
device->sata_dev.ap; struct ata_link *link; int rc = TMF_RESP_FUNC_FAILED; struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); struct device *dev = hisi_hba->dev; ata_for_each_link(link, ap, EDGE) { int pmp = sata_srst_pmp(link); hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis); rc = sas_execute_ata_cmd(device, fis, -1); if (rc != TMF_RESP_FUNC_COMPLETE) break; } if (rc == TMF_RESP_FUNC_COMPLETE) { ata_for_each_link(link, ap, EDGE) { int pmp = sata_srst_pmp(link); hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis); rc = sas_execute_ata_cmd(device, fis, -1); if (rc != TMF_RESP_FUNC_COMPLETE) dev_err(dev, "ata disk %016llx de-reset failed\n", SAS_ADDR(device->sas_addr)); } } else { dev_err(dev, "ata disk %016llx reset failed\n", SAS_ADDR(device->sas_addr)); } if (rc == TMF_RESP_FUNC_COMPLETE) hisi_sas_release_task(hisi_hba, device); return rc; } static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba) { u32 state = hisi_hba->hw->get_phys_state(hisi_hba); int i; for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { struct hisi_sas_device *sas_dev = &hisi_hba->devices[i]; struct domain_device *device = sas_dev->sas_device; struct asd_sas_port *sas_port; struct hisi_sas_port *port; struct hisi_sas_phy *phy = NULL; struct asd_sas_phy *sas_phy; if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device || !device->port) continue; sas_port = device->port; port = to_hisi_sas_port(sas_port); spin_lock(&sas_port->phy_list_lock); list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el) if (state & BIT(sas_phy->id)) { phy = sas_phy->lldd_phy; break; } spin_unlock(&sas_port->phy_list_lock); if (phy) { port->id = phy->port_id; /* Update linkrate of directly attached device. */ if (!device->parent) device->linkrate = phy->sas_phy.linkrate; hisi_hba->hw->setup_itct(hisi_hba, sas_dev); } else if (!port->port_attached) port->id = 0xff; } } static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state) { struct asd_sas_port *_sas_port = NULL; int phy_no; for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) { struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; struct asd_sas_phy *sas_phy = &phy->sas_phy; struct asd_sas_port *sas_port = sas_phy->port; bool do_port_check = _sas_port != sas_port; if (!sas_phy->phy->enabled) continue; /* Report PHY state change to libsas */ if (state & BIT(phy_no)) { if (do_port_check && sas_port && sas_port->port_dev) { struct domain_device *dev = sas_port->port_dev; _sas_port = sas_port; if (dev_is_expander(dev->dev_type)) sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD, GFP_KERNEL); } } else { hisi_sas_phy_down(hisi_hba, phy_no, 0, GFP_KERNEL); } } } static void hisi_sas_reset_init_all_devices(struct hisi_hba *hisi_hba) { struct hisi_sas_device *sas_dev; struct domain_device *device; int i; for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { sas_dev = &hisi_hba->devices[i]; device = sas_dev->sas_device; if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device) continue; hisi_sas_init_device(device); } } static void hisi_sas_send_ata_reset_each_phy(struct hisi_hba *hisi_hba, struct asd_sas_port *sas_port, struct domain_device *device) { struct ata_port *ap = device->sata_dev.ap; struct device *dev = hisi_hba->dev; int rc = TMF_RESP_FUNC_FAILED; struct ata_link *link; u8 fis[20] = {0}; int i; for (i = 0; i < hisi_hba->n_phy; i++) { if (!(sas_port->phy_mask & BIT(i))) continue; ata_for_each_link(link, ap, EDGE) { int pmp = sata_srst_pmp(link); hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis); rc = sas_execute_ata_cmd(device, fis, i); if (rc != 
TMF_RESP_FUNC_COMPLETE) { dev_err(dev, "phy%d ata reset failed rc=%d\n", i, rc); break; } } } } static void hisi_sas_terminate_stp_reject(struct hisi_hba *hisi_hba) { struct device *dev = hisi_hba->dev; int port_no, rc, i; for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { struct hisi_sas_device *sas_dev = &hisi_hba->devices[i]; struct domain_device *device = sas_dev->sas_device; if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device) continue; rc = hisi_sas_internal_task_abort_dev(sas_dev, false); if (rc < 0) dev_err(dev, "STP reject: abort dev failed %d\n", rc); } for (port_no = 0; port_no < hisi_hba->n_phy; port_no++) { struct hisi_sas_port *port = &hisi_hba->port[port_no]; struct asd_sas_port *sas_port = &port->sas_port; struct domain_device *port_dev = sas_port->port_dev; struct domain_device *device; if (!port_dev || !dev_is_expander(port_dev->dev_type)) continue; /* Try to find a SATA device */ list_for_each_entry(device, &sas_port->dev_list, dev_list_node) { if (dev_is_sata(device)) { hisi_sas_send_ata_reset_each_phy(hisi_hba, sas_port, device); break; } } } } void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba) { struct Scsi_Host *shost = hisi_hba->shost; hisi_hba->phy_state = hisi_hba->hw->get_phys_state(hisi_hba); scsi_block_requests(shost); hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000); del_timer_sync(&hisi_hba->timer); set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); } EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_prepare); static void hisi_sas_async_init_wait_phyup(void *data, async_cookie_t cookie) { struct hisi_sas_phy *phy = data; struct hisi_hba *hisi_hba = phy->hisi_hba; struct device *dev = hisi_hba->dev; DECLARE_COMPLETION_ONSTACK(completion); int phy_no = phy->sas_phy.id; phy->reset_completion = &completion; hisi_sas_phy_enable(hisi_hba, phy_no, 1); if (!wait_for_completion_timeout(&completion, HISI_SAS_WAIT_PHYUP_TIMEOUT)) dev_warn(dev, "phy%d wait phyup timed out\n", phy_no); phy->reset_completion = NULL; } void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba) { struct Scsi_Host *shost = hisi_hba->shost; ASYNC_DOMAIN_EXCLUSIVE(async); int phy_no; /* Init and wait for PHYs to come up and all libsas event finished. 
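 * Only phys which were up before the reset (recorded in hisi_hba->phy_state) are re-enabled here.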
*/ for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) { struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; if (!(hisi_hba->phy_state & BIT(phy_no))) continue; async_schedule_domain(hisi_sas_async_init_wait_phyup, phy, &async); } async_synchronize_full_domain(&async); hisi_sas_refresh_port_id(hisi_hba); clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); if (hisi_hba->reject_stp_links_msk) hisi_sas_terminate_stp_reject(hisi_hba); hisi_sas_reset_init_all_devices(hisi_hba); scsi_unblock_requests(shost); clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags); up(&hisi_hba->sem); hisi_sas_rescan_topology(hisi_hba, hisi_hba->phy_state); } EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_done); static int hisi_sas_controller_prereset(struct hisi_hba *hisi_hba) { if (!hisi_hba->hw->soft_reset) return -1; down(&hisi_hba->sem); if (test_and_set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) { up(&hisi_hba->sem); return -1; } if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct) hisi_hba->hw->debugfs_snapshot_regs(hisi_hba); return 0; } static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba) { struct device *dev = hisi_hba->dev; struct Scsi_Host *shost = hisi_hba->shost; int rc; dev_info(dev, "controller resetting...\n"); hisi_sas_controller_reset_prepare(hisi_hba); rc = hisi_hba->hw->soft_reset(hisi_hba); if (rc) { dev_warn(dev, "controller reset failed (%d)\n", rc); clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); up(&hisi_hba->sem); scsi_unblock_requests(shost); clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags); return rc; } clear_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags); hisi_sas_controller_reset_done(hisi_hba); dev_info(dev, "controller reset complete\n"); return 0; } static int hisi_sas_abort_task(struct sas_task *task) { struct hisi_sas_internal_abort_data internal_abort_data = { false }; struct domain_device *device = task->dev; struct hisi_sas_device *sas_dev = device->lldd_dev; struct hisi_sas_slot *slot = task->lldd_task; struct hisi_hba *hisi_hba; struct device *dev; int rc = TMF_RESP_FUNC_FAILED; unsigned long flags; if (!sas_dev) return TMF_RESP_FUNC_FAILED; hisi_hba = dev_to_hisi_hba(task->dev); dev = hisi_hba->dev; spin_lock_irqsave(&task->task_state_lock, flags); if (task->task_state_flags & SAS_TASK_STATE_DONE) { struct hisi_sas_cq *cq; if (slot) { /* * sync irq or poll queue to avoid free'ing task * before using task in IO completion */ cq = &hisi_hba->cq[slot->dlvry_queue]; hisi_sas_sync_cq(cq); } spin_unlock_irqrestore(&task->task_state_lock, flags); rc = TMF_RESP_FUNC_COMPLETE; goto out; } task->task_state_flags |= SAS_TASK_STATE_ABORTED; spin_unlock_irqrestore(&task->task_state_lock, flags); if (slot && task->task_proto & SAS_PROTOCOL_SSP) { u16 tag = slot->idx; int rc2; rc = sas_abort_task(task, tag); rc2 = sas_execute_internal_abort_single(device, tag, slot->dlvry_queue, &internal_abort_data); if (rc2 < 0) { dev_err(dev, "abort task: internal abort (%d)\n", rc2); return TMF_RESP_FUNC_FAILED; } /* * If the TMF finds that the IO is not in the device and also * the internal abort does not succeed, then it is safe to * free the slot. 
* Note: if the internal abort succeeds then the slot * will have already been completed */ if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) { if (task->lldd_task) hisi_sas_do_release_task(hisi_hba, task, slot, true); } } else if (task->task_proto & SAS_PROTOCOL_SATA || task->task_proto & SAS_PROTOCOL_STP) { if (task->dev->dev_type == SAS_SATA_DEV) { struct ata_queued_cmd *qc = task->uldd_task; rc = hisi_sas_internal_task_abort_dev(sas_dev, false); if (rc < 0) { dev_err(dev, "abort task: internal abort failed\n"); goto out; } hisi_sas_dereg_device(hisi_hba, device); /* * If an ATA internal command times out in ATA EH, it * need to execute soft reset, so check the scsicmd */ if ((sas_dev->dev_status == HISI_SAS_DEV_NCQ_ERR) && qc && qc->scsicmd) { hisi_sas_do_release_task(hisi_hba, task, slot, true); rc = TMF_RESP_FUNC_COMPLETE; } else { rc = hisi_sas_softreset_ata_disk(device); } } } else if (slot && task->task_proto & SAS_PROTOCOL_SMP) { /* SMP */ u32 tag = slot->idx; struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue]; rc = sas_execute_internal_abort_single(device, tag, slot->dlvry_queue, &internal_abort_data); if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) && task->lldd_task) { /* * sync irq or poll queue to avoid free'ing task * before using task in IO completion */ hisi_sas_sync_cq(cq); slot->task = NULL; } } out: if (rc != TMF_RESP_FUNC_COMPLETE) dev_notice(dev, "abort task: rc=%d\n", rc); return rc; } static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun) { struct hisi_sas_device *sas_dev = device->lldd_dev; struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); struct device *dev = hisi_hba->dev; int rc; rc = hisi_sas_internal_task_abort_dev(sas_dev, false); if (rc < 0) { dev_err(dev, "abort task set: internal abort rc=%d\n", rc); return TMF_RESP_FUNC_FAILED; } hisi_sas_dereg_device(hisi_hba, device); rc = sas_abort_task_set(device, lun); if (rc == TMF_RESP_FUNC_COMPLETE) hisi_sas_release_task(hisi_hba, device); return rc; } static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device) { struct sas_phy *local_phy = sas_get_local_phy(device); struct hisi_sas_device *sas_dev = device->lldd_dev; struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); struct sas_ha_struct *sas_ha = &hisi_hba->sha; int rc, reset_type; if (!local_phy->enabled) { sas_put_local_phy(local_phy); return -ENODEV; } if (scsi_is_sas_phy_local(local_phy)) { struct asd_sas_phy *sas_phy = sas_ha->sas_phy[local_phy->number]; struct hisi_sas_phy *phy = container_of(sas_phy, struct hisi_sas_phy, sas_phy); unsigned long flags; spin_lock_irqsave(&phy->lock, flags); phy->in_reset = 1; spin_unlock_irqrestore(&phy->lock, flags); } reset_type = (sas_dev->dev_status == HISI_SAS_DEV_INIT || !dev_is_sata(device)) ? 
true : false; rc = sas_phy_reset(local_phy, reset_type); sas_put_local_phy(local_phy); if (scsi_is_sas_phy_local(local_phy)) { struct asd_sas_phy *sas_phy = sas_ha->sas_phy[local_phy->number]; struct hisi_sas_phy *phy = container_of(sas_phy, struct hisi_sas_phy, sas_phy); unsigned long flags; spin_lock_irqsave(&phy->lock, flags); phy->in_reset = 0; spin_unlock_irqrestore(&phy->lock, flags); /* report PHY down if timed out */ if (rc == -ETIMEDOUT) hisi_sas_phy_down(hisi_hba, sas_phy->id, 0, GFP_KERNEL); return rc; } /* Remote phy */ if (rc) return rc; if (dev_is_sata(device)) { struct ata_link *link = &device->sata_dev.ap->link; rc = ata_wait_after_reset(link, HISI_SAS_WAIT_PHYUP_TIMEOUT, smp_ata_check_ready_type); } else { msleep(2000); } return rc; } static int hisi_sas_I_T_nexus_reset(struct domain_device *device) { struct hisi_sas_device *sas_dev = device->lldd_dev; struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); struct device *dev = hisi_hba->dev; int rc; if (sas_dev->dev_status == HISI_SAS_DEV_NCQ_ERR) sas_dev->dev_status = HISI_SAS_DEV_NORMAL; rc = hisi_sas_internal_task_abort_dev(sas_dev, false); if (rc < 0) { dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc); return TMF_RESP_FUNC_FAILED; } hisi_sas_dereg_device(hisi_hba, device); rc = hisi_sas_debug_I_T_nexus_reset(device); if (rc == TMF_RESP_FUNC_COMPLETE && dev_is_sata(device)) { struct sas_phy *local_phy; rc = hisi_sas_softreset_ata_disk(device); switch (rc) { case -ECOMM: rc = -ENODEV; break; case TMF_RESP_FUNC_FAILED: case -EMSGSIZE: case -EIO: local_phy = sas_get_local_phy(device); rc = sas_phy_enable(local_phy, 0); if (!rc) { local_phy->enabled = 0; dev_err(dev, "Disabled local phy of ATA disk %016llx due to softreset fail (%d)\n", SAS_ADDR(device->sas_addr), rc); rc = -ENODEV; } sas_put_local_phy(local_phy); break; default: break; } } if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV)) hisi_sas_release_task(hisi_hba, device); return rc; } static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun) { struct hisi_sas_device *sas_dev = device->lldd_dev; struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); struct device *dev = hisi_hba->dev; int rc = TMF_RESP_FUNC_FAILED; /* Clear internal IO and then lu reset */ rc = hisi_sas_internal_task_abort_dev(sas_dev, false); if (rc < 0) { dev_err(dev, "lu_reset: internal abort failed\n"); goto out; } hisi_sas_dereg_device(hisi_hba, device); if (dev_is_sata(device)) { struct sas_phy *phy; phy = sas_get_local_phy(device); rc = sas_phy_reset(phy, true); if (rc == 0) hisi_sas_release_task(hisi_hba, device); sas_put_local_phy(phy); } else { rc = sas_lu_reset(device, lun); if (rc == TMF_RESP_FUNC_COMPLETE) hisi_sas_release_task(hisi_hba, device); } out: if (rc != TMF_RESP_FUNC_COMPLETE) dev_err(dev, "lu_reset: for device[%d]:rc= %d\n", sas_dev->device_id, rc); return rc; } static void hisi_sas_async_I_T_nexus_reset(void *data, async_cookie_t cookie) { struct domain_device *device = data; struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); int rc; rc = hisi_sas_debug_I_T_nexus_reset(device); if (rc != TMF_RESP_FUNC_COMPLETE) dev_info(hisi_hba->dev, "I_T_nexus reset fail for dev:%016llx rc=%d\n", SAS_ADDR(device->sas_addr), rc); } static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha) { struct hisi_hba *hisi_hba = sas_ha->lldd_ha; HISI_SAS_DECLARE_RST_WORK_ON_STACK(r); ASYNC_DOMAIN_EXCLUSIVE(async); int i; queue_work(hisi_hba->wq, &r.work); wait_for_completion(r.completion); if (!r.done) return TMF_RESP_FUNC_FAILED; for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) 
{ struct hisi_sas_device *sas_dev = &hisi_hba->devices[i]; struct domain_device *device = sas_dev->sas_device; if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device || dev_is_expander(device->dev_type)) continue; async_schedule_domain(hisi_sas_async_I_T_nexus_reset, device, &async); } async_synchronize_full_domain(&async); hisi_sas_release_tasks(hisi_hba); return TMF_RESP_FUNC_COMPLETE; } static int hisi_sas_query_task(struct sas_task *task) { int rc = TMF_RESP_FUNC_FAILED; if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) { struct hisi_sas_slot *slot = task->lldd_task; u32 tag = slot->idx; rc = sas_query_task(task, tag); switch (rc) { /* The task is still in Lun, release it then */ case TMF_RESP_FUNC_SUCC: /* The task is not in Lun or failed, reset the phy */ case TMF_RESP_FUNC_FAILED: case TMF_RESP_FUNC_COMPLETE: break; default: rc = TMF_RESP_FUNC_FAILED; break; } } return rc; } static bool hisi_sas_internal_abort_timeout(struct sas_task *task, void *data) { struct domain_device *device = task->dev; struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); struct hisi_sas_internal_abort_data *timeout = data; if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct) queue_work(hisi_hba->wq, &hisi_hba->debugfs_work); if (task->task_state_flags & SAS_TASK_STATE_DONE) { pr_err("Internal abort: timeout %016llx\n", SAS_ADDR(device->sas_addr)); } else { struct hisi_sas_slot *slot = task->lldd_task; set_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags); if (slot) { struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue]; /* * sync irq or poll queue to avoid free'ing task * before using task in IO completion */ hisi_sas_sync_cq(cq); slot->task = NULL; } if (timeout->rst_ha_timeout) { pr_err("Internal abort: timeout and not done %016llx. Queuing reset.\n", SAS_ADDR(device->sas_addr)); queue_work(hisi_hba->wq, &hisi_hba->rst_work); } else { pr_err("Internal abort: timeout and not done %016llx.\n", SAS_ADDR(device->sas_addr)); } return true; } return false; } static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy) { hisi_sas_port_notify_formed(sas_phy); } static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type, u8 reg_index, u8 reg_count, u8 *write_data) { struct hisi_hba *hisi_hba = sha->lldd_ha; if (!hisi_hba->hw->write_gpio) return -EOPNOTSUPP; return hisi_hba->hw->write_gpio(hisi_hba, reg_type, reg_index, reg_count, write_data); } static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy) { struct asd_sas_phy *sas_phy = &phy->sas_phy; struct sas_phy *sphy = sas_phy->phy; unsigned long flags; phy->phy_attached = 0; phy->phy_type = 0; phy->port = NULL; spin_lock_irqsave(&phy->lock, flags); if (phy->enable) sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN; else sphy->negotiated_linkrate = SAS_PHY_DISABLED; spin_unlock_irqrestore(&phy->lock, flags); } void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy, gfp_t gfp_flags) { struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; struct asd_sas_phy *sas_phy = &phy->sas_phy; struct device *dev = hisi_hba->dev; if (rdy) { /* Phy down but ready */ hisi_sas_bytes_dmaed(hisi_hba, phy_no, gfp_flags); hisi_sas_port_notify_formed(sas_phy); } else { struct hisi_sas_port *port = phy->port; if (test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags) || phy->in_reset) { dev_info(dev, "ignore flutter phy%d down\n", phy_no); return; } /* Phy down and not ready */ sas_notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL, gfp_flags); sas_phy_disconnected(sas_phy); if (port) { if (phy->phy_type & PORT_TYPE_SAS) { int port_id = port->id; 
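/* If no phys remain in the wide port, mark the port as detached */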
if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba, port_id)) port->port_attached = 0; } else if (phy->phy_type & PORT_TYPE_SATA) port->port_attached = 0; } hisi_sas_phy_disconnected(phy); } } EXPORT_SYMBOL_GPL(hisi_sas_phy_down); void hisi_sas_phy_bcast(struct hisi_sas_phy *phy) { struct asd_sas_phy *sas_phy = &phy->sas_phy; struct hisi_hba *hisi_hba = phy->hisi_hba; if (test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) return; sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD, GFP_ATOMIC); } EXPORT_SYMBOL_GPL(hisi_sas_phy_bcast); int hisi_sas_host_reset(struct Scsi_Host *shost, int reset_type) { struct hisi_hba *hisi_hba = shost_priv(shost); if (reset_type != SCSI_ADAPTER_RESET) return -EOPNOTSUPP; queue_work(hisi_hba->wq, &hisi_hba->rst_work); return 0; } EXPORT_SYMBOL_GPL(hisi_sas_host_reset); struct scsi_transport_template *hisi_sas_stt; EXPORT_SYMBOL_GPL(hisi_sas_stt); static struct sas_domain_function_template hisi_sas_transport_ops = { .lldd_dev_found = hisi_sas_dev_found, .lldd_dev_gone = hisi_sas_dev_gone, .lldd_execute_task = hisi_sas_queue_command, .lldd_control_phy = hisi_sas_control_phy, .lldd_abort_task = hisi_sas_abort_task, .lldd_abort_task_set = hisi_sas_abort_task_set, .lldd_I_T_nexus_reset = hisi_sas_I_T_nexus_reset, .lldd_lu_reset = hisi_sas_lu_reset, .lldd_query_task = hisi_sas_query_task, .lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha, .lldd_port_formed = hisi_sas_port_formed, .lldd_write_gpio = hisi_sas_write_gpio, .lldd_tmf_aborted = hisi_sas_tmf_aborted, .lldd_abort_timeout = hisi_sas_internal_abort_timeout, }; void hisi_sas_init_mem(struct hisi_hba *hisi_hba) { int i, s, j, max_command_entries = HISI_SAS_MAX_COMMANDS; struct hisi_sas_breakpoint *sata_breakpoint = hisi_hba->sata_breakpoint; for (i = 0; i < hisi_hba->queue_count; i++) { struct hisi_sas_cq *cq = &hisi_hba->cq[i]; struct hisi_sas_dq *dq = &hisi_hba->dq[i]; struct hisi_sas_cmd_hdr *cmd_hdr = hisi_hba->cmd_hdr[i]; s = sizeof(struct hisi_sas_cmd_hdr); for (j = 0; j < HISI_SAS_QUEUE_SLOTS; j++) memset(&cmd_hdr[j], 0, s); dq->wr_point = 0; s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS; memset(hisi_hba->complete_hdr[i], 0, s); cq->rd_point = 0; } s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy; memset(hisi_hba->initial_fis, 0, s); s = max_command_entries * sizeof(struct hisi_sas_iost); memset(hisi_hba->iost, 0, s); s = max_command_entries * sizeof(struct hisi_sas_breakpoint); memset(hisi_hba->breakpoint, 0, s); s = sizeof(struct hisi_sas_sata_breakpoint); for (j = 0; j < HISI_SAS_MAX_ITCT_ENTRIES; j++) memset(&sata_breakpoint[j], 0, s); } EXPORT_SYMBOL_GPL(hisi_sas_init_mem); int hisi_sas_alloc(struct hisi_hba *hisi_hba) { struct device *dev = hisi_hba->dev; int i, j, s, max_command_entries = HISI_SAS_MAX_COMMANDS; int max_command_entries_ru, sz_slot_buf_ru; int blk_cnt, slots_per_blk; sema_init(&hisi_hba->sem, 1); spin_lock_init(&hisi_hba->lock); for (i = 0; i < hisi_hba->n_phy; i++) { hisi_sas_phy_init(hisi_hba, i); hisi_hba->port[i].port_attached = 0; hisi_hba->port[i].id = -1; } for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED; hisi_hba->devices[i].device_id = i; hisi_hba->devices[i].dev_status = HISI_SAS_DEV_INIT; } for (i = 0; i < hisi_hba->queue_count; i++) { struct hisi_sas_cq *cq = &hisi_hba->cq[i]; struct hisi_sas_dq *dq = &hisi_hba->dq[i]; /* Completion queue structure */ cq->id = i; cq->hisi_hba = hisi_hba; spin_lock_init(&cq->poll_lock); /* Delivery queue structure */ spin_lock_init(&dq->lock); INIT_LIST_HEAD(&dq->list); dq->id 
= i; dq->hisi_hba = hisi_hba; /* Delivery queue */ s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS; hisi_hba->cmd_hdr[i] = dmam_alloc_coherent(dev, s, &hisi_hba->cmd_hdr_dma[i], GFP_KERNEL); if (!hisi_hba->cmd_hdr[i]) goto err_out; /* Completion queue */ s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS; hisi_hba->complete_hdr[i] = dmam_alloc_coherent(dev, s, &hisi_hba->complete_hdr_dma[i], GFP_KERNEL); if (!hisi_hba->complete_hdr[i]) goto err_out; } s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct); hisi_hba->itct = dmam_alloc_coherent(dev, s, &hisi_hba->itct_dma, GFP_KERNEL); if (!hisi_hba->itct) goto err_out; hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries, sizeof(struct hisi_sas_slot), GFP_KERNEL); if (!hisi_hba->slot_info) goto err_out; /* roundup to avoid overly large block size */ max_command_entries_ru = roundup(max_command_entries, 64); if (hisi_hba->prot_mask & HISI_SAS_DIX_PROT_MASK) sz_slot_buf_ru = sizeof(struct hisi_sas_slot_dif_buf_table); else sz_slot_buf_ru = sizeof(struct hisi_sas_slot_buf_table); sz_slot_buf_ru = roundup(sz_slot_buf_ru, 64); s = max(lcm(max_command_entries_ru, sz_slot_buf_ru), PAGE_SIZE); blk_cnt = (max_command_entries_ru * sz_slot_buf_ru) / s; slots_per_blk = s / sz_slot_buf_ru; for (i = 0; i < blk_cnt; i++) { int slot_index = i * slots_per_blk; dma_addr_t buf_dma; void *buf; buf = dmam_alloc_coherent(dev, s, &buf_dma, GFP_KERNEL); if (!buf) goto err_out; for (j = 0; j < slots_per_blk; j++, slot_index++) { struct hisi_sas_slot *slot; slot = &hisi_hba->slot_info[slot_index]; slot->buf = buf; slot->buf_dma = buf_dma; slot->idx = slot_index; buf += sz_slot_buf_ru; buf_dma += sz_slot_buf_ru; } } s = max_command_entries * sizeof(struct hisi_sas_iost); hisi_hba->iost = dmam_alloc_coherent(dev, s, &hisi_hba->iost_dma, GFP_KERNEL); if (!hisi_hba->iost) goto err_out; s = max_command_entries * sizeof(struct hisi_sas_breakpoint); hisi_hba->breakpoint = dmam_alloc_coherent(dev, s, &hisi_hba->breakpoint_dma, GFP_KERNEL); if (!hisi_hba->breakpoint) goto err_out; s = hisi_hba->slot_index_count = max_command_entries; hisi_hba->slot_index_tags = devm_bitmap_zalloc(dev, s, GFP_KERNEL); if (!hisi_hba->slot_index_tags) goto err_out; s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS; hisi_hba->initial_fis = dmam_alloc_coherent(dev, s, &hisi_hba->initial_fis_dma, GFP_KERNEL); if (!hisi_hba->initial_fis) goto err_out; s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint); hisi_hba->sata_breakpoint = dmam_alloc_coherent(dev, s, &hisi_hba->sata_breakpoint_dma, GFP_KERNEL); if (!hisi_hba->sata_breakpoint) goto err_out; hisi_hba->last_slot_index = 0; hisi_hba->wq = create_singlethread_workqueue(dev_name(dev)); if (!hisi_hba->wq) { dev_err(dev, "sas_alloc: failed to create workqueue\n"); goto err_out; } return 0; err_out: return -ENOMEM; } EXPORT_SYMBOL_GPL(hisi_sas_alloc); void hisi_sas_free(struct hisi_hba *hisi_hba) { int i; for (i = 0; i < hisi_hba->n_phy; i++) { struct hisi_sas_phy *phy = &hisi_hba->phy[i]; del_timer_sync(&phy->timer); } if (hisi_hba->wq) destroy_workqueue(hisi_hba->wq); } EXPORT_SYMBOL_GPL(hisi_sas_free); void hisi_sas_rst_work_handler(struct work_struct *work) { struct hisi_hba *hisi_hba = container_of(work, struct hisi_hba, rst_work); if (hisi_sas_controller_prereset(hisi_hba)) return; hisi_sas_controller_reset(hisi_hba); } EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler); void hisi_sas_sync_rst_work_handler(struct work_struct *work) { struct hisi_sas_rst *rst = container_of(work, struct 
hisi_sas_rst, work); if (hisi_sas_controller_prereset(rst->hisi_hba)) goto rst_complete; if (!hisi_sas_controller_reset(rst->hisi_hba)) rst->done = true; rst_complete: complete(rst->completion); } EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler); int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba) { struct device *dev = hisi_hba->dev; struct platform_device *pdev = hisi_hba->platform_dev; struct device_node *np = pdev ? pdev->dev.of_node : NULL; struct clk *refclk; if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr, SAS_ADDR_SIZE)) { dev_err(dev, "could not get property sas-addr\n"); return -ENOENT; } if (np) { /* * These properties are only required for platform device-based * controller with DT firmware. */ hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np, "hisilicon,sas-syscon"); if (IS_ERR(hisi_hba->ctrl)) { dev_err(dev, "could not get syscon\n"); return -ENOENT; } if (device_property_read_u32(dev, "ctrl-reset-reg", &hisi_hba->ctrl_reset_reg)) { dev_err(dev, "could not get property ctrl-reset-reg\n"); return -ENOENT; } if (device_property_read_u32(dev, "ctrl-reset-sts-reg", &hisi_hba->ctrl_reset_sts_reg)) { dev_err(dev, "could not get property ctrl-reset-sts-reg\n"); return -ENOENT; } if (device_property_read_u32(dev, "ctrl-clock-ena-reg", &hisi_hba->ctrl_clock_ena_reg)) { dev_err(dev, "could not get property ctrl-clock-ena-reg\n"); return -ENOENT; } } refclk = devm_clk_get(dev, NULL); if (IS_ERR(refclk)) dev_dbg(dev, "no ref clk property\n"); else hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000; if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) { dev_err(dev, "could not get property phy-count\n"); return -ENOENT; } if (device_property_read_u32(dev, "queue-count", &hisi_hba->queue_count)) { dev_err(dev, "could not get property queue-count\n"); return -ENOENT; } return 0; } EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info); static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev, const struct hisi_sas_hw *hw) { struct resource *res; struct Scsi_Host *shost; struct hisi_hba *hisi_hba; struct device *dev = &pdev->dev; int error; shost = scsi_host_alloc(hw->sht, sizeof(*hisi_hba)); if (!shost) { dev_err(dev, "scsi host alloc failed\n"); return NULL; } hisi_hba = shost_priv(shost); INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler); hisi_hba->hw = hw; hisi_hba->dev = dev; hisi_hba->platform_dev = pdev; hisi_hba->shost = shost; SHOST_TO_SAS_HA(shost) = &hisi_hba->sha; timer_setup(&hisi_hba->timer, NULL, 0); if (hisi_sas_get_fw_info(hisi_hba) < 0) goto err_out; error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); if (error) { dev_err(dev, "No usable DMA addressing method\n"); goto err_out; } hisi_hba->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(hisi_hba->regs)) goto err_out; res = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (res) { hisi_hba->sgpio_regs = devm_ioremap_resource(dev, res); if (IS_ERR(hisi_hba->sgpio_regs)) goto err_out; } if (hisi_sas_alloc(hisi_hba)) { hisi_sas_free(hisi_hba); goto err_out; } return shost; err_out: scsi_host_put(shost); dev_err(dev, "shost alloc failed\n"); return NULL; } static int hisi_sas_interrupt_preinit(struct hisi_hba *hisi_hba) { if (hisi_hba->hw->interrupt_preinit) return hisi_hba->hw->interrupt_preinit(hisi_hba); return 0; } int hisi_sas_probe(struct platform_device *pdev, const struct hisi_sas_hw *hw) { struct Scsi_Host *shost; struct hisi_hba *hisi_hba; struct device *dev = &pdev->dev; struct asd_sas_phy **arr_phy; struct asd_sas_port **arr_port; struct 
sas_ha_struct *sha; int rc, phy_nr, port_nr, i; shost = hisi_sas_shost_alloc(pdev, hw); if (!shost) return -ENOMEM; sha = SHOST_TO_SAS_HA(shost); hisi_hba = shost_priv(shost); platform_set_drvdata(pdev, sha); phy_nr = port_nr = hisi_hba->n_phy; arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL); arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL); if (!arr_phy || !arr_port) { rc = -ENOMEM; goto err_out_ha; } sha->sas_phy = arr_phy; sha->sas_port = arr_port; sha->lldd_ha = hisi_hba; shost->transportt = hisi_sas_stt; shost->max_id = HISI_SAS_MAX_DEVICES; shost->max_lun = ~0; shost->max_channel = 1; shost->max_cmd_len = 16; if (hisi_hba->hw->slot_index_alloc) { shost->can_queue = HISI_SAS_MAX_COMMANDS; shost->cmd_per_lun = HISI_SAS_MAX_COMMANDS; } else { shost->can_queue = HISI_SAS_UNRESERVED_IPTT; shost->cmd_per_lun = HISI_SAS_UNRESERVED_IPTT; } sha->sas_ha_name = DRV_NAME; sha->dev = hisi_hba->dev; sha->sas_addr = &hisi_hba->sas_addr[0]; sha->num_phys = hisi_hba->n_phy; sha->shost = hisi_hba->shost; for (i = 0; i < hisi_hba->n_phy; i++) { sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy; sha->sas_port[i] = &hisi_hba->port[i].sas_port; } rc = hisi_sas_interrupt_preinit(hisi_hba); if (rc) goto err_out_ha; rc = scsi_add_host(shost, &pdev->dev); if (rc) goto err_out_ha; rc = sas_register_ha(sha); if (rc) goto err_out_register_ha; rc = hisi_hba->hw->hw_init(hisi_hba); if (rc) goto err_out_hw_init; scsi_scan_host(shost); return 0; err_out_hw_init: sas_unregister_ha(sha); err_out_register_ha: scsi_remove_host(shost); err_out_ha: hisi_sas_free(hisi_hba); scsi_host_put(shost); return rc; } EXPORT_SYMBOL_GPL(hisi_sas_probe); void hisi_sas_remove(struct platform_device *pdev) { struct sas_ha_struct *sha = platform_get_drvdata(pdev); struct hisi_hba *hisi_hba = sha->lldd_ha; struct Scsi_Host *shost = sha->shost; del_timer_sync(&hisi_hba->timer); sas_unregister_ha(sha); sas_remove_host(shost); hisi_sas_free(hisi_hba); scsi_host_put(shost); } EXPORT_SYMBOL_GPL(hisi_sas_remove); #if IS_ENABLED(CONFIG_SCSI_HISI_SAS_DEBUGFS_DEFAULT_ENABLE) #define DEBUGFS_ENABLE_DEFAULT "enabled" bool hisi_sas_debugfs_enable = true; u32 hisi_sas_debugfs_dump_count = 50; #else #define DEBUGFS_ENABLE_DEFAULT "disabled" bool hisi_sas_debugfs_enable; u32 hisi_sas_debugfs_dump_count = 1; #endif EXPORT_SYMBOL_GPL(hisi_sas_debugfs_enable); module_param_named(debugfs_enable, hisi_sas_debugfs_enable, bool, 0444); MODULE_PARM_DESC(hisi_sas_debugfs_enable, "Enable driver debugfs (default "DEBUGFS_ENABLE_DEFAULT")"); EXPORT_SYMBOL_GPL(hisi_sas_debugfs_dump_count); module_param_named(debugfs_dump_count, hisi_sas_debugfs_dump_count, uint, 0444); MODULE_PARM_DESC(hisi_sas_debugfs_dump_count, "Number of debugfs dumps to allow"); struct dentry *hisi_sas_debugfs_dir; EXPORT_SYMBOL_GPL(hisi_sas_debugfs_dir); static __init int hisi_sas_init(void) { hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops); if (!hisi_sas_stt) return -ENOMEM; if (hisi_sas_debugfs_enable) { hisi_sas_debugfs_dir = debugfs_create_dir("hisi_sas", NULL); if (hisi_sas_debugfs_dump_count > HISI_SAS_MAX_DEBUGFS_DUMP) { pr_info("hisi_sas: Limiting debugfs dump count\n"); hisi_sas_debugfs_dump_count = HISI_SAS_MAX_DEBUGFS_DUMP; } } return 0; } static __exit void hisi_sas_exit(void) { sas_release_transport(hisi_sas_stt); debugfs_remove(hisi_sas_debugfs_dir); } module_init(hisi_sas_init); module_exit(hisi_sas_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("John Garry <[email protected]>"); MODULE_DESCRIPTION("HISILICON SAS controller driver"); 
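/* Allow autoloading when a matching platform device is registered */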
MODULE_ALIAS("platform:" DRV_NAME);
linux-master
drivers/scsi/hisi_sas/hisi_sas_main.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (c) 2016 Linaro Ltd. * Copyright (c) 2016 Hisilicon Limited. */ #include "hisi_sas.h" #define DRV_NAME "hisi_sas_v2_hw" /* global registers need init*/ #define DLVRY_QUEUE_ENABLE 0x0 #define IOST_BASE_ADDR_LO 0x8 #define IOST_BASE_ADDR_HI 0xc #define ITCT_BASE_ADDR_LO 0x10 #define ITCT_BASE_ADDR_HI 0x14 #define IO_BROKEN_MSG_ADDR_LO 0x18 #define IO_BROKEN_MSG_ADDR_HI 0x1c #define PHY_CONTEXT 0x20 #define PHY_STATE 0x24 #define PHY_PORT_NUM_MA 0x28 #define PORT_STATE 0x2c #define PORT_STATE_PHY8_PORT_NUM_OFF 16 #define PORT_STATE_PHY8_PORT_NUM_MSK (0xf << PORT_STATE_PHY8_PORT_NUM_OFF) #define PORT_STATE_PHY8_CONN_RATE_OFF 20 #define PORT_STATE_PHY8_CONN_RATE_MSK (0xf << PORT_STATE_PHY8_CONN_RATE_OFF) #define PHY_CONN_RATE 0x30 #define HGC_TRANS_TASK_CNT_LIMIT 0x38 #define AXI_AHB_CLK_CFG 0x3c #define ITCT_CLR 0x44 #define ITCT_CLR_EN_OFF 16 #define ITCT_CLR_EN_MSK (0x1 << ITCT_CLR_EN_OFF) #define ITCT_DEV_OFF 0 #define ITCT_DEV_MSK (0x7ff << ITCT_DEV_OFF) #define AXI_USER1 0x48 #define AXI_USER2 0x4c #define IO_SATA_BROKEN_MSG_ADDR_LO 0x58 #define IO_SATA_BROKEN_MSG_ADDR_HI 0x5c #define SATA_INITI_D2H_STORE_ADDR_LO 0x60 #define SATA_INITI_D2H_STORE_ADDR_HI 0x64 #define HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL 0x84 #define HGC_SAS_TXFAIL_RETRY_CTRL 0x88 #define HGC_GET_ITV_TIME 0x90 #define DEVICE_MSG_WORK_MODE 0x94 #define OPENA_WT_CONTI_TIME 0x9c #define I_T_NEXUS_LOSS_TIME 0xa0 #define MAX_CON_TIME_LIMIT_TIME 0xa4 #define BUS_INACTIVE_LIMIT_TIME 0xa8 #define REJECT_TO_OPEN_LIMIT_TIME 0xac #define CFG_AGING_TIME 0xbc #define HGC_DFX_CFG2 0xc0 #define HGC_IOMB_PROC1_STATUS 0x104 #define CFG_1US_TIMER_TRSH 0xcc #define HGC_LM_DFX_STATUS2 0x128 #define HGC_LM_DFX_STATUS2_IOSTLIST_OFF 0 #define HGC_LM_DFX_STATUS2_IOSTLIST_MSK (0xfff << \ HGC_LM_DFX_STATUS2_IOSTLIST_OFF) #define HGC_LM_DFX_STATUS2_ITCTLIST_OFF 12 #define HGC_LM_DFX_STATUS2_ITCTLIST_MSK (0x7ff << \ HGC_LM_DFX_STATUS2_ITCTLIST_OFF) #define HGC_CQE_ECC_ADDR 0x13c #define HGC_CQE_ECC_1B_ADDR_OFF 0 #define HGC_CQE_ECC_1B_ADDR_MSK (0x3f << HGC_CQE_ECC_1B_ADDR_OFF) #define HGC_CQE_ECC_MB_ADDR_OFF 8 #define HGC_CQE_ECC_MB_ADDR_MSK (0x3f << HGC_CQE_ECC_MB_ADDR_OFF) #define HGC_IOST_ECC_ADDR 0x140 #define HGC_IOST_ECC_1B_ADDR_OFF 0 #define HGC_IOST_ECC_1B_ADDR_MSK (0x3ff << HGC_IOST_ECC_1B_ADDR_OFF) #define HGC_IOST_ECC_MB_ADDR_OFF 16 #define HGC_IOST_ECC_MB_ADDR_MSK (0x3ff << HGC_IOST_ECC_MB_ADDR_OFF) #define HGC_DQE_ECC_ADDR 0x144 #define HGC_DQE_ECC_1B_ADDR_OFF 0 #define HGC_DQE_ECC_1B_ADDR_MSK (0xfff << HGC_DQE_ECC_1B_ADDR_OFF) #define HGC_DQE_ECC_MB_ADDR_OFF 16 #define HGC_DQE_ECC_MB_ADDR_MSK (0xfff << HGC_DQE_ECC_MB_ADDR_OFF) #define HGC_INVLD_DQE_INFO 0x148 #define HGC_INVLD_DQE_INFO_FB_CH0_OFF 9 #define HGC_INVLD_DQE_INFO_FB_CH0_MSK (0x1 << HGC_INVLD_DQE_INFO_FB_CH0_OFF) #define HGC_INVLD_DQE_INFO_FB_CH3_OFF 18 #define HGC_ITCT_ECC_ADDR 0x150 #define HGC_ITCT_ECC_1B_ADDR_OFF 0 #define HGC_ITCT_ECC_1B_ADDR_MSK (0x3ff << \ HGC_ITCT_ECC_1B_ADDR_OFF) #define HGC_ITCT_ECC_MB_ADDR_OFF 16 #define HGC_ITCT_ECC_MB_ADDR_MSK (0x3ff << \ HGC_ITCT_ECC_MB_ADDR_OFF) #define HGC_AXI_FIFO_ERR_INFO 0x154 #define AXI_ERR_INFO_OFF 0 #define AXI_ERR_INFO_MSK (0xff << AXI_ERR_INFO_OFF) #define FIFO_ERR_INFO_OFF 8 #define FIFO_ERR_INFO_MSK (0xff << FIFO_ERR_INFO_OFF) #define INT_COAL_EN 0x19c #define OQ_INT_COAL_TIME 0x1a0 #define OQ_INT_COAL_CNT 0x1a4 #define ENT_INT_COAL_TIME 0x1a8 #define ENT_INT_COAL_CNT 0x1ac #define OQ_INT_SRC 0x1b0 #define OQ_INT_SRC_MSK 0x1b4 #define ENT_INT_SRC1 0x1b8 #define 
ENT_INT_SRC1_D2H_FIS_CH0_OFF 0 #define ENT_INT_SRC1_D2H_FIS_CH0_MSK (0x1 << ENT_INT_SRC1_D2H_FIS_CH0_OFF) #define ENT_INT_SRC1_D2H_FIS_CH1_OFF 8 #define ENT_INT_SRC1_D2H_FIS_CH1_MSK (0x1 << ENT_INT_SRC1_D2H_FIS_CH1_OFF) #define ENT_INT_SRC2 0x1bc #define ENT_INT_SRC3 0x1c0 #define ENT_INT_SRC3_WP_DEPTH_OFF 8 #define ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF 9 #define ENT_INT_SRC3_RP_DEPTH_OFF 10 #define ENT_INT_SRC3_AXI_OFF 11 #define ENT_INT_SRC3_FIFO_OFF 12 #define ENT_INT_SRC3_LM_OFF 14 #define ENT_INT_SRC3_ITC_INT_OFF 15 #define ENT_INT_SRC3_ITC_INT_MSK (0x1 << ENT_INT_SRC3_ITC_INT_OFF) #define ENT_INT_SRC3_ABT_OFF 16 #define ENT_INT_SRC_MSK1 0x1c4 #define ENT_INT_SRC_MSK2 0x1c8 #define ENT_INT_SRC_MSK3 0x1cc #define ENT_INT_SRC_MSK3_ENT95_MSK_OFF 31 #define ENT_INT_SRC_MSK3_ENT95_MSK_MSK (0x1 << ENT_INT_SRC_MSK3_ENT95_MSK_OFF) #define SAS_ECC_INTR 0x1e8 #define SAS_ECC_INTR_DQE_ECC_1B_OFF 0 #define SAS_ECC_INTR_DQE_ECC_MB_OFF 1 #define SAS_ECC_INTR_IOST_ECC_1B_OFF 2 #define SAS_ECC_INTR_IOST_ECC_MB_OFF 3 #define SAS_ECC_INTR_ITCT_ECC_MB_OFF 4 #define SAS_ECC_INTR_ITCT_ECC_1B_OFF 5 #define SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF 6 #define SAS_ECC_INTR_IOSTLIST_ECC_1B_OFF 7 #define SAS_ECC_INTR_ITCTLIST_ECC_1B_OFF 8 #define SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF 9 #define SAS_ECC_INTR_CQE_ECC_1B_OFF 10 #define SAS_ECC_INTR_CQE_ECC_MB_OFF 11 #define SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF 12 #define SAS_ECC_INTR_NCQ_MEM0_ECC_1B_OFF 13 #define SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF 14 #define SAS_ECC_INTR_NCQ_MEM1_ECC_1B_OFF 15 #define SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF 16 #define SAS_ECC_INTR_NCQ_MEM2_ECC_1B_OFF 17 #define SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF 18 #define SAS_ECC_INTR_NCQ_MEM3_ECC_1B_OFF 19 #define SAS_ECC_INTR_MSK 0x1ec #define HGC_ERR_STAT_EN 0x238 #define CQE_SEND_CNT 0x248 #define DLVRY_Q_0_BASE_ADDR_LO 0x260 #define DLVRY_Q_0_BASE_ADDR_HI 0x264 #define DLVRY_Q_0_DEPTH 0x268 #define DLVRY_Q_0_WR_PTR 0x26c #define DLVRY_Q_0_RD_PTR 0x270 #define HYPER_STREAM_ID_EN_CFG 0xc80 #define OQ0_INT_SRC_MSK 0xc90 #define COMPL_Q_0_BASE_ADDR_LO 0x4e0 #define COMPL_Q_0_BASE_ADDR_HI 0x4e4 #define COMPL_Q_0_DEPTH 0x4e8 #define COMPL_Q_0_WR_PTR 0x4ec #define COMPL_Q_0_RD_PTR 0x4f0 #define HGC_RXM_DFX_STATUS14 0xae8 #define HGC_RXM_DFX_STATUS14_MEM0_OFF 0 #define HGC_RXM_DFX_STATUS14_MEM0_MSK (0x1ff << \ HGC_RXM_DFX_STATUS14_MEM0_OFF) #define HGC_RXM_DFX_STATUS14_MEM1_OFF 9 #define HGC_RXM_DFX_STATUS14_MEM1_MSK (0x1ff << \ HGC_RXM_DFX_STATUS14_MEM1_OFF) #define HGC_RXM_DFX_STATUS14_MEM2_OFF 18 #define HGC_RXM_DFX_STATUS14_MEM2_MSK (0x1ff << \ HGC_RXM_DFX_STATUS14_MEM2_OFF) #define HGC_RXM_DFX_STATUS15 0xaec #define HGC_RXM_DFX_STATUS15_MEM3_OFF 0 #define HGC_RXM_DFX_STATUS15_MEM3_MSK (0x1ff << \ HGC_RXM_DFX_STATUS15_MEM3_OFF) /* phy registers need init */ #define PORT_BASE (0x2000) #define PHY_CFG (PORT_BASE + 0x0) #define HARD_PHY_LINKRATE (PORT_BASE + 0x4) #define PHY_CFG_ENA_OFF 0 #define PHY_CFG_ENA_MSK (0x1 << PHY_CFG_ENA_OFF) #define PHY_CFG_DC_OPT_OFF 2 #define PHY_CFG_DC_OPT_MSK (0x1 << PHY_CFG_DC_OPT_OFF) #define PROG_PHY_LINK_RATE (PORT_BASE + 0x8) #define PROG_PHY_LINK_RATE_MAX_OFF 0 #define PROG_PHY_LINK_RATE_MAX_MSK (0xff << PROG_PHY_LINK_RATE_MAX_OFF) #define PHY_CTRL (PORT_BASE + 0x14) #define PHY_CTRL_RESET_OFF 0 #define PHY_CTRL_RESET_MSK (0x1 << PHY_CTRL_RESET_OFF) #define SAS_PHY_CTRL (PORT_BASE + 0x20) #define SL_CFG (PORT_BASE + 0x84) #define PHY_PCN (PORT_BASE + 0x44) #define SL_TOUT_CFG (PORT_BASE + 0x8c) #define SL_CONTROL (PORT_BASE + 0x94) #define SL_CONTROL_NOTIFY_EN_OFF 0 #define 
SL_CONTROL_NOTIFY_EN_MSK (0x1 << SL_CONTROL_NOTIFY_EN_OFF) #define SL_CONTROL_CTA_OFF 17 #define SL_CONTROL_CTA_MSK (0x1 << SL_CONTROL_CTA_OFF) #define RX_PRIMS_STATUS (PORT_BASE + 0x98) #define RX_BCAST_CHG_OFF 1 #define RX_BCAST_CHG_MSK (0x1 << RX_BCAST_CHG_OFF) #define TX_ID_DWORD0 (PORT_BASE + 0x9c) #define TX_ID_DWORD1 (PORT_BASE + 0xa0) #define TX_ID_DWORD2 (PORT_BASE + 0xa4) #define TX_ID_DWORD3 (PORT_BASE + 0xa8) #define TX_ID_DWORD4 (PORT_BASE + 0xaC) #define TX_ID_DWORD5 (PORT_BASE + 0xb0) #define TX_ID_DWORD6 (PORT_BASE + 0xb4) #define TXID_AUTO (PORT_BASE + 0xb8) #define TXID_AUTO_CT3_OFF 1 #define TXID_AUTO_CT3_MSK (0x1 << TXID_AUTO_CT3_OFF) #define TXID_AUTO_CTB_OFF 11 #define TXID_AUTO_CTB_MSK (0x1 << TXID_AUTO_CTB_OFF) #define TX_HARDRST_OFF 2 #define TX_HARDRST_MSK (0x1 << TX_HARDRST_OFF) #define RX_IDAF_DWORD0 (PORT_BASE + 0xc4) #define RX_IDAF_DWORD1 (PORT_BASE + 0xc8) #define RX_IDAF_DWORD2 (PORT_BASE + 0xcc) #define RX_IDAF_DWORD3 (PORT_BASE + 0xd0) #define RX_IDAF_DWORD4 (PORT_BASE + 0xd4) #define RX_IDAF_DWORD5 (PORT_BASE + 0xd8) #define RX_IDAF_DWORD6 (PORT_BASE + 0xdc) #define RXOP_CHECK_CFG_H (PORT_BASE + 0xfc) #define CON_CONTROL (PORT_BASE + 0x118) #define CON_CONTROL_CFG_OPEN_ACC_STP_OFF 0 #define CON_CONTROL_CFG_OPEN_ACC_STP_MSK \ (0x01 << CON_CONTROL_CFG_OPEN_ACC_STP_OFF) #define DONE_RECEIVED_TIME (PORT_BASE + 0x11c) #define CHL_INT0 (PORT_BASE + 0x1b4) #define CHL_INT0_HOTPLUG_TOUT_OFF 0 #define CHL_INT0_HOTPLUG_TOUT_MSK (0x1 << CHL_INT0_HOTPLUG_TOUT_OFF) #define CHL_INT0_SL_RX_BCST_ACK_OFF 1 #define CHL_INT0_SL_RX_BCST_ACK_MSK (0x1 << CHL_INT0_SL_RX_BCST_ACK_OFF) #define CHL_INT0_SL_PHY_ENABLE_OFF 2 #define CHL_INT0_SL_PHY_ENABLE_MSK (0x1 << CHL_INT0_SL_PHY_ENABLE_OFF) #define CHL_INT0_NOT_RDY_OFF 4 #define CHL_INT0_NOT_RDY_MSK (0x1 << CHL_INT0_NOT_RDY_OFF) #define CHL_INT0_PHY_RDY_OFF 5 #define CHL_INT0_PHY_RDY_MSK (0x1 << CHL_INT0_PHY_RDY_OFF) #define CHL_INT1 (PORT_BASE + 0x1b8) #define CHL_INT1_DMAC_TX_ECC_ERR_OFF 15 #define CHL_INT1_DMAC_TX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_TX_ECC_ERR_OFF) #define CHL_INT1_DMAC_RX_ECC_ERR_OFF 17 #define CHL_INT1_DMAC_RX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_RX_ECC_ERR_OFF) #define CHL_INT1_DMAC_TX_AXI_WR_ERR_OFF 19 #define CHL_INT1_DMAC_TX_AXI_RD_ERR_OFF 20 #define CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF 21 #define CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF 22 #define CHL_INT2 (PORT_BASE + 0x1bc) #define CHL_INT2_SL_IDAF_TOUT_CONF_OFF 0 #define CHL_INT0_MSK (PORT_BASE + 0x1c0) #define CHL_INT1_MSK (PORT_BASE + 0x1c4) #define CHL_INT2_MSK (PORT_BASE + 0x1c8) #define CHL_INT_COAL_EN (PORT_BASE + 0x1d0) #define DMA_TX_DFX0 (PORT_BASE + 0x200) #define DMA_TX_DFX1 (PORT_BASE + 0x204) #define DMA_TX_DFX1_IPTT_OFF 0 #define DMA_TX_DFX1_IPTT_MSK (0xffff << DMA_TX_DFX1_IPTT_OFF) #define DMA_TX_FIFO_DFX0 (PORT_BASE + 0x240) #define PORT_DFX0 (PORT_BASE + 0x258) #define LINK_DFX2 (PORT_BASE + 0X264) #define LINK_DFX2_RCVR_HOLD_STS_OFF 9 #define LINK_DFX2_RCVR_HOLD_STS_MSK (0x1 << LINK_DFX2_RCVR_HOLD_STS_OFF) #define LINK_DFX2_SEND_HOLD_STS_OFF 10 #define LINK_DFX2_SEND_HOLD_STS_MSK (0x1 << LINK_DFX2_SEND_HOLD_STS_OFF) #define SAS_ERR_CNT4_REG (PORT_BASE + 0x290) #define SAS_ERR_CNT6_REG (PORT_BASE + 0x298) #define PHY_CTRL_RDY_MSK (PORT_BASE + 0x2b0) #define PHYCTRL_NOT_RDY_MSK (PORT_BASE + 0x2b4) #define PHYCTRL_DWS_RESET_MSK (PORT_BASE + 0x2b8) #define PHYCTRL_PHY_ENA_MSK (PORT_BASE + 0x2bc) #define SL_RX_BCAST_CHK_MSK (PORT_BASE + 0x2c0) #define PHYCTRL_OOB_RESTART_MSK (PORT_BASE + 0x2c4) #define DMA_TX_STATUS (PORT_BASE + 0x2d0) #define 
DMA_TX_STATUS_BUSY_OFF 0 #define DMA_TX_STATUS_BUSY_MSK (0x1 << DMA_TX_STATUS_BUSY_OFF) #define DMA_RX_STATUS (PORT_BASE + 0x2e8) #define DMA_RX_STATUS_BUSY_OFF 0 #define DMA_RX_STATUS_BUSY_MSK (0x1 << DMA_RX_STATUS_BUSY_OFF) #define AXI_CFG (0x5100) #define AM_CFG_MAX_TRANS (0x5010) #define AM_CFG_SINGLE_PORT_MAX_TRANS (0x5014) #define AXI_MASTER_CFG_BASE (0x5000) #define AM_CTRL_GLOBAL (0x0) #define AM_CURR_TRANS_RETURN (0x150) /* HW dma structures */ /* Delivery queue header */ /* dw0 */ #define CMD_HDR_ABORT_FLAG_OFF 0 #define CMD_HDR_ABORT_FLAG_MSK (0x3 << CMD_HDR_ABORT_FLAG_OFF) #define CMD_HDR_ABORT_DEVICE_TYPE_OFF 2 #define CMD_HDR_ABORT_DEVICE_TYPE_MSK (0x1 << CMD_HDR_ABORT_DEVICE_TYPE_OFF) #define CMD_HDR_RESP_REPORT_OFF 5 #define CMD_HDR_RESP_REPORT_MSK (0x1 << CMD_HDR_RESP_REPORT_OFF) #define CMD_HDR_TLR_CTRL_OFF 6 #define CMD_HDR_TLR_CTRL_MSK (0x3 << CMD_HDR_TLR_CTRL_OFF) #define CMD_HDR_PHY_ID_OFF 8 #define CMD_HDR_PHY_ID_MSK (0x1ff << CMD_HDR_PHY_ID_OFF) #define CMD_HDR_FORCE_PHY_OFF 17 #define CMD_HDR_FORCE_PHY_MSK (0x1 << CMD_HDR_FORCE_PHY_OFF) #define CMD_HDR_PORT_OFF 18 #define CMD_HDR_PORT_MSK (0xf << CMD_HDR_PORT_OFF) #define CMD_HDR_PRIORITY_OFF 27 #define CMD_HDR_PRIORITY_MSK (0x1 << CMD_HDR_PRIORITY_OFF) #define CMD_HDR_CMD_OFF 29 #define CMD_HDR_CMD_MSK (0x7 << CMD_HDR_CMD_OFF) /* dw1 */ #define CMD_HDR_DIR_OFF 5 #define CMD_HDR_DIR_MSK (0x3 << CMD_HDR_DIR_OFF) #define CMD_HDR_RESET_OFF 7 #define CMD_HDR_RESET_MSK (0x1 << CMD_HDR_RESET_OFF) #define CMD_HDR_VDTL_OFF 10 #define CMD_HDR_VDTL_MSK (0x1 << CMD_HDR_VDTL_OFF) #define CMD_HDR_FRAME_TYPE_OFF 11 #define CMD_HDR_FRAME_TYPE_MSK (0x1f << CMD_HDR_FRAME_TYPE_OFF) #define CMD_HDR_DEV_ID_OFF 16 #define CMD_HDR_DEV_ID_MSK (0xffff << CMD_HDR_DEV_ID_OFF) /* dw2 */ #define CMD_HDR_CFL_OFF 0 #define CMD_HDR_CFL_MSK (0x1ff << CMD_HDR_CFL_OFF) #define CMD_HDR_NCQ_TAG_OFF 10 #define CMD_HDR_NCQ_TAG_MSK (0x1f << CMD_HDR_NCQ_TAG_OFF) #define CMD_HDR_MRFL_OFF 15 #define CMD_HDR_MRFL_MSK (0x1ff << CMD_HDR_MRFL_OFF) #define CMD_HDR_SG_MOD_OFF 24 #define CMD_HDR_SG_MOD_MSK (0x3 << CMD_HDR_SG_MOD_OFF) #define CMD_HDR_FIRST_BURST_OFF 26 #define CMD_HDR_FIRST_BURST_MSK (0x1 << CMD_HDR_SG_MOD_OFF) /* dw3 */ #define CMD_HDR_IPTT_OFF 0 #define CMD_HDR_IPTT_MSK (0xffff << CMD_HDR_IPTT_OFF) /* dw6 */ #define CMD_HDR_DIF_SGL_LEN_OFF 0 #define CMD_HDR_DIF_SGL_LEN_MSK (0xffff << CMD_HDR_DIF_SGL_LEN_OFF) #define CMD_HDR_DATA_SGL_LEN_OFF 16 #define CMD_HDR_DATA_SGL_LEN_MSK (0xffff << CMD_HDR_DATA_SGL_LEN_OFF) #define CMD_HDR_ABORT_IPTT_OFF 16 #define CMD_HDR_ABORT_IPTT_MSK (0xffff << CMD_HDR_ABORT_IPTT_OFF) /* Completion header */ /* dw0 */ #define CMPLT_HDR_ERR_PHASE_OFF 2 #define CMPLT_HDR_ERR_PHASE_MSK (0xff << CMPLT_HDR_ERR_PHASE_OFF) #define CMPLT_HDR_RSPNS_XFRD_OFF 10 #define CMPLT_HDR_RSPNS_XFRD_MSK (0x1 << CMPLT_HDR_RSPNS_XFRD_OFF) #define CMPLT_HDR_ERX_OFF 12 #define CMPLT_HDR_ERX_MSK (0x1 << CMPLT_HDR_ERX_OFF) #define CMPLT_HDR_ABORT_STAT_OFF 13 #define CMPLT_HDR_ABORT_STAT_MSK (0x7 << CMPLT_HDR_ABORT_STAT_OFF) /* abort_stat */ #define STAT_IO_NOT_VALID 0x1 #define STAT_IO_NO_DEVICE 0x2 #define STAT_IO_COMPLETE 0x3 #define STAT_IO_ABORTED 0x4 /* dw1 */ #define CMPLT_HDR_IPTT_OFF 0 #define CMPLT_HDR_IPTT_MSK (0xffff << CMPLT_HDR_IPTT_OFF) #define CMPLT_HDR_DEV_ID_OFF 16 #define CMPLT_HDR_DEV_ID_MSK (0xffff << CMPLT_HDR_DEV_ID_OFF) /* ITCT header */ /* qw0 */ #define ITCT_HDR_DEV_TYPE_OFF 0 #define ITCT_HDR_DEV_TYPE_MSK (0x3 << ITCT_HDR_DEV_TYPE_OFF) #define ITCT_HDR_VALID_OFF 2 #define ITCT_HDR_VALID_MSK (0x1 << ITCT_HDR_VALID_OFF) 
#define ITCT_HDR_MCR_OFF 5 #define ITCT_HDR_MCR_MSK (0xf << ITCT_HDR_MCR_OFF) #define ITCT_HDR_VLN_OFF 9 #define ITCT_HDR_VLN_MSK (0xf << ITCT_HDR_VLN_OFF) #define ITCT_HDR_SMP_TIMEOUT_OFF 16 #define ITCT_HDR_SMP_TIMEOUT_8US 1 #define ITCT_HDR_SMP_TIMEOUT (ITCT_HDR_SMP_TIMEOUT_8US * \ 250) /* 2ms */ #define ITCT_HDR_AWT_CONTINUE_OFF 25 #define ITCT_HDR_PORT_ID_OFF 28 #define ITCT_HDR_PORT_ID_MSK (0xf << ITCT_HDR_PORT_ID_OFF) /* qw2 */ #define ITCT_HDR_INLT_OFF 0 #define ITCT_HDR_INLT_MSK (0xffffULL << ITCT_HDR_INLT_OFF) #define ITCT_HDR_BITLT_OFF 16 #define ITCT_HDR_BITLT_MSK (0xffffULL << ITCT_HDR_BITLT_OFF) #define ITCT_HDR_MCTLT_OFF 32 #define ITCT_HDR_MCTLT_MSK (0xffffULL << ITCT_HDR_MCTLT_OFF) #define ITCT_HDR_RTOLT_OFF 48 #define ITCT_HDR_RTOLT_MSK (0xffffULL << ITCT_HDR_RTOLT_OFF) #define HISI_SAS_FATAL_INT_NR 2 struct hisi_sas_complete_v2_hdr { __le32 dw0; __le32 dw1; __le32 act; __le32 dw3; }; struct hisi_sas_err_record_v2 { /* dw0 */ __le32 trans_tx_fail_type; /* dw1 */ __le32 trans_rx_fail_type; /* dw2 */ __le16 dma_tx_err_type; __le16 sipc_rx_err_type; /* dw3 */ __le32 dma_rx_err_type; }; struct signal_attenuation_s { u32 de_emphasis; u32 preshoot; u32 boost; }; struct sig_atten_lu_s { const struct signal_attenuation_s *att; u32 sas_phy_ctrl; }; static const struct hisi_sas_hw_error one_bit_ecc_errors[] = { { .irq_msk = BIT(SAS_ECC_INTR_DQE_ECC_1B_OFF), .msk = HGC_DQE_ECC_1B_ADDR_MSK, .shift = HGC_DQE_ECC_1B_ADDR_OFF, .msg = "hgc_dqe_ecc1b_intr", .reg = HGC_DQE_ECC_ADDR, }, { .irq_msk = BIT(SAS_ECC_INTR_IOST_ECC_1B_OFF), .msk = HGC_IOST_ECC_1B_ADDR_MSK, .shift = HGC_IOST_ECC_1B_ADDR_OFF, .msg = "hgc_iost_ecc1b_intr", .reg = HGC_IOST_ECC_ADDR, }, { .irq_msk = BIT(SAS_ECC_INTR_ITCT_ECC_1B_OFF), .msk = HGC_ITCT_ECC_1B_ADDR_MSK, .shift = HGC_ITCT_ECC_1B_ADDR_OFF, .msg = "hgc_itct_ecc1b_intr", .reg = HGC_ITCT_ECC_ADDR, }, { .irq_msk = BIT(SAS_ECC_INTR_IOSTLIST_ECC_1B_OFF), .msk = HGC_LM_DFX_STATUS2_IOSTLIST_MSK, .shift = HGC_LM_DFX_STATUS2_IOSTLIST_OFF, .msg = "hgc_iostl_ecc1b_intr", .reg = HGC_LM_DFX_STATUS2, }, { .irq_msk = BIT(SAS_ECC_INTR_ITCTLIST_ECC_1B_OFF), .msk = HGC_LM_DFX_STATUS2_ITCTLIST_MSK, .shift = HGC_LM_DFX_STATUS2_ITCTLIST_OFF, .msg = "hgc_itctl_ecc1b_intr", .reg = HGC_LM_DFX_STATUS2, }, { .irq_msk = BIT(SAS_ECC_INTR_CQE_ECC_1B_OFF), .msk = HGC_CQE_ECC_1B_ADDR_MSK, .shift = HGC_CQE_ECC_1B_ADDR_OFF, .msg = "hgc_cqe_ecc1b_intr", .reg = HGC_CQE_ECC_ADDR, }, { .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_1B_OFF), .msk = HGC_RXM_DFX_STATUS14_MEM0_MSK, .shift = HGC_RXM_DFX_STATUS14_MEM0_OFF, .msg = "rxm_mem0_ecc1b_intr", .reg = HGC_RXM_DFX_STATUS14, }, { .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_1B_OFF), .msk = HGC_RXM_DFX_STATUS14_MEM1_MSK, .shift = HGC_RXM_DFX_STATUS14_MEM1_OFF, .msg = "rxm_mem1_ecc1b_intr", .reg = HGC_RXM_DFX_STATUS14, }, { .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_1B_OFF), .msk = HGC_RXM_DFX_STATUS14_MEM2_MSK, .shift = HGC_RXM_DFX_STATUS14_MEM2_OFF, .msg = "rxm_mem2_ecc1b_intr", .reg = HGC_RXM_DFX_STATUS14, }, { .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_1B_OFF), .msk = HGC_RXM_DFX_STATUS15_MEM3_MSK, .shift = HGC_RXM_DFX_STATUS15_MEM3_OFF, .msg = "rxm_mem3_ecc1b_intr", .reg = HGC_RXM_DFX_STATUS15, }, }; static const struct hisi_sas_hw_error multi_bit_ecc_errors[] = { { .irq_msk = BIT(SAS_ECC_INTR_DQE_ECC_MB_OFF), .msk = HGC_DQE_ECC_MB_ADDR_MSK, .shift = HGC_DQE_ECC_MB_ADDR_OFF, .msg = "hgc_dqe_eccbad_intr", .reg = HGC_DQE_ECC_ADDR, }, { .irq_msk = BIT(SAS_ECC_INTR_IOST_ECC_MB_OFF), .msk = HGC_IOST_ECC_MB_ADDR_MSK, .shift = HGC_IOST_ECC_MB_ADDR_OFF, .msg = 
"hgc_iost_eccbad_intr", .reg = HGC_IOST_ECC_ADDR, }, { .irq_msk = BIT(SAS_ECC_INTR_ITCT_ECC_MB_OFF), .msk = HGC_ITCT_ECC_MB_ADDR_MSK, .shift = HGC_ITCT_ECC_MB_ADDR_OFF, .msg = "hgc_itct_eccbad_intr", .reg = HGC_ITCT_ECC_ADDR, }, { .irq_msk = BIT(SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF), .msk = HGC_LM_DFX_STATUS2_IOSTLIST_MSK, .shift = HGC_LM_DFX_STATUS2_IOSTLIST_OFF, .msg = "hgc_iostl_eccbad_intr", .reg = HGC_LM_DFX_STATUS2, }, { .irq_msk = BIT(SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF), .msk = HGC_LM_DFX_STATUS2_ITCTLIST_MSK, .shift = HGC_LM_DFX_STATUS2_ITCTLIST_OFF, .msg = "hgc_itctl_eccbad_intr", .reg = HGC_LM_DFX_STATUS2, }, { .irq_msk = BIT(SAS_ECC_INTR_CQE_ECC_MB_OFF), .msk = HGC_CQE_ECC_MB_ADDR_MSK, .shift = HGC_CQE_ECC_MB_ADDR_OFF, .msg = "hgc_cqe_eccbad_intr", .reg = HGC_CQE_ECC_ADDR, }, { .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF), .msk = HGC_RXM_DFX_STATUS14_MEM0_MSK, .shift = HGC_RXM_DFX_STATUS14_MEM0_OFF, .msg = "rxm_mem0_eccbad_intr", .reg = HGC_RXM_DFX_STATUS14, }, { .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF), .msk = HGC_RXM_DFX_STATUS14_MEM1_MSK, .shift = HGC_RXM_DFX_STATUS14_MEM1_OFF, .msg = "rxm_mem1_eccbad_intr", .reg = HGC_RXM_DFX_STATUS14, }, { .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF), .msk = HGC_RXM_DFX_STATUS14_MEM2_MSK, .shift = HGC_RXM_DFX_STATUS14_MEM2_OFF, .msg = "rxm_mem2_eccbad_intr", .reg = HGC_RXM_DFX_STATUS14, }, { .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF), .msk = HGC_RXM_DFX_STATUS15_MEM3_MSK, .shift = HGC_RXM_DFX_STATUS15_MEM3_OFF, .msg = "rxm_mem3_eccbad_intr", .reg = HGC_RXM_DFX_STATUS15, }, }; enum { HISI_SAS_PHY_PHY_UPDOWN, HISI_SAS_PHY_CHNL_INT, HISI_SAS_PHY_INT_NR }; enum { TRANS_TX_FAIL_BASE = 0x0, /* dw0 */ TRANS_RX_FAIL_BASE = 0x20, /* dw1 */ DMA_TX_ERR_BASE = 0x40, /* dw2 bit 15-0 */ SIPC_RX_ERR_BASE = 0x50, /* dw2 bit 31-16*/ DMA_RX_ERR_BASE = 0x60, /* dw3 */ /* trans tx*/ TRANS_TX_OPEN_FAIL_WITH_IT_NEXUS_LOSS = TRANS_TX_FAIL_BASE, /* 0x0 */ TRANS_TX_ERR_PHY_NOT_ENABLE, /* 0x1 */ TRANS_TX_OPEN_CNX_ERR_WRONG_DESTINATION, /* 0x2 */ TRANS_TX_OPEN_CNX_ERR_ZONE_VIOLATION, /* 0x3 */ TRANS_TX_OPEN_CNX_ERR_BY_OTHER, /* 0x4 */ RESERVED0, /* 0x5 */ TRANS_TX_OPEN_CNX_ERR_AIP_TIMEOUT, /* 0x6 */ TRANS_TX_OPEN_CNX_ERR_STP_RESOURCES_BUSY, /* 0x7 */ TRANS_TX_OPEN_CNX_ERR_PROTOCOL_NOT_SUPPORTED, /* 0x8 */ TRANS_TX_OPEN_CNX_ERR_CONNECTION_RATE_NOT_SUPPORTED, /* 0x9 */ TRANS_TX_OPEN_CNX_ERR_BAD_DESTINATION, /* 0xa */ TRANS_TX_OPEN_CNX_ERR_BREAK_RCVD, /* 0xb */ TRANS_TX_OPEN_CNX_ERR_LOW_PHY_POWER, /* 0xc */ TRANS_TX_OPEN_CNX_ERR_PATHWAY_BLOCKED, /* 0xd */ TRANS_TX_OPEN_CNX_ERR_OPEN_TIMEOUT, /* 0xe */ TRANS_TX_OPEN_CNX_ERR_NO_DESTINATION, /* 0xf */ TRANS_TX_OPEN_RETRY_ERR_THRESHOLD_REACHED, /* 0x10 */ TRANS_TX_ERR_FRAME_TXED, /* 0x11 */ TRANS_TX_ERR_WITH_BREAK_TIMEOUT, /* 0x12 */ TRANS_TX_ERR_WITH_BREAK_REQUEST, /* 0x13 */ TRANS_TX_ERR_WITH_BREAK_RECEVIED, /* 0x14 */ TRANS_TX_ERR_WITH_CLOSE_TIMEOUT, /* 0x15 */ TRANS_TX_ERR_WITH_CLOSE_NORMAL, /* 0x16 for ssp*/ TRANS_TX_ERR_WITH_CLOSE_PHYDISALE, /* 0x17 */ TRANS_TX_ERR_WITH_CLOSE_DWS_TIMEOUT, /* 0x18 */ TRANS_TX_ERR_WITH_CLOSE_COMINIT, /* 0x19 */ TRANS_TX_ERR_WITH_NAK_RECEVIED, /* 0x1a for ssp*/ TRANS_TX_ERR_WITH_ACK_NAK_TIMEOUT, /* 0x1b for ssp*/ /*IO_TX_ERR_WITH_R_ERR_RECEVIED, [> 0x1b for sata/stp<] */ TRANS_TX_ERR_WITH_CREDIT_TIMEOUT, /* 0x1c for ssp */ /*IO_RX_ERR_WITH_SATA_DEVICE_LOST 0x1c for sata/stp */ TRANS_TX_ERR_WITH_IPTT_CONFLICT, /* 0x1d for ssp/smp */ TRANS_TX_ERR_WITH_OPEN_BY_DES_OR_OTHERS, /* 0x1e */ /*IO_TX_ERR_WITH_SYNC_RXD, [> 0x1e <] for sata/stp */ TRANS_TX_ERR_WITH_WAIT_RECV_TIMEOUT, 
/* 0x1f for sata/stp */ /* trans rx */ TRANS_RX_ERR_WITH_RXFRAME_CRC_ERR = TRANS_RX_FAIL_BASE, /* 0x20 */ TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR, /* 0x21 for sata/stp */ TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM, /* 0x22 for ssp/smp */ /*IO_ERR_WITH_RXFIS_8B10B_CODE_ERR, [> 0x22 <] for sata/stp */ TRANS_RX_ERR_WITH_RXFIS_DECODE_ERROR, /* 0x23 for sata/stp */ TRANS_RX_ERR_WITH_RXFIS_CRC_ERR, /* 0x24 for sata/stp */ TRANS_RX_ERR_WITH_RXFRAME_LENGTH_OVERRUN, /* 0x25 for smp */ /*IO_ERR_WITH_RXFIS_TX SYNCP, [> 0x25 <] for sata/stp */ TRANS_RX_ERR_WITH_RXFIS_RX_SYNCP, /* 0x26 for sata/stp*/ TRANS_RX_ERR_WITH_LINK_BUF_OVERRUN, /* 0x27 */ TRANS_RX_ERR_WITH_BREAK_TIMEOUT, /* 0x28 */ TRANS_RX_ERR_WITH_BREAK_REQUEST, /* 0x29 */ TRANS_RX_ERR_WITH_BREAK_RECEVIED, /* 0x2a */ RESERVED1, /* 0x2b */ TRANS_RX_ERR_WITH_CLOSE_NORMAL, /* 0x2c */ TRANS_RX_ERR_WITH_CLOSE_PHY_DISABLE, /* 0x2d */ TRANS_RX_ERR_WITH_CLOSE_DWS_TIMEOUT, /* 0x2e */ TRANS_RX_ERR_WITH_CLOSE_COMINIT, /* 0x2f */ TRANS_RX_ERR_WITH_DATA_LEN0, /* 0x30 for ssp/smp */ TRANS_RX_ERR_WITH_BAD_HASH, /* 0x31 for ssp */ /*IO_RX_ERR_WITH_FIS_TOO_SHORT, [> 0x31 <] for sata/stp */ TRANS_RX_XRDY_WLEN_ZERO_ERR, /* 0x32 for ssp*/ /*IO_RX_ERR_WITH_FIS_TOO_LONG, [> 0x32 <] for sata/stp */ TRANS_RX_SSP_FRM_LEN_ERR, /* 0x33 for ssp */ /*IO_RX_ERR_WITH_SATA_DEVICE_LOST, [> 0x33 <] for sata */ RESERVED2, /* 0x34 */ RESERVED3, /* 0x35 */ RESERVED4, /* 0x36 */ RESERVED5, /* 0x37 */ TRANS_RX_ERR_WITH_BAD_FRM_TYPE, /* 0x38 */ TRANS_RX_SMP_FRM_LEN_ERR, /* 0x39 */ TRANS_RX_SMP_RESP_TIMEOUT_ERR, /* 0x3a */ RESERVED6, /* 0x3b */ RESERVED7, /* 0x3c */ RESERVED8, /* 0x3d */ RESERVED9, /* 0x3e */ TRANS_RX_R_ERR, /* 0x3f */ /* dma tx */ DMA_TX_DIF_CRC_ERR = DMA_TX_ERR_BASE, /* 0x40 */ DMA_TX_DIF_APP_ERR, /* 0x41 */ DMA_TX_DIF_RPP_ERR, /* 0x42 */ DMA_TX_DATA_SGL_OVERFLOW, /* 0x43 */ DMA_TX_DIF_SGL_OVERFLOW, /* 0x44 */ DMA_TX_UNEXP_XFER_ERR, /* 0x45 */ DMA_TX_UNEXP_RETRANS_ERR, /* 0x46 */ DMA_TX_XFER_LEN_OVERFLOW, /* 0x47 */ DMA_TX_XFER_OFFSET_ERR, /* 0x48 */ DMA_TX_RAM_ECC_ERR, /* 0x49 */ DMA_TX_DIF_LEN_ALIGN_ERR, /* 0x4a */ DMA_TX_MAX_ERR_CODE, /* sipc rx */ SIPC_RX_FIS_STATUS_ERR_BIT_VLD = SIPC_RX_ERR_BASE, /* 0x50 */ SIPC_RX_PIO_WRSETUP_STATUS_DRQ_ERR, /* 0x51 */ SIPC_RX_FIS_STATUS_BSY_BIT_ERR, /* 0x52 */ SIPC_RX_WRSETUP_LEN_ODD_ERR, /* 0x53 */ SIPC_RX_WRSETUP_LEN_ZERO_ERR, /* 0x54 */ SIPC_RX_WRDATA_LEN_NOT_MATCH_ERR, /* 0x55 */ SIPC_RX_NCQ_WRSETUP_OFFSET_ERR, /* 0x56 */ SIPC_RX_NCQ_WRSETUP_AUTO_ACTIVE_ERR, /* 0x57 */ SIPC_RX_SATA_UNEXP_FIS_ERR, /* 0x58 */ SIPC_RX_WRSETUP_ESTATUS_ERR, /* 0x59 */ SIPC_RX_DATA_UNDERFLOW_ERR, /* 0x5a */ SIPC_RX_MAX_ERR_CODE, /* dma rx */ DMA_RX_DIF_CRC_ERR = DMA_RX_ERR_BASE, /* 0x60 */ DMA_RX_DIF_APP_ERR, /* 0x61 */ DMA_RX_DIF_RPP_ERR, /* 0x62 */ DMA_RX_DATA_SGL_OVERFLOW, /* 0x63 */ DMA_RX_DIF_SGL_OVERFLOW, /* 0x64 */ DMA_RX_DATA_LEN_OVERFLOW, /* 0x65 */ DMA_RX_DATA_LEN_UNDERFLOW, /* 0x66 */ DMA_RX_DATA_OFFSET_ERR, /* 0x67 */ RESERVED10, /* 0x68 */ DMA_RX_SATA_FRAME_TYPE_ERR, /* 0x69 */ DMA_RX_RESP_BUF_OVERFLOW, /* 0x6a */ DMA_RX_UNEXP_RETRANS_RESP_ERR, /* 0x6b */ DMA_RX_UNEXP_NORM_RESP_ERR, /* 0x6c */ DMA_RX_UNEXP_RDFRAME_ERR, /* 0x6d */ DMA_RX_PIO_DATA_LEN_ERR, /* 0x6e */ DMA_RX_RDSETUP_STATUS_ERR, /* 0x6f */ DMA_RX_RDSETUP_STATUS_DRQ_ERR, /* 0x70 */ DMA_RX_RDSETUP_STATUS_BSY_ERR, /* 0x71 */ DMA_RX_RDSETUP_LEN_ODD_ERR, /* 0x72 */ DMA_RX_RDSETUP_LEN_ZERO_ERR, /* 0x73 */ DMA_RX_RDSETUP_LEN_OVER_ERR, /* 0x74 */ DMA_RX_RDSETUP_OFFSET_ERR, /* 0x75 */ DMA_RX_RDSETUP_ACTIVE_ERR, /* 0x76 */ DMA_RX_RDSETUP_ESTATUS_ERR, /* 0x77 */ DMA_RX_RAM_ECC_ERR, 
/* 0x78 */ DMA_RX_UNKNOWN_FRM_ERR, /* 0x79 */ DMA_RX_MAX_ERR_CODE, }; #define HISI_SAS_COMMAND_ENTRIES_V2_HW 4096 #define HISI_MAX_SATA_SUPPORT_V2_HW (HISI_SAS_COMMAND_ENTRIES_V2_HW/64 - 1) #define DIR_NO_DATA 0 #define DIR_TO_INI 1 #define DIR_TO_DEVICE 2 #define DIR_RESERVED 3 #define ERR_ON_TX_PHASE(err_phase) (err_phase == 0x2 || \ err_phase == 0x4 || err_phase == 0x8 ||\ err_phase == 0x6 || err_phase == 0xa) #define ERR_ON_RX_PHASE(err_phase) (err_phase == 0x10 || \ err_phase == 0x20 || err_phase == 0x40) static void link_timeout_disable_link(struct timer_list *t); static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off) { void __iomem *regs = hisi_hba->regs + off; return readl(regs); } static u32 hisi_sas_read32_relaxed(struct hisi_hba *hisi_hba, u32 off) { void __iomem *regs = hisi_hba->regs + off; return readl_relaxed(regs); } static void hisi_sas_write32(struct hisi_hba *hisi_hba, u32 off, u32 val) { void __iomem *regs = hisi_hba->regs + off; writel(val, regs); } static void hisi_sas_phy_write32(struct hisi_hba *hisi_hba, int phy_no, u32 off, u32 val) { void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off; writel(val, regs); } static u32 hisi_sas_phy_read32(struct hisi_hba *hisi_hba, int phy_no, u32 off) { void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off; return readl(regs); } /* This function needs to be protected from pre-emption. */ static int slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba, struct domain_device *device) { int sata_dev = dev_is_sata(device); void *bitmap = hisi_hba->slot_index_tags; struct hisi_sas_device *sas_dev = device->lldd_dev; int sata_idx = sas_dev->sata_idx; int start, end; if (!sata_dev) { /* * STP link SoC bug workaround: index starts from 1. * additionally, we can only allocate odd IPTT(1~4095) * for SAS/SMP device. */ start = 1; end = hisi_hba->slot_index_count; } else { if (sata_idx >= HISI_MAX_SATA_SUPPORT_V2_HW) return -EINVAL; /* * For SATA device: allocate even IPTT in this interval * [64*(sata_idx+1), 64*(sata_idx+2)], then each SATA device * own 32 IPTTs. IPTT 0 shall not be used duing to STP link * SoC bug workaround. So we ignore the first 32 even IPTTs. */ start = 64 * (sata_idx + 1); end = 64 * (sata_idx + 2); } spin_lock(&hisi_hba->lock); while (1) { start = find_next_zero_bit(bitmap, hisi_hba->slot_index_count, start); if (start >= end) { spin_unlock(&hisi_hba->lock); return -SAS_QUEUE_FULL; } /* * SAS IPTT bit0 should be 1, and SATA IPTT bit0 should be 0. 
*/ if (sata_dev ^ (start & 1)) break; start++; } set_bit(start, bitmap); spin_unlock(&hisi_hba->lock); return start; } static bool sata_index_alloc_v2_hw(struct hisi_hba *hisi_hba, int *idx) { unsigned int index; struct device *dev = hisi_hba->dev; void *bitmap = hisi_hba->sata_dev_bitmap; index = find_first_zero_bit(bitmap, HISI_MAX_SATA_SUPPORT_V2_HW); if (index >= HISI_MAX_SATA_SUPPORT_V2_HW) { dev_warn(dev, "alloc sata index failed, index=%d\n", index); return false; } set_bit(index, bitmap); *idx = index; return true; } static struct hisi_sas_device *alloc_dev_quirk_v2_hw(struct domain_device *device) { struct hisi_hba *hisi_hba = device->port->ha->lldd_ha; struct hisi_sas_device *sas_dev = NULL; int i, sata_dev = dev_is_sata(device); int sata_idx = -1; spin_lock(&hisi_hba->lock); if (sata_dev) if (!sata_index_alloc_v2_hw(hisi_hba, &sata_idx)) goto out; for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { /* * SATA device id bit0 should be 0 */ if (sata_dev && (i & 1)) continue; if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) { int queue = i % hisi_hba->queue_count; struct hisi_sas_dq *dq = &hisi_hba->dq[queue]; hisi_hba->devices[i].device_id = i; sas_dev = &hisi_hba->devices[i]; sas_dev->dev_status = HISI_SAS_DEV_INIT; sas_dev->dev_type = device->dev_type; sas_dev->hisi_hba = hisi_hba; sas_dev->sas_device = device; sas_dev->sata_idx = sata_idx; sas_dev->dq = dq; spin_lock_init(&sas_dev->lock); INIT_LIST_HEAD(&hisi_hba->devices[i].list); break; } } out: spin_unlock(&hisi_hba->lock); return sas_dev; } static void config_phy_opt_mode_v2_hw(struct hisi_hba *hisi_hba, int phy_no) { u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG); cfg &= ~PHY_CFG_DC_OPT_MSK; cfg |= 1 << PHY_CFG_DC_OPT_OFF; hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg); } static void config_id_frame_v2_hw(struct hisi_hba *hisi_hba, int phy_no) { struct sas_identify_frame identify_frame; u32 *identify_buffer; memset(&identify_frame, 0, sizeof(identify_frame)); identify_frame.dev_type = SAS_END_DEVICE; identify_frame.frame_type = 0; identify_frame._un1 = 1; identify_frame.initiator_bits = SAS_PROTOCOL_ALL; identify_frame.target_bits = SAS_PROTOCOL_NONE; memcpy(&identify_frame._un4_11[0], hisi_hba->sas_addr, SAS_ADDR_SIZE); memcpy(&identify_frame.sas_addr[0], hisi_hba->sas_addr, SAS_ADDR_SIZE); identify_frame.phy_id = phy_no; identify_buffer = (u32 *)(&identify_frame); hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD0, __swab32(identify_buffer[0])); hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD1, __swab32(identify_buffer[1])); hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD2, __swab32(identify_buffer[2])); hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD3, __swab32(identify_buffer[3])); hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD4, __swab32(identify_buffer[4])); hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD5, __swab32(identify_buffer[5])); } static void setup_itct_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_device *sas_dev) { struct domain_device *device = sas_dev->sas_device; struct device *dev = hisi_hba->dev; u64 qw0, device_id = sas_dev->device_id; struct hisi_sas_itct *itct = &hisi_hba->itct[device_id]; struct domain_device *parent_dev = device->parent; struct asd_sas_port *sas_port = device->port; struct hisi_sas_port *port = to_hisi_sas_port(sas_port); u64 sas_addr; memset(itct, 0, sizeof(*itct)); /* qw0 */ qw0 = 0; switch (sas_dev->dev_type) { case SAS_END_DEVICE: case SAS_EDGE_EXPANDER_DEVICE: case SAS_FANOUT_EXPANDER_DEVICE: qw0 = HISI_SAS_DEV_TYPE_SSP << 
ITCT_HDR_DEV_TYPE_OFF; break; case SAS_SATA_DEV: case SAS_SATA_PENDING: if (parent_dev && dev_is_expander(parent_dev->dev_type)) qw0 = HISI_SAS_DEV_TYPE_STP << ITCT_HDR_DEV_TYPE_OFF; else qw0 = HISI_SAS_DEV_TYPE_SATA << ITCT_HDR_DEV_TYPE_OFF; break; default: dev_warn(dev, "setup itct: unsupported dev type (%d)\n", sas_dev->dev_type); } qw0 |= ((1 << ITCT_HDR_VALID_OFF) | (device->linkrate << ITCT_HDR_MCR_OFF) | (1 << ITCT_HDR_VLN_OFF) | (ITCT_HDR_SMP_TIMEOUT << ITCT_HDR_SMP_TIMEOUT_OFF) | (1 << ITCT_HDR_AWT_CONTINUE_OFF) | (port->id << ITCT_HDR_PORT_ID_OFF)); itct->qw0 = cpu_to_le64(qw0); /* qw1 */ memcpy(&sas_addr, device->sas_addr, SAS_ADDR_SIZE); itct->sas_addr = cpu_to_le64(__swab64(sas_addr)); /* qw2 */ if (!dev_is_sata(device)) itct->qw2 = cpu_to_le64((5000ULL << ITCT_HDR_INLT_OFF) | (0x1ULL << ITCT_HDR_BITLT_OFF) | (0x32ULL << ITCT_HDR_MCTLT_OFF) | (0x1ULL << ITCT_HDR_RTOLT_OFF)); } static int clear_itct_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_device *sas_dev) { DECLARE_COMPLETION_ONSTACK(completion); u64 dev_id = sas_dev->device_id; struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id]; u32 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3); struct device *dev = hisi_hba->dev; int i; sas_dev->completion = &completion; /* clear the itct interrupt state */ if (ENT_INT_SRC3_ITC_INT_MSK & reg_val) hisi_sas_write32(hisi_hba, ENT_INT_SRC3, ENT_INT_SRC3_ITC_INT_MSK); /* need to set register twice to clear ITCT for v2 hw */ for (i = 0; i < 2; i++) { reg_val = ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK); hisi_sas_write32(hisi_hba, ITCT_CLR, reg_val); if (!wait_for_completion_timeout(sas_dev->completion, HISI_SAS_CLEAR_ITCT_TIMEOUT)) { dev_warn(dev, "failed to clear ITCT\n"); return -ETIMEDOUT; } memset(itct, 0, sizeof(struct hisi_sas_itct)); } return 0; } static void free_device_v2_hw(struct hisi_sas_device *sas_dev) { struct hisi_hba *hisi_hba = sas_dev->hisi_hba; /* SoC bug workaround */ if (dev_is_sata(sas_dev->sas_device)) clear_bit(sas_dev->sata_idx, hisi_hba->sata_dev_bitmap); } static int reset_hw_v2_hw(struct hisi_hba *hisi_hba) { int i, reset_val; u32 val; unsigned long end_time; struct device *dev = hisi_hba->dev; /* The mask needs to be set depending on the number of phys */ if (hisi_hba->n_phy == 9) reset_val = 0x1fffff; else reset_val = 0x7ffff; hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0); /* Disable all of the PHYs */ for (i = 0; i < hisi_hba->n_phy; i++) { u32 phy_cfg = hisi_sas_phy_read32(hisi_hba, i, PHY_CFG); phy_cfg &= ~PHY_CTRL_RESET_MSK; hisi_sas_phy_write32(hisi_hba, i, PHY_CFG, phy_cfg); } udelay(50); /* Ensure DMA tx & rx idle */ for (i = 0; i < hisi_hba->n_phy; i++) { u32 dma_tx_status, dma_rx_status; end_time = jiffies + msecs_to_jiffies(1000); while (1) { dma_tx_status = hisi_sas_phy_read32(hisi_hba, i, DMA_TX_STATUS); dma_rx_status = hisi_sas_phy_read32(hisi_hba, i, DMA_RX_STATUS); if (!(dma_tx_status & DMA_TX_STATUS_BUSY_MSK) && !(dma_rx_status & DMA_RX_STATUS_BUSY_MSK)) break; msleep(20); if (time_after(jiffies, end_time)) return -EIO; } } /* Ensure axi bus idle */ end_time = jiffies + msecs_to_jiffies(1000); while (1) { u32 axi_status = hisi_sas_read32(hisi_hba, AXI_CFG); if (axi_status == 0) break; msleep(20); if (time_after(jiffies, end_time)) return -EIO; } if (ACPI_HANDLE(dev)) { acpi_status s; s = acpi_evaluate_object(ACPI_HANDLE(dev), "_RST", NULL, NULL); if (ACPI_FAILURE(s)) { dev_err(dev, "Reset failed\n"); return -EIO; } } else if (hisi_hba->ctrl) { /* reset and disable clock*/ regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_reset_reg, 
reset_val); regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_clock_ena_reg + 4, reset_val); msleep(1); regmap_read(hisi_hba->ctrl, hisi_hba->ctrl_reset_sts_reg, &val); if (reset_val != (val & reset_val)) { dev_err(dev, "SAS reset fail.\n"); return -EIO; } /* De-reset and enable clock*/ regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_reset_reg + 4, reset_val); regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_clock_ena_reg, reset_val); msleep(1); regmap_read(hisi_hba->ctrl, hisi_hba->ctrl_reset_sts_reg, &val); if (val & reset_val) { dev_err(dev, "SAS de-reset fail.\n"); return -EIO; } } else { dev_err(dev, "no reset method\n"); return -EINVAL; } return 0; } /* This function needs to be called after resetting SAS controller. */ static void phys_reject_stp_links_v2_hw(struct hisi_hba *hisi_hba) { u32 cfg; int phy_no; hisi_hba->reject_stp_links_msk = (1 << hisi_hba->n_phy) - 1; for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) { cfg = hisi_sas_phy_read32(hisi_hba, phy_no, CON_CONTROL); if (!(cfg & CON_CONTROL_CFG_OPEN_ACC_STP_MSK)) continue; cfg &= ~CON_CONTROL_CFG_OPEN_ACC_STP_MSK; hisi_sas_phy_write32(hisi_hba, phy_no, CON_CONTROL, cfg); } } static void phys_try_accept_stp_links_v2_hw(struct hisi_hba *hisi_hba) { int phy_no; u32 dma_tx_dfx1; for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) { if (!(hisi_hba->reject_stp_links_msk & BIT(phy_no))) continue; dma_tx_dfx1 = hisi_sas_phy_read32(hisi_hba, phy_no, DMA_TX_DFX1); if (dma_tx_dfx1 & DMA_TX_DFX1_IPTT_MSK) { u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, CON_CONTROL); cfg |= CON_CONTROL_CFG_OPEN_ACC_STP_MSK; hisi_sas_phy_write32(hisi_hba, phy_no, CON_CONTROL, cfg); clear_bit(phy_no, &hisi_hba->reject_stp_links_msk); } } } static const struct signal_attenuation_s x6000 = {9200, 0, 10476}; static const struct sig_atten_lu_s sig_atten_lu[] = { { &x6000, 0x3016a68 }, }; static void init_reg_v2_hw(struct hisi_hba *hisi_hba) { struct device *dev = hisi_hba->dev; u32 sas_phy_ctrl = 0x30b9908; u32 signal[3]; int i; /* Global registers init */ /* Deal with am-max-transmissions quirk */ if (device_property_present(dev, "hip06-sas-v2-quirk-amt")) { hisi_sas_write32(hisi_hba, AM_CFG_MAX_TRANS, 0x2020); hisi_sas_write32(hisi_hba, AM_CFG_SINGLE_PORT_MAX_TRANS, 0x2020); } /* Else, use defaults -> do nothing */ hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, (u32)((1ULL << hisi_hba->queue_count) - 1)); hisi_sas_write32(hisi_hba, AXI_USER1, 0xc0000000); hisi_sas_write32(hisi_hba, AXI_USER2, 0x10000); hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x0); hisi_sas_write32(hisi_hba, HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL, 0x7FF); hisi_sas_write32(hisi_hba, OPENA_WT_CONTI_TIME, 0x1); hisi_sas_write32(hisi_hba, I_T_NEXUS_LOSS_TIME, 0x1F4); hisi_sas_write32(hisi_hba, MAX_CON_TIME_LIMIT_TIME, 0x32); hisi_sas_write32(hisi_hba, BUS_INACTIVE_LIMIT_TIME, 0x1); hisi_sas_write32(hisi_hba, CFG_AGING_TIME, 0x1); hisi_sas_write32(hisi_hba, HGC_ERR_STAT_EN, 0x1); hisi_sas_write32(hisi_hba, HGC_GET_ITV_TIME, 0x1); hisi_sas_write32(hisi_hba, INT_COAL_EN, 0xc); hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x60); hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x3); hisi_sas_write32(hisi_hba, ENT_INT_COAL_TIME, 0x1); hisi_sas_write32(hisi_hba, ENT_INT_COAL_CNT, 0x1); hisi_sas_write32(hisi_hba, OQ_INT_SRC, 0x0); hisi_sas_write32(hisi_hba, ENT_INT_SRC1, 0xffffffff); hisi_sas_write32(hisi_hba, ENT_INT_SRC2, 0xffffffff); hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 0xffffffff); hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0x7efefefe); hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0x7efefefe); 
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0x7ffe20fe); hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xfff00c30); for (i = 0; i < hisi_hba->queue_count; i++) hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK + 0x4 * i, 0); hisi_sas_write32(hisi_hba, AXI_AHB_CLK_CFG, 1); hisi_sas_write32(hisi_hba, HYPER_STREAM_ID_EN_CFG, 1); /* Get sas_phy_ctrl value to deal with TX FFE issue. */ if (!device_property_read_u32_array(dev, "hisilicon,signal-attenuation", signal, ARRAY_SIZE(signal))) { for (i = 0; i < ARRAY_SIZE(sig_atten_lu); i++) { const struct sig_atten_lu_s *lookup = &sig_atten_lu[i]; const struct signal_attenuation_s *att = lookup->att; if ((signal[0] == att->de_emphasis) && (signal[1] == att->preshoot) && (signal[2] == att->boost)) { sas_phy_ctrl = lookup->sas_phy_ctrl; break; } } if (i == ARRAY_SIZE(sig_atten_lu)) dev_warn(dev, "unknown signal attenuation values, using default PHY ctrl config\n"); } for (i = 0; i < hisi_hba->n_phy; i++) { struct hisi_sas_phy *phy = &hisi_hba->phy[i]; struct asd_sas_phy *sas_phy = &phy->sas_phy; u32 prog_phy_link_rate = 0x800; if (!sas_phy->phy || (sas_phy->phy->maximum_linkrate < SAS_LINK_RATE_1_5_GBPS)) { prog_phy_link_rate = 0x855; } else { enum sas_linkrate max = sas_phy->phy->maximum_linkrate; prog_phy_link_rate = hisi_sas_get_prog_phy_linkrate_mask(max) | 0x800; } hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE, prog_phy_link_rate); hisi_sas_phy_write32(hisi_hba, i, SAS_PHY_CTRL, sas_phy_ctrl); hisi_sas_phy_write32(hisi_hba, i, SL_TOUT_CFG, 0x7d7d7d7d); hisi_sas_phy_write32(hisi_hba, i, SL_CONTROL, 0x0); hisi_sas_phy_write32(hisi_hba, i, TXID_AUTO, 0x2); hisi_sas_phy_write32(hisi_hba, i, DONE_RECEIVED_TIME, 0x8); hisi_sas_phy_write32(hisi_hba, i, CHL_INT0, 0xffffffff); hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff); hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xfff87fff); hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000); hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xff857fff); hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8ffffbfe); hisi_sas_phy_write32(hisi_hba, i, SL_CFG, 0x13f801fc); hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0); hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0); hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_DWS_RESET_MSK, 0x0); hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_PHY_ENA_MSK, 0x0); hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x0); hisi_sas_phy_write32(hisi_hba, i, CHL_INT_COAL_EN, 0x0); hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x0); if (hisi_hba->refclk_frequency_mhz == 66) hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL, 0x199B694); /* else, do nothing -> leave it how you found it */ } for (i = 0; i < hisi_hba->queue_count; i++) { /* Delivery queue */ hisi_sas_write32(hisi_hba, DLVRY_Q_0_BASE_ADDR_HI + (i * 0x14), upper_32_bits(hisi_hba->cmd_hdr_dma[i])); hisi_sas_write32(hisi_hba, DLVRY_Q_0_BASE_ADDR_LO + (i * 0x14), lower_32_bits(hisi_hba->cmd_hdr_dma[i])); hisi_sas_write32(hisi_hba, DLVRY_Q_0_DEPTH + (i * 0x14), HISI_SAS_QUEUE_SLOTS); /* Completion queue */ hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_HI + (i * 0x14), upper_32_bits(hisi_hba->complete_hdr_dma[i])); hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_LO + (i * 0x14), lower_32_bits(hisi_hba->complete_hdr_dma[i])); hisi_sas_write32(hisi_hba, COMPL_Q_0_DEPTH + (i * 0x14), HISI_SAS_QUEUE_SLOTS); } /* itct */ hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_LO, lower_32_bits(hisi_hba->itct_dma)); hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_HI, upper_32_bits(hisi_hba->itct_dma)); /* iost */ 
hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_LO, lower_32_bits(hisi_hba->iost_dma)); hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_HI, upper_32_bits(hisi_hba->iost_dma)); /* breakpoint */ hisi_sas_write32(hisi_hba, IO_BROKEN_MSG_ADDR_LO, lower_32_bits(hisi_hba->breakpoint_dma)); hisi_sas_write32(hisi_hba, IO_BROKEN_MSG_ADDR_HI, upper_32_bits(hisi_hba->breakpoint_dma)); /* SATA broken msg */ hisi_sas_write32(hisi_hba, IO_SATA_BROKEN_MSG_ADDR_LO, lower_32_bits(hisi_hba->sata_breakpoint_dma)); hisi_sas_write32(hisi_hba, IO_SATA_BROKEN_MSG_ADDR_HI, upper_32_bits(hisi_hba->sata_breakpoint_dma)); /* SATA initial fis */ hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_LO, lower_32_bits(hisi_hba->initial_fis_dma)); hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_HI, upper_32_bits(hisi_hba->initial_fis_dma)); } static void link_timeout_enable_link(struct timer_list *t) { struct hisi_hba *hisi_hba = from_timer(hisi_hba, t, timer); int i, reg_val; for (i = 0; i < hisi_hba->n_phy; i++) { if (hisi_hba->reject_stp_links_msk & BIT(i)) continue; reg_val = hisi_sas_phy_read32(hisi_hba, i, CON_CONTROL); if (!(reg_val & BIT(0))) { hisi_sas_phy_write32(hisi_hba, i, CON_CONTROL, 0x7); break; } } hisi_hba->timer.function = link_timeout_disable_link; mod_timer(&hisi_hba->timer, jiffies + msecs_to_jiffies(900)); } static void link_timeout_disable_link(struct timer_list *t) { struct hisi_hba *hisi_hba = from_timer(hisi_hba, t, timer); int i, reg_val; reg_val = hisi_sas_read32(hisi_hba, PHY_STATE); for (i = 0; i < hisi_hba->n_phy && reg_val; i++) { if (hisi_hba->reject_stp_links_msk & BIT(i)) continue; if (reg_val & BIT(i)) { hisi_sas_phy_write32(hisi_hba, i, CON_CONTROL, 0x6); break; } } hisi_hba->timer.function = link_timeout_enable_link; mod_timer(&hisi_hba->timer, jiffies + msecs_to_jiffies(100)); } static void set_link_timer_quirk(struct hisi_hba *hisi_hba) { hisi_hba->timer.function = link_timeout_disable_link; hisi_hba->timer.expires = jiffies + msecs_to_jiffies(1000); add_timer(&hisi_hba->timer); } static int hw_init_v2_hw(struct hisi_hba *hisi_hba) { struct device *dev = hisi_hba->dev; int rc; rc = reset_hw_v2_hw(hisi_hba); if (rc) { dev_err(dev, "hisi_sas_reset_hw failed, rc=%d\n", rc); return rc; } msleep(100); init_reg_v2_hw(hisi_hba); return 0; } static void enable_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no) { u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG); cfg |= PHY_CFG_ENA_MSK; hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg); } static bool is_sata_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no) { u32 context; context = hisi_sas_read32(hisi_hba, PHY_CONTEXT); if (context & (1 << phy_no)) return true; return false; } static bool tx_fifo_is_empty_v2_hw(struct hisi_hba *hisi_hba, int phy_no) { u32 dfx_val; dfx_val = hisi_sas_phy_read32(hisi_hba, phy_no, DMA_TX_DFX1); if (dfx_val & BIT(16)) return false; return true; } static bool axi_bus_is_idle_v2_hw(struct hisi_hba *hisi_hba, int phy_no) { int i, max_loop = 1000; struct device *dev = hisi_hba->dev; u32 status, axi_status, dfx_val, dfx_tx_val; for (i = 0; i < max_loop; i++) { status = hisi_sas_read32_relaxed(hisi_hba, AXI_MASTER_CFG_BASE + AM_CURR_TRANS_RETURN); axi_status = hisi_sas_read32(hisi_hba, AXI_CFG); dfx_val = hisi_sas_phy_read32(hisi_hba, phy_no, DMA_TX_DFX1); dfx_tx_val = hisi_sas_phy_read32(hisi_hba, phy_no, DMA_TX_FIFO_DFX0); if ((status == 0x3) && (axi_status == 0x0) && (dfx_val & BIT(20)) && (dfx_tx_val & BIT(10))) return true; udelay(10); } dev_err(dev, "bus is not idle phy%d, axi150:0x%x axi100:0x%x port204:0x%x 
port240:0x%x\n", phy_no, status, axi_status, dfx_val, dfx_tx_val); return false; } static bool wait_io_done_v2_hw(struct hisi_hba *hisi_hba, int phy_no) { int i, max_loop = 1000; struct device *dev = hisi_hba->dev; u32 status, tx_dfx0; for (i = 0; i < max_loop; i++) { status = hisi_sas_phy_read32(hisi_hba, phy_no, LINK_DFX2); status = (status & 0x3fc0) >> 6; if (status != 0x1) return true; tx_dfx0 = hisi_sas_phy_read32(hisi_hba, phy_no, DMA_TX_DFX0); if ((tx_dfx0 & 0x1ff) == 0x2) return true; udelay(10); } dev_err(dev, "IO not done phy%d, port264:0x%x port200:0x%x\n", phy_no, status, tx_dfx0); return false; } static bool allowed_disable_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no) { if (tx_fifo_is_empty_v2_hw(hisi_hba, phy_no)) return true; if (!axi_bus_is_idle_v2_hw(hisi_hba, phy_no)) return false; if (!wait_io_done_v2_hw(hisi_hba, phy_no)) return false; return true; } static void disable_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no) { u32 cfg, axi_val, dfx0_val, txid_auto; struct device *dev = hisi_hba->dev; /* Close axi bus. */ axi_val = hisi_sas_read32(hisi_hba, AXI_MASTER_CFG_BASE + AM_CTRL_GLOBAL); axi_val |= 0x1; hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE + AM_CTRL_GLOBAL, axi_val); if (is_sata_phy_v2_hw(hisi_hba, phy_no)) { if (allowed_disable_phy_v2_hw(hisi_hba, phy_no)) goto do_disable; /* Reset host controller. */ queue_work(hisi_hba->wq, &hisi_hba->rst_work); return; } dfx0_val = hisi_sas_phy_read32(hisi_hba, phy_no, PORT_DFX0); dfx0_val = (dfx0_val & 0x1fc0) >> 6; if (dfx0_val != 0x4) goto do_disable; if (!tx_fifo_is_empty_v2_hw(hisi_hba, phy_no)) { dev_warn(dev, "phy%d, wait tx fifo need send break\n", phy_no); txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO); txid_auto |= TXID_AUTO_CTB_MSK; hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO, txid_auto); } do_disable: cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG); cfg &= ~PHY_CFG_ENA_MSK; hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg); /* Open axi bus. 
*/ axi_val &= ~0x1; hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE + AM_CTRL_GLOBAL, axi_val); } static void start_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no) { config_id_frame_v2_hw(hisi_hba, phy_no); config_phy_opt_mode_v2_hw(hisi_hba, phy_no); enable_phy_v2_hw(hisi_hba, phy_no); } static void phy_hard_reset_v2_hw(struct hisi_hba *hisi_hba, int phy_no) { struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; u32 txid_auto; hisi_sas_phy_enable(hisi_hba, phy_no, 0); if (phy->identify.device_type == SAS_END_DEVICE) { txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO); hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO, txid_auto | TX_HARDRST_MSK); } msleep(100); hisi_sas_phy_enable(hisi_hba, phy_no, 1); } static void phy_get_events_v2_hw(struct hisi_hba *hisi_hba, int phy_no) { struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; struct asd_sas_phy *sas_phy = &phy->sas_phy; struct sas_phy *sphy = sas_phy->phy; u32 err4_reg_val, err6_reg_val; /* loss dword syn, phy reset problem */ err4_reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, SAS_ERR_CNT4_REG); /* disparity err, invalid dword */ err6_reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, SAS_ERR_CNT6_REG); sphy->loss_of_dword_sync_count += (err4_reg_val >> 16) & 0xFFFF; sphy->phy_reset_problem_count += err4_reg_val & 0xFFFF; sphy->invalid_dword_count += (err6_reg_val & 0xFF0000) >> 16; sphy->running_disparity_error_count += err6_reg_val & 0xFF; } static void phys_init_v2_hw(struct hisi_hba *hisi_hba) { int i; for (i = 0; i < hisi_hba->n_phy; i++) { struct hisi_sas_phy *phy = &hisi_hba->phy[i]; struct asd_sas_phy *sas_phy = &phy->sas_phy; if (!sas_phy->phy->enabled) continue; hisi_sas_phy_enable(hisi_hba, i, 1); } } static void sl_notify_ssp_v2_hw(struct hisi_hba *hisi_hba, int phy_no) { u32 sl_control; sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL); sl_control |= SL_CONTROL_NOTIFY_EN_MSK; hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control); msleep(1); sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL); sl_control &= ~SL_CONTROL_NOTIFY_EN_MSK; hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control); } static enum sas_linkrate phy_get_max_linkrate_v2_hw(void) { return SAS_LINK_RATE_12_0_GBPS; } static void phy_set_linkrate_v2_hw(struct hisi_hba *hisi_hba, int phy_no, struct sas_phy_linkrates *r) { enum sas_linkrate max = r->maximum_linkrate; u32 prog_phy_link_rate = 0x800; prog_phy_link_rate |= hisi_sas_get_prog_phy_linkrate_mask(max); hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE, prog_phy_link_rate); } static int get_wideport_bitmap_v2_hw(struct hisi_hba *hisi_hba, int port_id) { int i, bitmap = 0; u32 phy_port_num_ma = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA); u32 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE); for (i = 0; i < (hisi_hba->n_phy < 9 ? 
hisi_hba->n_phy : 8); i++) if (phy_state & 1 << i) if (((phy_port_num_ma >> (i * 4)) & 0xf) == port_id) bitmap |= 1 << i; if (hisi_hba->n_phy == 9) { u32 port_state = hisi_sas_read32(hisi_hba, PORT_STATE); if (phy_state & 1 << 8) if (((port_state & PORT_STATE_PHY8_PORT_NUM_MSK) >> PORT_STATE_PHY8_PORT_NUM_OFF) == port_id) bitmap |= 1 << 9; } return bitmap; } /* DQ lock must be taken here */ static void start_delivery_v2_hw(struct hisi_sas_dq *dq) { struct hisi_hba *hisi_hba = dq->hisi_hba; struct hisi_sas_slot *s, *s1, *s2 = NULL; int dlvry_queue = dq->id; int wp; list_for_each_entry_safe(s, s1, &dq->list, delivery) { if (!s->ready) break; s2 = s; list_del(&s->delivery); } if (!s2) return; /* * Ensure that memories for slots built on other CPUs is observed. */ smp_rmb(); wp = (s2->dlvry_queue_slot + 1) % HISI_SAS_QUEUE_SLOTS; hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14), wp); } static void prep_prd_sge_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot, struct hisi_sas_cmd_hdr *hdr, struct scatterlist *scatter, int n_elem) { struct hisi_sas_sge_page *sge_page = hisi_sas_sge_addr_mem(slot); struct scatterlist *sg; int i; for_each_sg(scatter, sg, n_elem, i) { struct hisi_sas_sge *entry = &sge_page->sge[i]; entry->addr = cpu_to_le64(sg_dma_address(sg)); entry->page_ctrl_0 = entry->page_ctrl_1 = 0; entry->data_len = cpu_to_le32(sg_dma_len(sg)); entry->data_off = 0; } hdr->prd_table_addr = cpu_to_le64(hisi_sas_sge_addr_dma(slot)); hdr->sg_len = cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF); } static void prep_smp_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot) { struct sas_task *task = slot->task; struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr; struct domain_device *device = task->dev; struct hisi_sas_port *port = slot->port; struct scatterlist *sg_req; struct hisi_sas_device *sas_dev = device->lldd_dev; dma_addr_t req_dma_addr; unsigned int req_len; /* req */ sg_req = &task->smp_task.smp_req; req_dma_addr = sg_dma_address(sg_req); req_len = sg_dma_len(&task->smp_task.smp_req); /* create header */ /* dw0 */ hdr->dw0 = cpu_to_le32((port->id << CMD_HDR_PORT_OFF) | (1 << CMD_HDR_PRIORITY_OFF) | /* high pri */ (2 << CMD_HDR_CMD_OFF)); /* smp */ /* map itct entry */ hdr->dw1 = cpu_to_le32((sas_dev->device_id << CMD_HDR_DEV_ID_OFF) | (1 << CMD_HDR_FRAME_TYPE_OFF) | (DIR_NO_DATA << CMD_HDR_DIR_OFF)); /* dw2 */ hdr->dw2 = cpu_to_le32((((req_len - 4) / 4) << CMD_HDR_CFL_OFF) | (HISI_SAS_MAX_SMP_RESP_SZ / 4 << CMD_HDR_MRFL_OFF)); hdr->transfer_tags = cpu_to_le32(slot->idx << CMD_HDR_IPTT_OFF); hdr->cmd_table_addr = cpu_to_le64(req_dma_addr); hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot)); } static void prep_ssp_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot) { struct sas_task *task = slot->task; struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr; struct domain_device *device = task->dev; struct hisi_sas_device *sas_dev = device->lldd_dev; struct hisi_sas_port *port = slot->port; struct sas_ssp_task *ssp_task = &task->ssp_task; struct scsi_cmnd *scsi_cmnd = ssp_task->cmd; struct sas_tmf_task *tmf = slot->tmf; int has_data = 0, priority = !!tmf; u8 *buf_cmd; u32 dw1 = 0, dw2 = 0; hdr->dw0 = cpu_to_le32((1 << CMD_HDR_RESP_REPORT_OFF) | (2 << CMD_HDR_TLR_CTRL_OFF) | (port->id << CMD_HDR_PORT_OFF) | (priority << CMD_HDR_PRIORITY_OFF) | (1 << CMD_HDR_CMD_OFF)); /* ssp */ dw1 = 1 << CMD_HDR_VDTL_OFF; if (tmf) { dw1 |= 2 << CMD_HDR_FRAME_TYPE_OFF; dw1 |= DIR_NO_DATA << CMD_HDR_DIR_OFF; } else { dw1 |= 1 << CMD_HDR_FRAME_TYPE_OFF; switch 
(scsi_cmnd->sc_data_direction) { case DMA_TO_DEVICE: has_data = 1; dw1 |= DIR_TO_DEVICE << CMD_HDR_DIR_OFF; break; case DMA_FROM_DEVICE: has_data = 1; dw1 |= DIR_TO_INI << CMD_HDR_DIR_OFF; break; default: dw1 &= ~CMD_HDR_DIR_MSK; } } /* map itct entry */ dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF; hdr->dw1 = cpu_to_le32(dw1); dw2 = (((sizeof(struct ssp_command_iu) + sizeof(struct ssp_frame_hdr) + 3) / 4) << CMD_HDR_CFL_OFF) | ((HISI_SAS_MAX_SSP_RESP_SZ / 4) << CMD_HDR_MRFL_OFF) | (2 << CMD_HDR_SG_MOD_OFF); hdr->dw2 = cpu_to_le32(dw2); hdr->transfer_tags = cpu_to_le32(slot->idx); if (has_data) prep_prd_sge_v2_hw(hisi_hba, slot, hdr, task->scatter, slot->n_elem); hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len); hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot)); hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot)); buf_cmd = hisi_sas_cmd_hdr_addr_mem(slot) + sizeof(struct ssp_frame_hdr); memcpy(buf_cmd, &task->ssp_task.LUN, 8); if (!tmf) { buf_cmd[9] = task->ssp_task.task_attr; memcpy(buf_cmd + 12, task->ssp_task.cmd->cmnd, task->ssp_task.cmd->cmd_len); } else { buf_cmd[10] = tmf->tmf; switch (tmf->tmf) { case TMF_ABORT_TASK: case TMF_QUERY_TASK: buf_cmd[12] = (tmf->tag_of_task_to_be_managed >> 8) & 0xff; buf_cmd[13] = tmf->tag_of_task_to_be_managed & 0xff; break; default: break; } } } #define TRANS_TX_ERR 0 #define TRANS_RX_ERR 1 #define DMA_TX_ERR 2 #define SIPC_RX_ERR 3 #define DMA_RX_ERR 4 #define DMA_TX_ERR_OFF 0 #define DMA_TX_ERR_MSK (0xffff << DMA_TX_ERR_OFF) #define SIPC_RX_ERR_OFF 16 #define SIPC_RX_ERR_MSK (0xffff << SIPC_RX_ERR_OFF) static int parse_trans_tx_err_code_v2_hw(u32 err_msk) { static const u8 trans_tx_err_code_prio[] = { TRANS_TX_OPEN_FAIL_WITH_IT_NEXUS_LOSS, TRANS_TX_ERR_PHY_NOT_ENABLE, TRANS_TX_OPEN_CNX_ERR_WRONG_DESTINATION, TRANS_TX_OPEN_CNX_ERR_ZONE_VIOLATION, TRANS_TX_OPEN_CNX_ERR_BY_OTHER, RESERVED0, TRANS_TX_OPEN_CNX_ERR_AIP_TIMEOUT, TRANS_TX_OPEN_CNX_ERR_STP_RESOURCES_BUSY, TRANS_TX_OPEN_CNX_ERR_PROTOCOL_NOT_SUPPORTED, TRANS_TX_OPEN_CNX_ERR_CONNECTION_RATE_NOT_SUPPORTED, TRANS_TX_OPEN_CNX_ERR_BAD_DESTINATION, TRANS_TX_OPEN_CNX_ERR_BREAK_RCVD, TRANS_TX_OPEN_CNX_ERR_LOW_PHY_POWER, TRANS_TX_OPEN_CNX_ERR_PATHWAY_BLOCKED, TRANS_TX_OPEN_CNX_ERR_OPEN_TIMEOUT, TRANS_TX_OPEN_CNX_ERR_NO_DESTINATION, TRANS_TX_OPEN_RETRY_ERR_THRESHOLD_REACHED, TRANS_TX_ERR_WITH_CLOSE_PHYDISALE, TRANS_TX_ERR_WITH_CLOSE_DWS_TIMEOUT, TRANS_TX_ERR_WITH_CLOSE_COMINIT, TRANS_TX_ERR_WITH_BREAK_TIMEOUT, TRANS_TX_ERR_WITH_BREAK_REQUEST, TRANS_TX_ERR_WITH_BREAK_RECEVIED, TRANS_TX_ERR_WITH_CLOSE_TIMEOUT, TRANS_TX_ERR_WITH_CLOSE_NORMAL, TRANS_TX_ERR_WITH_NAK_RECEVIED, TRANS_TX_ERR_WITH_ACK_NAK_TIMEOUT, TRANS_TX_ERR_WITH_CREDIT_TIMEOUT, TRANS_TX_ERR_WITH_IPTT_CONFLICT, TRANS_TX_ERR_WITH_OPEN_BY_DES_OR_OTHERS, TRANS_TX_ERR_WITH_WAIT_RECV_TIMEOUT, }; int index, i; for (i = 0; i < ARRAY_SIZE(trans_tx_err_code_prio); i++) { index = trans_tx_err_code_prio[i] - TRANS_TX_FAIL_BASE; if (err_msk & (1 << index)) return trans_tx_err_code_prio[i]; } return -1; } static int parse_trans_rx_err_code_v2_hw(u32 err_msk) { static const u8 trans_rx_err_code_prio[] = { TRANS_RX_ERR_WITH_RXFRAME_CRC_ERR, TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR, TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM, TRANS_RX_ERR_WITH_RXFIS_DECODE_ERROR, TRANS_RX_ERR_WITH_RXFIS_CRC_ERR, TRANS_RX_ERR_WITH_RXFRAME_LENGTH_OVERRUN, TRANS_RX_ERR_WITH_RXFIS_RX_SYNCP, TRANS_RX_ERR_WITH_LINK_BUF_OVERRUN, TRANS_RX_ERR_WITH_CLOSE_PHY_DISABLE, TRANS_RX_ERR_WITH_CLOSE_DWS_TIMEOUT, TRANS_RX_ERR_WITH_CLOSE_COMINIT, 
TRANS_RX_ERR_WITH_BREAK_TIMEOUT, TRANS_RX_ERR_WITH_BREAK_REQUEST, TRANS_RX_ERR_WITH_BREAK_RECEVIED, RESERVED1, TRANS_RX_ERR_WITH_CLOSE_NORMAL, TRANS_RX_ERR_WITH_DATA_LEN0, TRANS_RX_ERR_WITH_BAD_HASH, TRANS_RX_XRDY_WLEN_ZERO_ERR, TRANS_RX_SSP_FRM_LEN_ERR, RESERVED2, RESERVED3, RESERVED4, RESERVED5, TRANS_RX_ERR_WITH_BAD_FRM_TYPE, TRANS_RX_SMP_FRM_LEN_ERR, TRANS_RX_SMP_RESP_TIMEOUT_ERR, RESERVED6, RESERVED7, RESERVED8, RESERVED9, TRANS_RX_R_ERR, }; int index, i; for (i = 0; i < ARRAY_SIZE(trans_rx_err_code_prio); i++) { index = trans_rx_err_code_prio[i] - TRANS_RX_FAIL_BASE; if (err_msk & (1 << index)) return trans_rx_err_code_prio[i]; } return -1; } static int parse_dma_tx_err_code_v2_hw(u32 err_msk) { static const u8 dma_tx_err_code_prio[] = { DMA_TX_UNEXP_XFER_ERR, DMA_TX_UNEXP_RETRANS_ERR, DMA_TX_XFER_LEN_OVERFLOW, DMA_TX_XFER_OFFSET_ERR, DMA_TX_RAM_ECC_ERR, DMA_TX_DIF_LEN_ALIGN_ERR, DMA_TX_DIF_CRC_ERR, DMA_TX_DIF_APP_ERR, DMA_TX_DIF_RPP_ERR, DMA_TX_DATA_SGL_OVERFLOW, DMA_TX_DIF_SGL_OVERFLOW, }; int index, i; for (i = 0; i < ARRAY_SIZE(dma_tx_err_code_prio); i++) { index = dma_tx_err_code_prio[i] - DMA_TX_ERR_BASE; err_msk = err_msk & DMA_TX_ERR_MSK; if (err_msk & (1 << index)) return dma_tx_err_code_prio[i]; } return -1; } static int parse_sipc_rx_err_code_v2_hw(u32 err_msk) { static const u8 sipc_rx_err_code_prio[] = { SIPC_RX_FIS_STATUS_ERR_BIT_VLD, SIPC_RX_PIO_WRSETUP_STATUS_DRQ_ERR, SIPC_RX_FIS_STATUS_BSY_BIT_ERR, SIPC_RX_WRSETUP_LEN_ODD_ERR, SIPC_RX_WRSETUP_LEN_ZERO_ERR, SIPC_RX_WRDATA_LEN_NOT_MATCH_ERR, SIPC_RX_NCQ_WRSETUP_OFFSET_ERR, SIPC_RX_NCQ_WRSETUP_AUTO_ACTIVE_ERR, SIPC_RX_SATA_UNEXP_FIS_ERR, SIPC_RX_WRSETUP_ESTATUS_ERR, SIPC_RX_DATA_UNDERFLOW_ERR, }; int index, i; for (i = 0; i < ARRAY_SIZE(sipc_rx_err_code_prio); i++) { index = sipc_rx_err_code_prio[i] - SIPC_RX_ERR_BASE; err_msk = err_msk & SIPC_RX_ERR_MSK; if (err_msk & (1 << (index + 0x10))) return sipc_rx_err_code_prio[i]; } return -1; } static int parse_dma_rx_err_code_v2_hw(u32 err_msk) { static const u8 dma_rx_err_code_prio[] = { DMA_RX_UNKNOWN_FRM_ERR, DMA_RX_DATA_LEN_OVERFLOW, DMA_RX_DATA_LEN_UNDERFLOW, DMA_RX_DATA_OFFSET_ERR, RESERVED10, DMA_RX_SATA_FRAME_TYPE_ERR, DMA_RX_RESP_BUF_OVERFLOW, DMA_RX_UNEXP_RETRANS_RESP_ERR, DMA_RX_UNEXP_NORM_RESP_ERR, DMA_RX_UNEXP_RDFRAME_ERR, DMA_RX_PIO_DATA_LEN_ERR, DMA_RX_RDSETUP_STATUS_ERR, DMA_RX_RDSETUP_STATUS_DRQ_ERR, DMA_RX_RDSETUP_STATUS_BSY_ERR, DMA_RX_RDSETUP_LEN_ODD_ERR, DMA_RX_RDSETUP_LEN_ZERO_ERR, DMA_RX_RDSETUP_LEN_OVER_ERR, DMA_RX_RDSETUP_OFFSET_ERR, DMA_RX_RDSETUP_ACTIVE_ERR, DMA_RX_RDSETUP_ESTATUS_ERR, DMA_RX_RAM_ECC_ERR, DMA_RX_DIF_CRC_ERR, DMA_RX_DIF_APP_ERR, DMA_RX_DIF_RPP_ERR, DMA_RX_DATA_SGL_OVERFLOW, DMA_RX_DIF_SGL_OVERFLOW, }; int index, i; for (i = 0; i < ARRAY_SIZE(dma_rx_err_code_prio); i++) { index = dma_rx_err_code_prio[i] - DMA_RX_ERR_BASE; if (err_msk & (1 << index)) return dma_rx_err_code_prio[i]; } return -1; } /* by default, task resp is complete */ static void slot_err_v2_hw(struct hisi_hba *hisi_hba, struct sas_task *task, struct hisi_sas_slot *slot, int err_phase) { struct task_status_struct *ts = &task->task_status; struct hisi_sas_err_record_v2 *err_record = hisi_sas_status_buf_addr_mem(slot); u32 trans_tx_fail_type = le32_to_cpu(err_record->trans_tx_fail_type); u32 trans_rx_fail_type = le32_to_cpu(err_record->trans_rx_fail_type); u16 dma_tx_err_type = le16_to_cpu(err_record->dma_tx_err_type); u16 sipc_rx_err_type = le16_to_cpu(err_record->sipc_rx_err_type); u32 dma_rx_err_type = le32_to_cpu(err_record->dma_rx_err_type); struct 
hisi_sas_complete_v2_hdr *complete_queue = hisi_hba->complete_hdr[slot->cmplt_queue]; struct hisi_sas_complete_v2_hdr *complete_hdr = &complete_queue[slot->cmplt_queue_slot]; u32 dw0 = le32_to_cpu(complete_hdr->dw0); int error = -1; if (err_phase == 1) { /* error in TX phase, the priority of error is: DW2 > DW0 */ error = parse_dma_tx_err_code_v2_hw(dma_tx_err_type); if (error == -1) error = parse_trans_tx_err_code_v2_hw( trans_tx_fail_type); } else if (err_phase == 2) { /* error in RX phase, the priority is: DW1 > DW3 > DW2 */ error = parse_trans_rx_err_code_v2_hw(trans_rx_fail_type); if (error == -1) { error = parse_dma_rx_err_code_v2_hw( dma_rx_err_type); if (error == -1) error = parse_sipc_rx_err_code_v2_hw( sipc_rx_err_type); } } switch (task->task_proto) { case SAS_PROTOCOL_SSP: { switch (error) { case TRANS_TX_OPEN_CNX_ERR_NO_DESTINATION: { ts->stat = SAS_OPEN_REJECT; ts->open_rej_reason = SAS_OREJ_NO_DEST; break; } case TRANS_TX_OPEN_CNX_ERR_PROTOCOL_NOT_SUPPORTED: { ts->stat = SAS_OPEN_REJECT; ts->open_rej_reason = SAS_OREJ_EPROTO; break; } case TRANS_TX_OPEN_CNX_ERR_CONNECTION_RATE_NOT_SUPPORTED: { ts->stat = SAS_OPEN_REJECT; ts->open_rej_reason = SAS_OREJ_CONN_RATE; break; } case TRANS_TX_OPEN_CNX_ERR_BAD_DESTINATION: { ts->stat = SAS_OPEN_REJECT; ts->open_rej_reason = SAS_OREJ_BAD_DEST; break; } case TRANS_TX_OPEN_CNX_ERR_WRONG_DESTINATION: { ts->stat = SAS_OPEN_REJECT; ts->open_rej_reason = SAS_OREJ_WRONG_DEST; break; } case DMA_RX_UNEXP_NORM_RESP_ERR: case TRANS_TX_OPEN_CNX_ERR_ZONE_VIOLATION: case DMA_RX_RESP_BUF_OVERFLOW: { ts->stat = SAS_OPEN_REJECT; ts->open_rej_reason = SAS_OREJ_UNKNOWN; break; } case TRANS_TX_OPEN_CNX_ERR_LOW_PHY_POWER: { /* not sure */ ts->stat = SAS_DEV_NO_RESPONSE; break; } case DMA_RX_DATA_LEN_OVERFLOW: { ts->stat = SAS_DATA_OVERRUN; ts->residual = 0; break; } case DMA_RX_DATA_LEN_UNDERFLOW: { ts->residual = trans_tx_fail_type; ts->stat = SAS_DATA_UNDERRUN; break; } case TRANS_TX_OPEN_FAIL_WITH_IT_NEXUS_LOSS: case TRANS_TX_ERR_PHY_NOT_ENABLE: case TRANS_TX_OPEN_CNX_ERR_BY_OTHER: case TRANS_TX_OPEN_CNX_ERR_AIP_TIMEOUT: case TRANS_TX_OPEN_CNX_ERR_BREAK_RCVD: case TRANS_TX_OPEN_CNX_ERR_PATHWAY_BLOCKED: case TRANS_TX_OPEN_CNX_ERR_OPEN_TIMEOUT: case TRANS_TX_OPEN_RETRY_ERR_THRESHOLD_REACHED: case TRANS_TX_ERR_WITH_BREAK_TIMEOUT: case TRANS_TX_ERR_WITH_BREAK_REQUEST: case TRANS_TX_ERR_WITH_BREAK_RECEVIED: case TRANS_TX_ERR_WITH_CLOSE_TIMEOUT: case TRANS_TX_ERR_WITH_CLOSE_NORMAL: case TRANS_TX_ERR_WITH_CLOSE_PHYDISALE: case TRANS_TX_ERR_WITH_CLOSE_DWS_TIMEOUT: case TRANS_TX_ERR_WITH_CLOSE_COMINIT: case TRANS_TX_ERR_WITH_NAK_RECEVIED: case TRANS_TX_ERR_WITH_ACK_NAK_TIMEOUT: case TRANS_TX_ERR_WITH_CREDIT_TIMEOUT: case TRANS_TX_ERR_WITH_IPTT_CONFLICT: case TRANS_RX_ERR_WITH_RXFRAME_CRC_ERR: case TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR: case TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM: case TRANS_RX_ERR_WITH_LINK_BUF_OVERRUN: case TRANS_RX_ERR_WITH_BREAK_TIMEOUT: case TRANS_RX_ERR_WITH_BREAK_REQUEST: case TRANS_RX_ERR_WITH_BREAK_RECEVIED: case TRANS_RX_ERR_WITH_CLOSE_NORMAL: case TRANS_RX_ERR_WITH_CLOSE_DWS_TIMEOUT: case TRANS_RX_ERR_WITH_CLOSE_COMINIT: case TRANS_TX_ERR_FRAME_TXED: case TRANS_RX_ERR_WITH_CLOSE_PHY_DISABLE: case TRANS_RX_ERR_WITH_DATA_LEN0: case TRANS_RX_ERR_WITH_BAD_HASH: case TRANS_RX_XRDY_WLEN_ZERO_ERR: case TRANS_RX_SSP_FRM_LEN_ERR: case TRANS_RX_ERR_WITH_BAD_FRM_TYPE: case DMA_TX_DATA_SGL_OVERFLOW: case DMA_TX_UNEXP_XFER_ERR: case DMA_TX_UNEXP_RETRANS_ERR: case DMA_TX_XFER_LEN_OVERFLOW: case DMA_TX_XFER_OFFSET_ERR: case 
SIPC_RX_DATA_UNDERFLOW_ERR: case DMA_RX_DATA_SGL_OVERFLOW: case DMA_RX_DATA_OFFSET_ERR: case DMA_RX_RDSETUP_LEN_ODD_ERR: case DMA_RX_RDSETUP_LEN_ZERO_ERR: case DMA_RX_RDSETUP_LEN_OVER_ERR: case DMA_RX_SATA_FRAME_TYPE_ERR: case DMA_RX_UNKNOWN_FRM_ERR: { /* This will request a retry */ ts->stat = SAS_QUEUE_FULL; slot->abort = 1; break; } default: break; } } break; case SAS_PROTOCOL_SMP: ts->stat = SAS_SAM_STAT_CHECK_CONDITION; break; case SAS_PROTOCOL_SATA: case SAS_PROTOCOL_STP: case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: { switch (error) { case TRANS_TX_OPEN_CNX_ERR_NO_DESTINATION: { ts->stat = SAS_OPEN_REJECT; ts->open_rej_reason = SAS_OREJ_NO_DEST; break; } case TRANS_TX_OPEN_CNX_ERR_LOW_PHY_POWER: { ts->resp = SAS_TASK_UNDELIVERED; ts->stat = SAS_DEV_NO_RESPONSE; break; } case TRANS_TX_OPEN_CNX_ERR_PROTOCOL_NOT_SUPPORTED: { ts->stat = SAS_OPEN_REJECT; ts->open_rej_reason = SAS_OREJ_EPROTO; break; } case TRANS_TX_OPEN_CNX_ERR_CONNECTION_RATE_NOT_SUPPORTED: { ts->stat = SAS_OPEN_REJECT; ts->open_rej_reason = SAS_OREJ_CONN_RATE; break; } case TRANS_TX_OPEN_CNX_ERR_BAD_DESTINATION: { ts->stat = SAS_OPEN_REJECT; ts->open_rej_reason = SAS_OREJ_CONN_RATE; break; } case TRANS_TX_OPEN_CNX_ERR_WRONG_DESTINATION: { ts->stat = SAS_OPEN_REJECT; ts->open_rej_reason = SAS_OREJ_WRONG_DEST; break; } case DMA_RX_RESP_BUF_OVERFLOW: case DMA_RX_UNEXP_NORM_RESP_ERR: case TRANS_TX_OPEN_CNX_ERR_ZONE_VIOLATION: { ts->stat = SAS_OPEN_REJECT; ts->open_rej_reason = SAS_OREJ_UNKNOWN; break; } case DMA_RX_DATA_LEN_OVERFLOW: { ts->stat = SAS_DATA_OVERRUN; ts->residual = 0; break; } case DMA_RX_DATA_LEN_UNDERFLOW: { ts->residual = trans_tx_fail_type; ts->stat = SAS_DATA_UNDERRUN; break; } case TRANS_TX_OPEN_FAIL_WITH_IT_NEXUS_LOSS: case TRANS_TX_ERR_PHY_NOT_ENABLE: case TRANS_TX_OPEN_CNX_ERR_BY_OTHER: case TRANS_TX_OPEN_CNX_ERR_AIP_TIMEOUT: case TRANS_TX_OPEN_CNX_ERR_BREAK_RCVD: case TRANS_TX_OPEN_CNX_ERR_PATHWAY_BLOCKED: case TRANS_TX_OPEN_CNX_ERR_OPEN_TIMEOUT: case TRANS_TX_OPEN_RETRY_ERR_THRESHOLD_REACHED: case TRANS_TX_ERR_WITH_BREAK_TIMEOUT: case TRANS_TX_ERR_WITH_BREAK_REQUEST: case TRANS_TX_ERR_WITH_BREAK_RECEVIED: case TRANS_TX_ERR_WITH_CLOSE_TIMEOUT: case TRANS_TX_ERR_WITH_CLOSE_NORMAL: case TRANS_TX_ERR_WITH_CLOSE_PHYDISALE: case TRANS_TX_ERR_WITH_CLOSE_DWS_TIMEOUT: case TRANS_TX_ERR_WITH_CLOSE_COMINIT: case TRANS_TX_ERR_WITH_ACK_NAK_TIMEOUT: case TRANS_TX_ERR_WITH_CREDIT_TIMEOUT: case TRANS_TX_ERR_WITH_OPEN_BY_DES_OR_OTHERS: case TRANS_TX_ERR_WITH_WAIT_RECV_TIMEOUT: case TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM: case TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR: case TRANS_RX_ERR_WITH_RXFIS_DECODE_ERROR: case TRANS_RX_ERR_WITH_RXFIS_CRC_ERR: case TRANS_RX_ERR_WITH_RXFRAME_LENGTH_OVERRUN: case TRANS_RX_ERR_WITH_RXFIS_RX_SYNCP: case TRANS_RX_ERR_WITH_LINK_BUF_OVERRUN: case TRANS_RX_ERR_WITH_BREAK_TIMEOUT: case TRANS_RX_ERR_WITH_BREAK_REQUEST: case TRANS_RX_ERR_WITH_BREAK_RECEVIED: case TRANS_RX_ERR_WITH_CLOSE_NORMAL: case TRANS_RX_ERR_WITH_CLOSE_PHY_DISABLE: case TRANS_RX_ERR_WITH_CLOSE_DWS_TIMEOUT: case TRANS_RX_ERR_WITH_CLOSE_COMINIT: case TRANS_RX_ERR_WITH_DATA_LEN0: case TRANS_RX_ERR_WITH_BAD_HASH: case TRANS_RX_XRDY_WLEN_ZERO_ERR: case TRANS_RX_ERR_WITH_BAD_FRM_TYPE: case DMA_TX_DATA_SGL_OVERFLOW: case DMA_TX_UNEXP_XFER_ERR: case DMA_TX_UNEXP_RETRANS_ERR: case DMA_TX_XFER_LEN_OVERFLOW: case DMA_TX_XFER_OFFSET_ERR: case SIPC_RX_FIS_STATUS_ERR_BIT_VLD: case SIPC_RX_PIO_WRSETUP_STATUS_DRQ_ERR: case SIPC_RX_FIS_STATUS_BSY_BIT_ERR: case SIPC_RX_WRSETUP_LEN_ODD_ERR: case SIPC_RX_WRSETUP_LEN_ZERO_ERR: case 
SIPC_RX_WRDATA_LEN_NOT_MATCH_ERR: case SIPC_RX_SATA_UNEXP_FIS_ERR: case DMA_RX_DATA_SGL_OVERFLOW: case DMA_RX_DATA_OFFSET_ERR: case DMA_RX_SATA_FRAME_TYPE_ERR: case DMA_RX_UNEXP_RDFRAME_ERR: case DMA_RX_PIO_DATA_LEN_ERR: case DMA_RX_RDSETUP_STATUS_ERR: case DMA_RX_RDSETUP_STATUS_DRQ_ERR: case DMA_RX_RDSETUP_STATUS_BSY_ERR: case DMA_RX_RDSETUP_LEN_ODD_ERR: case DMA_RX_RDSETUP_LEN_ZERO_ERR: case DMA_RX_RDSETUP_LEN_OVER_ERR: case DMA_RX_RDSETUP_OFFSET_ERR: case DMA_RX_RDSETUP_ACTIVE_ERR: case DMA_RX_RDSETUP_ESTATUS_ERR: case DMA_RX_UNKNOWN_FRM_ERR: case TRANS_RX_SSP_FRM_LEN_ERR: case TRANS_TX_OPEN_CNX_ERR_STP_RESOURCES_BUSY: { slot->abort = 1; ts->stat = SAS_PHY_DOWN; break; } default: { ts->stat = SAS_PROTO_RESPONSE; break; } } if (dw0 & CMPLT_HDR_RSPNS_XFRD_MSK) hisi_sas_sata_done(task, slot); } break; default: break; } } static void slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot) { struct sas_task *task = slot->task; struct hisi_sas_device *sas_dev; struct device *dev = hisi_hba->dev; struct task_status_struct *ts; struct domain_device *device; struct sas_ha_struct *ha; struct hisi_sas_complete_v2_hdr *complete_queue = hisi_hba->complete_hdr[slot->cmplt_queue]; struct hisi_sas_complete_v2_hdr *complete_hdr = &complete_queue[slot->cmplt_queue_slot]; unsigned long flags; bool is_internal = slot->is_internal; u32 dw0; if (unlikely(!task || !task->lldd_task || !task->dev)) return; ts = &task->task_status; device = task->dev; ha = device->port->ha; sas_dev = device->lldd_dev; spin_lock_irqsave(&task->task_state_lock, flags); task->task_state_flags &= ~SAS_TASK_STATE_PENDING; spin_unlock_irqrestore(&task->task_state_lock, flags); memset(ts, 0, sizeof(*ts)); ts->resp = SAS_TASK_COMPLETE; if (unlikely(!sas_dev)) { dev_dbg(dev, "slot complete: port has no device\n"); ts->stat = SAS_PHY_DOWN; goto out; } /* Use SAS+TMF status codes */ dw0 = le32_to_cpu(complete_hdr->dw0); switch ((dw0 & CMPLT_HDR_ABORT_STAT_MSK) >> CMPLT_HDR_ABORT_STAT_OFF) { case STAT_IO_ABORTED: /* this io has been aborted by abort command */ ts->stat = SAS_ABORTED_TASK; goto out; case STAT_IO_COMPLETE: /* internal abort command complete */ ts->stat = TMF_RESP_FUNC_SUCC; del_timer_sync(&slot->internal_abort_timer); goto out; case STAT_IO_NO_DEVICE: ts->stat = TMF_RESP_FUNC_COMPLETE; del_timer_sync(&slot->internal_abort_timer); goto out; case STAT_IO_NOT_VALID: /* abort single io, controller don't find * the io need to abort */ ts->stat = TMF_RESP_FUNC_FAILED; del_timer_sync(&slot->internal_abort_timer); goto out; default: break; } if ((dw0 & CMPLT_HDR_ERX_MSK) && (!(dw0 & CMPLT_HDR_RSPNS_XFRD_MSK))) { u32 err_phase = (dw0 & CMPLT_HDR_ERR_PHASE_MSK) >> CMPLT_HDR_ERR_PHASE_OFF; u32 *error_info = hisi_sas_status_buf_addr_mem(slot); /* Analyse error happens on which phase TX or RX */ if (ERR_ON_TX_PHASE(err_phase)) slot_err_v2_hw(hisi_hba, task, slot, 1); else if (ERR_ON_RX_PHASE(err_phase)) slot_err_v2_hw(hisi_hba, task, slot, 2); if (ts->stat != SAS_DATA_UNDERRUN) dev_info(dev, "erroneous completion iptt=%d task=%pK dev id=%d CQ hdr: 0x%x 0x%x 0x%x 0x%x Error info: 0x%x 0x%x 0x%x 0x%x\n", slot->idx, task, sas_dev->device_id, complete_hdr->dw0, complete_hdr->dw1, complete_hdr->act, complete_hdr->dw3, error_info[0], error_info[1], error_info[2], error_info[3]); if (unlikely(slot->abort)) { if (dev_is_sata(device) && task->ata_task.use_ncq) sas_ata_device_link_abort(device, true); else sas_task_abort(task); return; } goto out; } switch (task->task_proto) { case SAS_PROTOCOL_SSP: { struct 
hisi_sas_status_buffer *status_buffer = hisi_sas_status_buf_addr_mem(slot); struct ssp_response_iu *iu = (struct ssp_response_iu *) &status_buffer->iu[0]; sas_ssp_task_response(dev, task, iu); break; } case SAS_PROTOCOL_SMP: { struct scatterlist *sg_resp = &task->smp_task.smp_resp; void *to = page_address(sg_page(sg_resp)); ts->stat = SAS_SAM_STAT_GOOD; memcpy(to + sg_resp->offset, hisi_sas_status_buf_addr_mem(slot) + sizeof(struct hisi_sas_err_record), sg_resp->length); break; } case SAS_PROTOCOL_SATA: case SAS_PROTOCOL_STP: case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: { ts->stat = SAS_SAM_STAT_GOOD; if (dw0 & CMPLT_HDR_RSPNS_XFRD_MSK) hisi_sas_sata_done(task, slot); break; } default: ts->stat = SAS_SAM_STAT_CHECK_CONDITION; break; } if (!slot->port->port_attached) { dev_warn(dev, "slot complete: port %d has removed\n", slot->port->sas_port.id); ts->stat = SAS_PHY_DOWN; } out: spin_lock_irqsave(&task->task_state_lock, flags); if (task->task_state_flags & SAS_TASK_STATE_ABORTED) { spin_unlock_irqrestore(&task->task_state_lock, flags); dev_info(dev, "slot complete: task(%pK) aborted\n", task); return; } task->task_state_flags |= SAS_TASK_STATE_DONE; spin_unlock_irqrestore(&task->task_state_lock, flags); hisi_sas_slot_task_free(hisi_hba, task, slot, true); if (!is_internal && (task->task_proto != SAS_PROTOCOL_SMP)) { spin_lock_irqsave(&device->done_lock, flags); if (test_bit(SAS_HA_FROZEN, &ha->state)) { spin_unlock_irqrestore(&device->done_lock, flags); dev_info(dev, "slot complete: task(%pK) ignored\n", task); return; } spin_unlock_irqrestore(&device->done_lock, flags); } if (task->task_done) task->task_done(task); } static void prep_ata_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot) { struct sas_task *task = slot->task; struct domain_device *device = task->dev; struct domain_device *parent_dev = device->parent; struct hisi_sas_device *sas_dev = device->lldd_dev; struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr; struct asd_sas_port *sas_port = device->port; struct hisi_sas_port *port = to_hisi_sas_port(sas_port); struct sas_ata_task *ata_task = &task->ata_task; struct sas_tmf_task *tmf = slot->tmf; u8 *buf_cmd; int has_data = 0, hdr_tag = 0; u32 dw0, dw1 = 0, dw2 = 0; /* create header */ /* dw0 */ dw0 = port->id << CMD_HDR_PORT_OFF; if (parent_dev && dev_is_expander(parent_dev->dev_type)) dw0 |= 3 << CMD_HDR_CMD_OFF; else dw0 |= 4 << CMD_HDR_CMD_OFF; if (tmf && ata_task->force_phy) { dw0 |= CMD_HDR_FORCE_PHY_MSK; dw0 |= (1 << ata_task->force_phy_id) << CMD_HDR_PHY_ID_OFF; } hdr->dw0 = cpu_to_le32(dw0); /* dw1 */ switch (task->data_dir) { case DMA_TO_DEVICE: has_data = 1; dw1 |= DIR_TO_DEVICE << CMD_HDR_DIR_OFF; break; case DMA_FROM_DEVICE: has_data = 1; dw1 |= DIR_TO_INI << CMD_HDR_DIR_OFF; break; default: dw1 &= ~CMD_HDR_DIR_MSK; } if ((task->ata_task.fis.command == ATA_CMD_DEV_RESET) && (task->ata_task.fis.control & ATA_SRST)) dw1 |= 1 << CMD_HDR_RESET_OFF; dw1 |= (hisi_sas_get_ata_protocol( &task->ata_task.fis, task->data_dir)) << CMD_HDR_FRAME_TYPE_OFF; dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF; hdr->dw1 = cpu_to_le32(dw1); /* dw2 */ if (task->ata_task.use_ncq) { struct ata_queued_cmd *qc = task->uldd_task; hdr_tag = qc->tag; task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3); dw2 |= hdr_tag << CMD_HDR_NCQ_TAG_OFF; } dw2 |= (HISI_SAS_MAX_STP_RESP_SZ / 4) << CMD_HDR_CFL_OFF | 2 << CMD_HDR_SG_MOD_OFF; hdr->dw2 = cpu_to_le32(dw2); /* dw3 */ hdr->transfer_tags = cpu_to_le32(slot->idx); if (has_data) prep_prd_sge_v2_hw(hisi_hba, slot, hdr, task->scatter, slot->n_elem); 
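	/*
	 * Editor's note (descriptive only, inferred from the surrounding
	 * code): as in prep_ssp_v2_hw() earlier in this file, the remaining
	 * header fields carry the total transfer length plus the DMA
	 * addresses of the per-slot command table (which receives the
	 * host-to-device FIS copied in below) and the per-slot status
	 * buffer.
	 */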
hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len); hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot)); hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot)); buf_cmd = hisi_sas_cmd_hdr_addr_mem(slot); if (likely(!task->ata_task.device_control_reg_update)) task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */ /* fill in command FIS */ memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis)); } static void hisi_sas_internal_abort_quirk_timeout(struct timer_list *t) { struct hisi_sas_slot *slot = from_timer(slot, t, internal_abort_timer); struct hisi_sas_port *port = slot->port; struct asd_sas_port *asd_sas_port; struct asd_sas_phy *sas_phy; if (!port) return; asd_sas_port = &port->sas_port; /* Kick the hardware - send break command */ list_for_each_entry(sas_phy, &asd_sas_port->phy_list, port_phy_el) { struct hisi_sas_phy *phy = sas_phy->lldd_phy; struct hisi_hba *hisi_hba = phy->hisi_hba; int phy_no = sas_phy->id; u32 link_dfx2; link_dfx2 = hisi_sas_phy_read32(hisi_hba, phy_no, LINK_DFX2); if ((link_dfx2 == LINK_DFX2_RCVR_HOLD_STS_MSK) || (link_dfx2 & LINK_DFX2_SEND_HOLD_STS_MSK)) { u32 txid_auto; txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO); txid_auto |= TXID_AUTO_CTB_MSK; hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO, txid_auto); return; } } } static void prep_abort_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot) { struct sas_task *task = slot->task; struct sas_internal_abort_task *abort = &task->abort_task; struct domain_device *dev = task->dev; struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr; struct hisi_sas_port *port = slot->port; struct timer_list *timer = &slot->internal_abort_timer; struct hisi_sas_device *sas_dev = dev->lldd_dev; /* setup the quirk timer */ timer_setup(timer, hisi_sas_internal_abort_quirk_timeout, 0); /* Set the timeout to 10ms less than internal abort timeout */ mod_timer(timer, jiffies + msecs_to_jiffies(100)); /* dw0 */ hdr->dw0 = cpu_to_le32((5 << CMD_HDR_CMD_OFF) | /*abort*/ (port->id << CMD_HDR_PORT_OFF) | (dev_is_sata(dev) << CMD_HDR_ABORT_DEVICE_TYPE_OFF) | (abort->type << CMD_HDR_ABORT_FLAG_OFF)); /* dw1 */ hdr->dw1 = cpu_to_le32(sas_dev->device_id << CMD_HDR_DEV_ID_OFF); /* dw7 */ hdr->dw7 = cpu_to_le32(abort->tag << CMD_HDR_ABORT_IPTT_OFF); hdr->transfer_tags = cpu_to_le32(slot->idx); } static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba) { int i, res = IRQ_HANDLED; u32 port_id, link_rate; struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; struct asd_sas_phy *sas_phy = &phy->sas_phy; struct device *dev = hisi_hba->dev; u32 *frame_rcvd = (u32 *)sas_phy->frame_rcvd; struct sas_identify_frame *id = (struct sas_identify_frame *)frame_rcvd; hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1); if (is_sata_phy_v2_hw(hisi_hba, phy_no)) goto end; del_timer(&phy->timer); if (phy_no == 8) { u32 port_state = hisi_sas_read32(hisi_hba, PORT_STATE); port_id = (port_state & PORT_STATE_PHY8_PORT_NUM_MSK) >> PORT_STATE_PHY8_PORT_NUM_OFF; link_rate = (port_state & PORT_STATE_PHY8_CONN_RATE_MSK) >> PORT_STATE_PHY8_CONN_RATE_OFF; } else { port_id = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA); port_id = (port_id >> (4 * phy_no)) & 0xf; link_rate = hisi_sas_read32(hisi_hba, PHY_CONN_RATE); link_rate = (link_rate >> (phy_no * 4)) & 0xf; } if (port_id == 0xf) { dev_err(dev, "phyup: phy%d invalid portid\n", phy_no); res = IRQ_NONE; goto end; } for (i = 0; i < 6; i++) { u32 idaf = hisi_sas_phy_read32(hisi_hba, phy_no, RX_IDAF_DWORD0 + (i * 4)); frame_rcvd[i] = 
__swab32(idaf); } sas_phy->linkrate = link_rate; sas_phy->oob_mode = SAS_OOB_MODE; memcpy(sas_phy->attached_sas_addr, &id->sas_addr, SAS_ADDR_SIZE); dev_info(dev, "phyup: phy%d link_rate=%d\n", phy_no, link_rate); phy->port_id = port_id; phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA); phy->phy_type |= PORT_TYPE_SAS; phy->phy_attached = 1; phy->identify.device_type = id->dev_type; phy->frame_rcvd_size = sizeof(struct sas_identify_frame); if (phy->identify.device_type == SAS_END_DEVICE) phy->identify.target_port_protocols = SAS_PROTOCOL_SSP; else if (phy->identify.device_type != SAS_PHY_UNUSED) { phy->identify.target_port_protocols = SAS_PROTOCOL_SMP; if (!timer_pending(&hisi_hba->timer)) set_link_timer_quirk(hisi_hba); } hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP); end: if (phy->reset_completion) complete(phy->reset_completion); hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, CHL_INT0_SL_PHY_ENABLE_MSK); hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 0); return res; } static bool check_any_wideports_v2_hw(struct hisi_hba *hisi_hba) { u32 port_state; port_state = hisi_sas_read32(hisi_hba, PORT_STATE); if (port_state & 0x1ff) return true; return false; } static int phy_down_v2_hw(int phy_no, struct hisi_hba *hisi_hba) { u32 phy_state, sl_ctrl, txid_auto; struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; struct hisi_sas_port *port = phy->port; struct device *dev = hisi_hba->dev; del_timer(&phy->timer); hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1); phy_state = hisi_sas_read32(hisi_hba, PHY_STATE); dev_info(dev, "phydown: phy%d phy_state=0x%x\n", phy_no, phy_state); hisi_sas_phy_down(hisi_hba, phy_no, (phy_state & 1 << phy_no) ? 1 : 0, GFP_ATOMIC); sl_ctrl = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL); hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_ctrl & ~SL_CONTROL_CTA_MSK); if (port && !get_wideport_bitmap_v2_hw(hisi_hba, port->id)) if (!check_any_wideports_v2_hw(hisi_hba) && timer_pending(&hisi_hba->timer)) del_timer(&hisi_hba->timer); txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO); hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO, txid_auto | TXID_AUTO_CT3_MSK); hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, CHL_INT0_NOT_RDY_MSK); hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 0); return IRQ_HANDLED; } static irqreturn_t int_phy_updown_v2_hw(int irq_no, void *p) { struct hisi_hba *hisi_hba = p; u32 irq_msk; int phy_no = 0; irqreturn_t res = IRQ_NONE; irq_msk = (hisi_sas_read32(hisi_hba, HGC_INVLD_DQE_INFO) >> HGC_INVLD_DQE_INFO_FB_CH0_OFF) & 0x1ff; while (irq_msk) { if (irq_msk & 1) { u32 reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT0); switch (reg_value & (CHL_INT0_NOT_RDY_MSK | CHL_INT0_SL_PHY_ENABLE_MSK)) { case CHL_INT0_SL_PHY_ENABLE_MSK: /* phy up */ if (phy_up_v2_hw(phy_no, hisi_hba) == IRQ_HANDLED) res = IRQ_HANDLED; break; case CHL_INT0_NOT_RDY_MSK: /* phy down */ if (phy_down_v2_hw(phy_no, hisi_hba) == IRQ_HANDLED) res = IRQ_HANDLED; break; case (CHL_INT0_NOT_RDY_MSK | CHL_INT0_SL_PHY_ENABLE_MSK): reg_value = hisi_sas_read32(hisi_hba, PHY_STATE); if (reg_value & BIT(phy_no)) { /* phy up */ if (phy_up_v2_hw(phy_no, hisi_hba) == IRQ_HANDLED) res = IRQ_HANDLED; } else { /* phy down */ if (phy_down_v2_hw(phy_no, hisi_hba) == IRQ_HANDLED) res = IRQ_HANDLED; } break; default: break; } } irq_msk >>= 1; phy_no++; } return res; } static void phy_bcast_v2_hw(int phy_no, struct hisi_hba *hisi_hba) { struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; u32 bcast_status; hisi_sas_phy_write32(hisi_hba, 
phy_no, SL_RX_BCAST_CHK_MSK, 1); bcast_status = hisi_sas_phy_read32(hisi_hba, phy_no, RX_PRIMS_STATUS); if (bcast_status & RX_BCAST_CHG_MSK) hisi_sas_phy_bcast(phy); hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, CHL_INT0_SL_RX_BCST_ACK_MSK); hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0); } static const struct hisi_sas_hw_error port_ecc_axi_error[] = { { .irq_msk = BIT(CHL_INT1_DMAC_TX_ECC_ERR_OFF), .msg = "dmac_tx_ecc_bad_err", }, { .irq_msk = BIT(CHL_INT1_DMAC_RX_ECC_ERR_OFF), .msg = "dmac_rx_ecc_bad_err", }, { .irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_WR_ERR_OFF), .msg = "dma_tx_axi_wr_err", }, { .irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_RD_ERR_OFF), .msg = "dma_tx_axi_rd_err", }, { .irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF), .msg = "dma_rx_axi_wr_err", }, { .irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF), .msg = "dma_rx_axi_rd_err", }, }; static irqreturn_t int_chnl_int_v2_hw(int irq_no, void *p) { struct hisi_hba *hisi_hba = p; struct device *dev = hisi_hba->dev; u32 ent_msk, ent_tmp, irq_msk; int phy_no = 0; ent_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3); ent_tmp = ent_msk; ent_msk |= ENT_INT_SRC_MSK3_ENT95_MSK_MSK; hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, ent_msk); irq_msk = (hisi_sas_read32(hisi_hba, HGC_INVLD_DQE_INFO) >> HGC_INVLD_DQE_INFO_FB_CH3_OFF) & 0x1ff; while (irq_msk) { u32 irq_value0 = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT0); u32 irq_value1 = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT1); u32 irq_value2 = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2); if ((irq_msk & (1 << phy_no)) && irq_value1) { int i; for (i = 0; i < ARRAY_SIZE(port_ecc_axi_error); i++) { const struct hisi_sas_hw_error *error = &port_ecc_axi_error[i]; if (!(irq_value1 & error->irq_msk)) continue; dev_warn(dev, "%s error (phy%d 0x%x) found!\n", error->msg, phy_no, irq_value1); queue_work(hisi_hba->wq, &hisi_hba->rst_work); } hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT1, irq_value1); } if ((irq_msk & (1 << phy_no)) && irq_value2) { struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; if (irq_value2 & BIT(CHL_INT2_SL_IDAF_TOUT_CONF_OFF)) { dev_warn(dev, "phy%d identify timeout\n", phy_no); hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET); } hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2, irq_value2); } if ((irq_msk & (1 << phy_no)) && irq_value0) { if (irq_value0 & CHL_INT0_SL_RX_BCST_ACK_MSK) phy_bcast_v2_hw(phy_no, hisi_hba); if (irq_value0 & CHL_INT0_PHY_RDY_MSK) hisi_sas_phy_oob_ready(hisi_hba, phy_no); hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, irq_value0 & (~CHL_INT0_HOTPLUG_TOUT_MSK) & (~CHL_INT0_SL_PHY_ENABLE_MSK) & (~CHL_INT0_NOT_RDY_MSK)); } irq_msk &= ~(1 << phy_no); phy_no++; } hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, ent_tmp); return IRQ_HANDLED; } static void one_bit_ecc_error_process_v2_hw(struct hisi_hba *hisi_hba, u32 irq_value) { struct device *dev = hisi_hba->dev; const struct hisi_sas_hw_error *ecc_error; u32 val; int i; for (i = 0; i < ARRAY_SIZE(one_bit_ecc_errors); i++) { ecc_error = &one_bit_ecc_errors[i]; if (irq_value & ecc_error->irq_msk) { val = hisi_sas_read32(hisi_hba, ecc_error->reg); val &= ecc_error->msk; val >>= ecc_error->shift; dev_warn(dev, "%s found: mem addr is 0x%08X\n", ecc_error->msg, val); } } } static void multi_bit_ecc_error_process_v2_hw(struct hisi_hba *hisi_hba, u32 irq_value) { struct device *dev = hisi_hba->dev; const struct hisi_sas_hw_error *ecc_error; u32 val; int i; for (i = 0; i < ARRAY_SIZE(multi_bit_ecc_errors); i++) { ecc_error = &multi_bit_ecc_errors[i]; if (irq_value & ecc_error->irq_msk) { val 
= hisi_sas_read32(hisi_hba, ecc_error->reg); val &= ecc_error->msk; val >>= ecc_error->shift; dev_err(dev, "%s (0x%x) found: mem addr is 0x%08X\n", ecc_error->msg, irq_value, val); queue_work(hisi_hba->wq, &hisi_hba->rst_work); } } return; } static irqreturn_t fatal_ecc_int_v2_hw(int irq_no, void *p) { struct hisi_hba *hisi_hba = p; u32 irq_value, irq_msk; irq_msk = hisi_sas_read32(hisi_hba, SAS_ECC_INTR_MSK); hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, irq_msk | 0xffffffff); irq_value = hisi_sas_read32(hisi_hba, SAS_ECC_INTR); if (irq_value) { one_bit_ecc_error_process_v2_hw(hisi_hba, irq_value); multi_bit_ecc_error_process_v2_hw(hisi_hba, irq_value); } hisi_sas_write32(hisi_hba, SAS_ECC_INTR, irq_value); hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, irq_msk); return IRQ_HANDLED; } static const struct hisi_sas_hw_error axi_error[] = { { .msk = BIT(0), .msg = "IOST_AXI_W_ERR" }, { .msk = BIT(1), .msg = "IOST_AXI_R_ERR" }, { .msk = BIT(2), .msg = "ITCT_AXI_W_ERR" }, { .msk = BIT(3), .msg = "ITCT_AXI_R_ERR" }, { .msk = BIT(4), .msg = "SATA_AXI_W_ERR" }, { .msk = BIT(5), .msg = "SATA_AXI_R_ERR" }, { .msk = BIT(6), .msg = "DQE_AXI_R_ERR" }, { .msk = BIT(7), .msg = "CQE_AXI_W_ERR" }, {} }; static const struct hisi_sas_hw_error fifo_error[] = { { .msk = BIT(8), .msg = "CQE_WINFO_FIFO" }, { .msk = BIT(9), .msg = "CQE_MSG_FIFIO" }, { .msk = BIT(10), .msg = "GETDQE_FIFO" }, { .msk = BIT(11), .msg = "CMDP_FIFO" }, { .msk = BIT(12), .msg = "AWTCTRL_FIFO" }, {} }; static const struct hisi_sas_hw_error fatal_axi_errors[] = { { .irq_msk = BIT(ENT_INT_SRC3_WP_DEPTH_OFF), .msg = "write pointer and depth", }, { .irq_msk = BIT(ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF), .msg = "iptt no match slot", }, { .irq_msk = BIT(ENT_INT_SRC3_RP_DEPTH_OFF), .msg = "read pointer and depth", }, { .irq_msk = BIT(ENT_INT_SRC3_AXI_OFF), .reg = HGC_AXI_FIFO_ERR_INFO, .sub = axi_error, }, { .irq_msk = BIT(ENT_INT_SRC3_FIFO_OFF), .reg = HGC_AXI_FIFO_ERR_INFO, .sub = fifo_error, }, { .irq_msk = BIT(ENT_INT_SRC3_LM_OFF), .msg = "LM add/fetch list", }, { .irq_msk = BIT(ENT_INT_SRC3_ABT_OFF), .msg = "SAS_HGC_ABT fetch LM list", }, }; static irqreturn_t fatal_axi_int_v2_hw(int irq_no, void *p) { struct hisi_hba *hisi_hba = p; u32 irq_value, irq_msk, err_value; struct device *dev = hisi_hba->dev; const struct hisi_sas_hw_error *axi_error; int i; irq_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3); hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk | 0xfffffffe); irq_value = hisi_sas_read32(hisi_hba, ENT_INT_SRC3); for (i = 0; i < ARRAY_SIZE(fatal_axi_errors); i++) { axi_error = &fatal_axi_errors[i]; if (!(irq_value & axi_error->irq_msk)) continue; hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 1 << axi_error->shift); if (axi_error->sub) { const struct hisi_sas_hw_error *sub = axi_error->sub; err_value = hisi_sas_read32(hisi_hba, axi_error->reg); for (; sub->msk || sub->msg; sub++) { if (!(err_value & sub->msk)) continue; dev_err(dev, "%s (0x%x) found!\n", sub->msg, irq_value); queue_work(hisi_hba->wq, &hisi_hba->rst_work); } } else { dev_err(dev, "%s (0x%x) found!\n", axi_error->msg, irq_value); queue_work(hisi_hba->wq, &hisi_hba->rst_work); } } if (irq_value & BIT(ENT_INT_SRC3_ITC_INT_OFF)) { u32 reg_val = hisi_sas_read32(hisi_hba, ITCT_CLR); u32 dev_id = reg_val & ITCT_DEV_MSK; struct hisi_sas_device *sas_dev = &hisi_hba->devices[dev_id]; hisi_sas_write32(hisi_hba, ITCT_CLR, 0); dev_dbg(dev, "clear ITCT ok\n"); complete(sas_dev->completion); } hisi_sas_write32(hisi_hba, ENT_INT_SRC3, irq_value); hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 
irq_msk); return IRQ_HANDLED; } static irqreturn_t cq_thread_v2_hw(int irq_no, void *p) { struct hisi_sas_cq *cq = p; struct hisi_hba *hisi_hba = cq->hisi_hba; struct hisi_sas_slot *slot; struct hisi_sas_itct *itct; struct hisi_sas_complete_v2_hdr *complete_queue; u32 rd_point = cq->rd_point, wr_point, dev_id; int queue = cq->id; if (unlikely(hisi_hba->reject_stp_links_msk)) phys_try_accept_stp_links_v2_hw(hisi_hba); complete_queue = hisi_hba->complete_hdr[queue]; wr_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_WR_PTR + (0x14 * queue)); while (rd_point != wr_point) { struct hisi_sas_complete_v2_hdr *complete_hdr; int iptt; complete_hdr = &complete_queue[rd_point]; /* Check for NCQ completion */ if (complete_hdr->act) { u32 act_tmp = le32_to_cpu(complete_hdr->act); int ncq_tag_count = ffs(act_tmp); u32 dw1 = le32_to_cpu(complete_hdr->dw1); dev_id = (dw1 & CMPLT_HDR_DEV_ID_MSK) >> CMPLT_HDR_DEV_ID_OFF; itct = &hisi_hba->itct[dev_id]; /* The NCQ tags are held in the itct header */ while (ncq_tag_count) { __le64 *_ncq_tag = &itct->qw4_15[0], __ncq_tag; u64 ncq_tag; ncq_tag_count--; __ncq_tag = _ncq_tag[ncq_tag_count / 5]; ncq_tag = le64_to_cpu(__ncq_tag); iptt = (ncq_tag >> (ncq_tag_count % 5) * 12) & 0xfff; slot = &hisi_hba->slot_info[iptt]; slot->cmplt_queue_slot = rd_point; slot->cmplt_queue = queue; slot_complete_v2_hw(hisi_hba, slot); act_tmp &= ~(1 << ncq_tag_count); ncq_tag_count = ffs(act_tmp); } } else { u32 dw1 = le32_to_cpu(complete_hdr->dw1); iptt = dw1 & CMPLT_HDR_IPTT_MSK; slot = &hisi_hba->slot_info[iptt]; slot->cmplt_queue_slot = rd_point; slot->cmplt_queue = queue; slot_complete_v2_hw(hisi_hba, slot); } if (++rd_point >= HISI_SAS_QUEUE_SLOTS) rd_point = 0; } /* update rd_point */ cq->rd_point = rd_point; hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point); return IRQ_HANDLED; } static irqreturn_t cq_interrupt_v2_hw(int irq_no, void *p) { struct hisi_sas_cq *cq = p; struct hisi_hba *hisi_hba = cq->hisi_hba; int queue = cq->id; hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue); return IRQ_WAKE_THREAD; } static irqreturn_t sata_int_v2_hw(int irq_no, void *p) { struct hisi_sas_phy *phy = p; struct hisi_hba *hisi_hba = phy->hisi_hba; struct asd_sas_phy *sas_phy = &phy->sas_phy; struct device *dev = hisi_hba->dev; struct hisi_sas_initial_fis *initial_fis; struct dev_to_host_fis *fis; u32 ent_tmp, ent_msk, ent_int, port_id, link_rate, hard_phy_linkrate; irqreturn_t res = IRQ_HANDLED; u8 attached_sas_addr[SAS_ADDR_SIZE] = {0}; int phy_no, offset; del_timer(&phy->timer); phy_no = sas_phy->id; initial_fis = &hisi_hba->initial_fis[phy_no]; fis = &initial_fis->fis; offset = 4 * (phy_no / 4); ent_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK1 + offset); hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1 + offset, ent_msk | 1 << ((phy_no % 4) * 8)); ent_int = hisi_sas_read32(hisi_hba, ENT_INT_SRC1 + offset); ent_tmp = ent_int & (1 << (ENT_INT_SRC1_D2H_FIS_CH1_OFF * (phy_no % 4))); ent_int >>= ENT_INT_SRC1_D2H_FIS_CH1_OFF * (phy_no % 4); if ((ent_int & ENT_INT_SRC1_D2H_FIS_CH0_MSK) == 0) { dev_warn(dev, "sata int: phy%d did not receive FIS\n", phy_no); res = IRQ_NONE; goto end; } /* check ERR bit of Status Register */ if (fis->status & ATA_ERR) { dev_warn(dev, "sata int: phy%d FIS status: 0x%x\n", phy_no, fis->status); hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET); res = IRQ_NONE; goto end; } if (unlikely(phy_no == 8)) { u32 port_state = hisi_sas_read32(hisi_hba, PORT_STATE); port_id = (port_state & PORT_STATE_PHY8_PORT_NUM_MSK) >> PORT_STATE_PHY8_PORT_NUM_OFF; 
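		/*
		 * Editor's note (descriptive only, inferred from the else
		 * branch below and from phy_up_v2_hw()): phy 8 has no nibble
		 * in PHY_PORT_NUM_MA / PHY_CONN_RATE, so its port number and
		 * connection rate are taken from PORT_STATE instead.
		 */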
link_rate = (port_state & PORT_STATE_PHY8_CONN_RATE_MSK) >> PORT_STATE_PHY8_CONN_RATE_OFF; } else { port_id = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA); port_id = (port_id >> (4 * phy_no)) & 0xf; link_rate = hisi_sas_read32(hisi_hba, PHY_CONN_RATE); link_rate = (link_rate >> (phy_no * 4)) & 0xf; } if (port_id == 0xf) { dev_err(dev, "sata int: phy%d invalid portid\n", phy_no); res = IRQ_NONE; goto end; } sas_phy->linkrate = link_rate; hard_phy_linkrate = hisi_sas_phy_read32(hisi_hba, phy_no, HARD_PHY_LINKRATE); phy->maximum_linkrate = hard_phy_linkrate & 0xf; phy->minimum_linkrate = (hard_phy_linkrate >> 4) & 0xf; sas_phy->oob_mode = SATA_OOB_MODE; /* Make up some unique SAS address */ attached_sas_addr[0] = 0x50; attached_sas_addr[6] = hisi_hba->shost->host_no; attached_sas_addr[7] = phy_no; memcpy(sas_phy->attached_sas_addr, attached_sas_addr, SAS_ADDR_SIZE); memcpy(sas_phy->frame_rcvd, fis, sizeof(struct dev_to_host_fis)); dev_info(dev, "sata int phyup: phy%d link_rate=%d\n", phy_no, link_rate); phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA); phy->port_id = port_id; phy->phy_type |= PORT_TYPE_SATA; phy->phy_attached = 1; phy->identify.device_type = SAS_SATA_DEV; phy->frame_rcvd_size = sizeof(struct dev_to_host_fis); phy->identify.target_port_protocols = SAS_PROTOCOL_SATA; hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP); if (phy->reset_completion) complete(phy->reset_completion); end: hisi_sas_write32(hisi_hba, ENT_INT_SRC1 + offset, ent_tmp); hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1 + offset, ent_msk); return res; } static irq_handler_t phy_interrupts[HISI_SAS_PHY_INT_NR] = { int_phy_updown_v2_hw, int_chnl_int_v2_hw, }; static irq_handler_t fatal_interrupts[HISI_SAS_FATAL_INT_NR] = { fatal_ecc_int_v2_hw, fatal_axi_int_v2_hw }; #define CQ0_IRQ_INDEX (96) static int hisi_sas_v2_interrupt_preinit(struct hisi_hba *hisi_hba) { struct platform_device *pdev = hisi_hba->platform_dev; struct Scsi_Host *shost = hisi_hba->shost; struct irq_affinity desc = { .pre_vectors = CQ0_IRQ_INDEX, .post_vectors = 16, }; int resv = desc.pre_vectors + desc.post_vectors, minvec = resv + 1, nvec; nvec = devm_platform_get_irqs_affinity(pdev, &desc, minvec, 128, &hisi_hba->irq_map); if (nvec < 0) return nvec; shost->nr_hw_queues = hisi_hba->cq_nvecs = nvec - resv; return 0; } /* * There is a limitation in the hip06 chipset that we need * to map in all mbigen interrupts, even if they are not used. 
*/ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba) { struct platform_device *pdev = hisi_hba->platform_dev; struct device *dev = &pdev->dev; int irq, rc = 0; int i, phy_no, fatal_no, queue_no; for (i = 0; i < HISI_SAS_PHY_INT_NR; i++) { irq = hisi_hba->irq_map[i + 1]; /* Phy up/down is irq1 */ rc = devm_request_irq(dev, irq, phy_interrupts[i], 0, DRV_NAME " phy", hisi_hba); if (rc) { dev_err(dev, "irq init: could not request phy interrupt %d, rc=%d\n", irq, rc); rc = -ENOENT; goto err_out; } } for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) { struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; irq = hisi_hba->irq_map[phy_no + 72]; rc = devm_request_irq(dev, irq, sata_int_v2_hw, 0, DRV_NAME " sata", phy); if (rc) { dev_err(dev, "irq init: could not request sata interrupt %d, rc=%d\n", irq, rc); rc = -ENOENT; goto err_out; } } for (fatal_no = 0; fatal_no < HISI_SAS_FATAL_INT_NR; fatal_no++) { irq = hisi_hba->irq_map[fatal_no + 81]; rc = devm_request_irq(dev, irq, fatal_interrupts[fatal_no], 0, DRV_NAME " fatal", hisi_hba); if (rc) { dev_err(dev, "irq init: could not request fatal interrupt %d, rc=%d\n", irq, rc); rc = -ENOENT; goto err_out; } } for (queue_no = 0; queue_no < hisi_hba->cq_nvecs; queue_no++) { struct hisi_sas_cq *cq = &hisi_hba->cq[queue_no]; cq->irq_no = hisi_hba->irq_map[queue_no + 96]; rc = devm_request_threaded_irq(dev, cq->irq_no, cq_interrupt_v2_hw, cq_thread_v2_hw, IRQF_ONESHOT, DRV_NAME " cq", cq); if (rc) { dev_err(dev, "irq init: could not request cq interrupt %d, rc=%d\n", cq->irq_no, rc); rc = -ENOENT; goto err_out; } cq->irq_mask = irq_get_affinity_mask(cq->irq_no); } err_out: return rc; } static int hisi_sas_v2_init(struct hisi_hba *hisi_hba) { int rc; memset(hisi_hba->sata_dev_bitmap, 0, sizeof(hisi_hba->sata_dev_bitmap)); rc = hw_init_v2_hw(hisi_hba); if (rc) return rc; rc = interrupt_init_v2_hw(hisi_hba); if (rc) return rc; return 0; } static void interrupt_disable_v2_hw(struct hisi_hba *hisi_hba) { struct platform_device *pdev = hisi_hba->platform_dev; int i; for (i = 0; i < hisi_hba->queue_count; i++) hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK + 0x4 * i, 0x1); hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xffffffff); hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xffffffff); hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffffffff); hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xffffffff); for (i = 0; i < hisi_hba->n_phy; i++) { hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff); hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0xffffffff); } for (i = 0; i < 128; i++) synchronize_irq(platform_get_irq(pdev, i)); } static u32 get_phys_state_v2_hw(struct hisi_hba *hisi_hba) { return hisi_sas_read32(hisi_hba, PHY_STATE); } static int soft_reset_v2_hw(struct hisi_hba *hisi_hba) { struct device *dev = hisi_hba->dev; int rc, cnt; interrupt_disable_v2_hw(hisi_hba); hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0); hisi_sas_stop_phys(hisi_hba); mdelay(10); hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE + AM_CTRL_GLOBAL, 0x1); /* wait until bus idle */ cnt = 0; while (1) { u32 status = hisi_sas_read32_relaxed(hisi_hba, AXI_MASTER_CFG_BASE + AM_CURR_TRANS_RETURN); if (status == 0x3) break; udelay(10); if (cnt++ > 10) { dev_err(dev, "wait axi bus state to idle timeout!\n"); return -1; } } hisi_sas_init_mem(hisi_hba); rc = hw_init_v2_hw(hisi_hba); if (rc) return rc; phys_reject_stp_links_v2_hw(hisi_hba); return 0; } static int write_gpio_v2_hw(struct hisi_hba *hisi_hba, u8 reg_type, u8 reg_index, u8 reg_count, u8 *write_data) { struct device *dev = 
hisi_hba->dev; int phy_no, count; if (!hisi_hba->sgpio_regs) return -EOPNOTSUPP; switch (reg_type) { case SAS_GPIO_REG_TX: count = reg_count * 4; count = min(count, hisi_hba->n_phy); for (phy_no = 0; phy_no < count; phy_no++) { /* * GPIO_TX[n] register has the highest numbered drive * of the four in the first byte and the lowest * numbered drive in the fourth byte. * See SFF-8485 Rev. 0.7 Table 24. */ void __iomem *reg_addr = hisi_hba->sgpio_regs + reg_index * 4 + phy_no; int data_idx = phy_no + 3 - (phy_no % 4) * 2; writeb(write_data[data_idx], reg_addr); } break; default: dev_err(dev, "write gpio: unsupported or bad reg type %d\n", reg_type); return -EINVAL; } return 0; } static void wait_cmds_complete_timeout_v2_hw(struct hisi_hba *hisi_hba, int delay_ms, int timeout_ms) { struct device *dev = hisi_hba->dev; int entries, entries_old = 0, time; for (time = 0; time < timeout_ms; time += delay_ms) { entries = hisi_sas_read32(hisi_hba, CQE_SEND_CNT); if (entries == entries_old) break; entries_old = entries; msleep(delay_ms); } if (time >= timeout_ms) { dev_dbg(dev, "Wait commands complete timeout!\n"); return; } dev_dbg(dev, "wait commands complete %dms\n", time); } static struct attribute *host_v2_hw_attrs[] = { &dev_attr_phy_event_threshold.attr, NULL }; ATTRIBUTE_GROUPS(host_v2_hw); static void map_queues_v2_hw(struct Scsi_Host *shost) { struct hisi_hba *hisi_hba = shost_priv(shost); struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT]; const struct cpumask *mask; unsigned int queue, cpu; for (queue = 0; queue < qmap->nr_queues; queue++) { mask = irq_get_affinity_mask(hisi_hba->irq_map[96 + queue]); if (!mask) continue; for_each_cpu(cpu, mask) qmap->mq_map[cpu] = qmap->queue_offset + queue; } } static const struct scsi_host_template sht_v2_hw = { .name = DRV_NAME, .proc_name = DRV_NAME, .module = THIS_MODULE, .queuecommand = sas_queuecommand, .dma_need_drain = ata_scsi_dma_need_drain, .target_alloc = sas_target_alloc, .slave_configure = hisi_sas_slave_configure, .scan_finished = hisi_sas_scan_finished, .scan_start = hisi_sas_scan_start, .change_queue_depth = sas_change_queue_depth, .bios_param = sas_bios_param, .this_id = -1, .sg_tablesize = HISI_SAS_SGE_PAGE_CNT, .max_sectors = SCSI_DEFAULT_MAX_SECTORS, .eh_device_reset_handler = sas_eh_device_reset_handler, .eh_target_reset_handler = sas_eh_target_reset_handler, .slave_alloc = hisi_sas_slave_alloc, .target_destroy = sas_target_destroy, .ioctl = sas_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = sas_ioctl, #endif .shost_groups = host_v2_hw_groups, .host_reset = hisi_sas_host_reset, .map_queues = map_queues_v2_hw, .host_tagset = 1, }; static const struct hisi_sas_hw hisi_sas_v2_hw = { .hw_init = hisi_sas_v2_init, .interrupt_preinit = hisi_sas_v2_interrupt_preinit, .setup_itct = setup_itct_v2_hw, .slot_index_alloc = slot_index_alloc_quirk_v2_hw, .alloc_dev = alloc_dev_quirk_v2_hw, .sl_notify_ssp = sl_notify_ssp_v2_hw, .get_wideport_bitmap = get_wideport_bitmap_v2_hw, .clear_itct = clear_itct_v2_hw, .free_device = free_device_v2_hw, .prep_smp = prep_smp_v2_hw, .prep_ssp = prep_ssp_v2_hw, .prep_stp = prep_ata_v2_hw, .prep_abort = prep_abort_v2_hw, .start_delivery = start_delivery_v2_hw, .phys_init = phys_init_v2_hw, .phy_start = start_phy_v2_hw, .phy_disable = disable_phy_v2_hw, .phy_hard_reset = phy_hard_reset_v2_hw, .get_events = phy_get_events_v2_hw, .phy_set_linkrate = phy_set_linkrate_v2_hw, .phy_get_max_linkrate = phy_get_max_linkrate_v2_hw, .complete_hdr_size = sizeof(struct hisi_sas_complete_v2_hdr), .soft_reset = 
soft_reset_v2_hw, .get_phys_state = get_phys_state_v2_hw, .write_gpio = write_gpio_v2_hw, .wait_cmds_complete_timeout = wait_cmds_complete_timeout_v2_hw, .sht = &sht_v2_hw, }; static int hisi_sas_v2_probe(struct platform_device *pdev) { return hisi_sas_probe(pdev, &hisi_sas_v2_hw); } static const struct of_device_id sas_v2_of_match[] = { { .compatible = "hisilicon,hip06-sas-v2",}, { .compatible = "hisilicon,hip07-sas-v2",}, {}, }; MODULE_DEVICE_TABLE(of, sas_v2_of_match); static const struct acpi_device_id sas_v2_acpi_match[] = { { "HISI0162", 0 }, { } }; MODULE_DEVICE_TABLE(acpi, sas_v2_acpi_match); static struct platform_driver hisi_sas_v2_driver = { .probe = hisi_sas_v2_probe, .remove_new = hisi_sas_remove, .driver = { .name = DRV_NAME, .of_match_table = sas_v2_of_match, .acpi_match_table = ACPI_PTR(sas_v2_acpi_match), }, }; module_platform_driver(hisi_sas_v2_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("John Garry <[email protected]>"); MODULE_DESCRIPTION("HISILICON SAS controller v2 hw driver"); MODULE_ALIAS("platform:" DRV_NAME);
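/*
 * Editor's note: illustrative sketch only, not part of the upstream driver
 * (kept under #if 0 so it is never built). cq_thread_v2_hw() above recovers
 * the IPTT of each completed NCQ command from the device's ITCT entry:
 * itct->qw4_15[] packs the tags five to a 64-bit quadword, 12 bits each, so
 * tag n lives in qw4_15[n / 5] at bit offset (n % 5) * 12. The helper below
 * restates that unpacking in isolation; the name example_ncq_iptt is made up
 * for illustration.
 */
#if 0
static u16 example_ncq_iptt(const struct hisi_sas_itct *itct, int ncq_tag)
{
	/* Five 12-bit IPTTs are packed into each 64-bit quadword */
	u64 qw = le64_to_cpu(itct->qw4_15[ncq_tag / 5]);

	return (qw >> ((ncq_tag % 5) * 12)) & 0xfff;
}
#endif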
linux-master
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (c) 2017 Hisilicon Limited. */ #include <linux/sched/clock.h> #include "hisi_sas.h" #define DRV_NAME "hisi_sas_v3_hw" /* global registers need init */ #define DLVRY_QUEUE_ENABLE 0x0 #define IOST_BASE_ADDR_LO 0x8 #define IOST_BASE_ADDR_HI 0xc #define ITCT_BASE_ADDR_LO 0x10 #define ITCT_BASE_ADDR_HI 0x14 #define IO_BROKEN_MSG_ADDR_LO 0x18 #define IO_BROKEN_MSG_ADDR_HI 0x1c #define PHY_CONTEXT 0x20 #define PHY_STATE 0x24 #define PHY_PORT_NUM_MA 0x28 #define PHY_CONN_RATE 0x30 #define ITCT_CLR 0x44 #define ITCT_CLR_EN_OFF 16 #define ITCT_CLR_EN_MSK (0x1 << ITCT_CLR_EN_OFF) #define ITCT_DEV_OFF 0 #define ITCT_DEV_MSK (0x7ff << ITCT_DEV_OFF) #define SAS_AXI_USER3 0x50 #define IO_SATA_BROKEN_MSG_ADDR_LO 0x58 #define IO_SATA_BROKEN_MSG_ADDR_HI 0x5c #define SATA_INITI_D2H_STORE_ADDR_LO 0x60 #define SATA_INITI_D2H_STORE_ADDR_HI 0x64 #define CFG_MAX_TAG 0x68 #define TRANS_LOCK_ICT_TIME 0X70 #define HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL 0x84 #define HGC_SAS_TXFAIL_RETRY_CTRL 0x88 #define HGC_GET_ITV_TIME 0x90 #define DEVICE_MSG_WORK_MODE 0x94 #define OPENA_WT_CONTI_TIME 0x9c #define I_T_NEXUS_LOSS_TIME 0xa0 #define MAX_CON_TIME_LIMIT_TIME 0xa4 #define BUS_INACTIVE_LIMIT_TIME 0xa8 #define REJECT_TO_OPEN_LIMIT_TIME 0xac #define CQ_INT_CONVERGE_EN 0xb0 #define CFG_AGING_TIME 0xbc #define HGC_DFX_CFG2 0xc0 #define CFG_ABT_SET_QUERY_IPTT 0xd4 #define CFG_SET_ABORTED_IPTT_OFF 0 #define CFG_SET_ABORTED_IPTT_MSK (0xfff << CFG_SET_ABORTED_IPTT_OFF) #define CFG_SET_ABORTED_EN_OFF 12 #define CFG_ABT_SET_IPTT_DONE 0xd8 #define CFG_ABT_SET_IPTT_DONE_OFF 0 #define HGC_IOMB_PROC1_STATUS 0x104 #define HGC_LM_DFX_STATUS2 0x128 #define HGC_LM_DFX_STATUS2_IOSTLIST_OFF 0 #define HGC_LM_DFX_STATUS2_IOSTLIST_MSK (0xfff << \ HGC_LM_DFX_STATUS2_IOSTLIST_OFF) #define HGC_LM_DFX_STATUS2_ITCTLIST_OFF 12 #define HGC_LM_DFX_STATUS2_ITCTLIST_MSK (0x7ff << \ HGC_LM_DFX_STATUS2_ITCTLIST_OFF) #define HGC_CQE_ECC_ADDR 0x13c #define HGC_CQE_ECC_1B_ADDR_OFF 0 #define HGC_CQE_ECC_1B_ADDR_MSK (0x3f << HGC_CQE_ECC_1B_ADDR_OFF) #define HGC_CQE_ECC_MB_ADDR_OFF 8 #define HGC_CQE_ECC_MB_ADDR_MSK (0x3f << HGC_CQE_ECC_MB_ADDR_OFF) #define HGC_IOST_ECC_ADDR 0x140 #define HGC_IOST_ECC_1B_ADDR_OFF 0 #define HGC_IOST_ECC_1B_ADDR_MSK (0x3ff << HGC_IOST_ECC_1B_ADDR_OFF) #define HGC_IOST_ECC_MB_ADDR_OFF 16 #define HGC_IOST_ECC_MB_ADDR_MSK (0x3ff << HGC_IOST_ECC_MB_ADDR_OFF) #define HGC_DQE_ECC_ADDR 0x144 #define HGC_DQE_ECC_1B_ADDR_OFF 0 #define HGC_DQE_ECC_1B_ADDR_MSK (0xfff << HGC_DQE_ECC_1B_ADDR_OFF) #define HGC_DQE_ECC_MB_ADDR_OFF 16 #define HGC_DQE_ECC_MB_ADDR_MSK (0xfff << HGC_DQE_ECC_MB_ADDR_OFF) #define CHNL_INT_STATUS 0x148 #define TAB_DFX 0x14c #define HGC_ITCT_ECC_ADDR 0x150 #define HGC_ITCT_ECC_1B_ADDR_OFF 0 #define HGC_ITCT_ECC_1B_ADDR_MSK (0x3ff << \ HGC_ITCT_ECC_1B_ADDR_OFF) #define HGC_ITCT_ECC_MB_ADDR_OFF 16 #define HGC_ITCT_ECC_MB_ADDR_MSK (0x3ff << \ HGC_ITCT_ECC_MB_ADDR_OFF) #define HGC_AXI_FIFO_ERR_INFO 0x154 #define AXI_ERR_INFO_OFF 0 #define AXI_ERR_INFO_MSK (0xff << AXI_ERR_INFO_OFF) #define FIFO_ERR_INFO_OFF 8 #define FIFO_ERR_INFO_MSK (0xff << FIFO_ERR_INFO_OFF) #define TAB_RD_TYPE 0x15c #define INT_COAL_EN 0x19c #define OQ_INT_COAL_TIME 0x1a0 #define OQ_INT_COAL_CNT 0x1a4 #define ENT_INT_COAL_TIME 0x1a8 #define ENT_INT_COAL_CNT 0x1ac #define OQ_INT_SRC 0x1b0 #define OQ_INT_SRC_MSK 0x1b4 #define ENT_INT_SRC1 0x1b8 #define ENT_INT_SRC1_D2H_FIS_CH0_OFF 0 #define ENT_INT_SRC1_D2H_FIS_CH0_MSK (0x1 << ENT_INT_SRC1_D2H_FIS_CH0_OFF) #define ENT_INT_SRC1_D2H_FIS_CH1_OFF 8 #define 
ENT_INT_SRC1_D2H_FIS_CH1_MSK (0x1 << ENT_INT_SRC1_D2H_FIS_CH1_OFF) #define ENT_INT_SRC2 0x1bc #define ENT_INT_SRC3 0x1c0 #define ENT_INT_SRC3_WP_DEPTH_OFF 8 #define ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF 9 #define ENT_INT_SRC3_RP_DEPTH_OFF 10 #define ENT_INT_SRC3_AXI_OFF 11 #define ENT_INT_SRC3_FIFO_OFF 12 #define ENT_INT_SRC3_LM_OFF 14 #define ENT_INT_SRC3_ITC_INT_OFF 15 #define ENT_INT_SRC3_ITC_INT_MSK (0x1 << ENT_INT_SRC3_ITC_INT_OFF) #define ENT_INT_SRC3_ABT_OFF 16 #define ENT_INT_SRC3_DQE_POISON_OFF 18 #define ENT_INT_SRC3_IOST_POISON_OFF 19 #define ENT_INT_SRC3_ITCT_POISON_OFF 20 #define ENT_INT_SRC3_ITCT_NCQ_POISON_OFF 21 #define ENT_INT_SRC_MSK1 0x1c4 #define ENT_INT_SRC_MSK2 0x1c8 #define ENT_INT_SRC_MSK3 0x1cc #define ENT_INT_SRC_MSK3_ENT95_MSK_OFF 31 #define CHNL_PHYUPDOWN_INT_MSK 0x1d0 #define CHNL_ENT_INT_MSK 0x1d4 #define HGC_COM_INT_MSK 0x1d8 #define ENT_INT_SRC_MSK3_ENT95_MSK_MSK (0x1 << ENT_INT_SRC_MSK3_ENT95_MSK_OFF) #define SAS_ECC_INTR 0x1e8 #define SAS_ECC_INTR_DQE_ECC_1B_OFF 0 #define SAS_ECC_INTR_DQE_ECC_MB_OFF 1 #define SAS_ECC_INTR_IOST_ECC_1B_OFF 2 #define SAS_ECC_INTR_IOST_ECC_MB_OFF 3 #define SAS_ECC_INTR_ITCT_ECC_1B_OFF 4 #define SAS_ECC_INTR_ITCT_ECC_MB_OFF 5 #define SAS_ECC_INTR_ITCTLIST_ECC_1B_OFF 6 #define SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF 7 #define SAS_ECC_INTR_IOSTLIST_ECC_1B_OFF 8 #define SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF 9 #define SAS_ECC_INTR_CQE_ECC_1B_OFF 10 #define SAS_ECC_INTR_CQE_ECC_MB_OFF 11 #define SAS_ECC_INTR_NCQ_MEM0_ECC_1B_OFF 12 #define SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF 13 #define SAS_ECC_INTR_NCQ_MEM1_ECC_1B_OFF 14 #define SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF 15 #define SAS_ECC_INTR_NCQ_MEM2_ECC_1B_OFF 16 #define SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF 17 #define SAS_ECC_INTR_NCQ_MEM3_ECC_1B_OFF 18 #define SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF 19 #define SAS_ECC_INTR_OOO_RAM_ECC_1B_OFF 20 #define SAS_ECC_INTR_OOO_RAM_ECC_MB_OFF 21 #define SAS_ECC_INTR_MSK 0x1ec #define HGC_ERR_STAT_EN 0x238 #define CQE_SEND_CNT 0x248 #define DLVRY_Q_0_BASE_ADDR_LO 0x260 #define DLVRY_Q_0_BASE_ADDR_HI 0x264 #define DLVRY_Q_0_DEPTH 0x268 #define DLVRY_Q_0_WR_PTR 0x26c #define DLVRY_Q_0_RD_PTR 0x270 #define HYPER_STREAM_ID_EN_CFG 0xc80 #define OQ0_INT_SRC_MSK 0xc90 #define COMPL_Q_0_BASE_ADDR_LO 0x4e0 #define COMPL_Q_0_BASE_ADDR_HI 0x4e4 #define COMPL_Q_0_DEPTH 0x4e8 #define COMPL_Q_0_WR_PTR 0x4ec #define COMPL_Q_0_RD_PTR 0x4f0 #define HGC_RXM_DFX_STATUS14 0xae8 #define HGC_RXM_DFX_STATUS14_MEM0_OFF 0 #define HGC_RXM_DFX_STATUS14_MEM0_MSK (0x1ff << \ HGC_RXM_DFX_STATUS14_MEM0_OFF) #define HGC_RXM_DFX_STATUS14_MEM1_OFF 9 #define HGC_RXM_DFX_STATUS14_MEM1_MSK (0x1ff << \ HGC_RXM_DFX_STATUS14_MEM1_OFF) #define HGC_RXM_DFX_STATUS14_MEM2_OFF 18 #define HGC_RXM_DFX_STATUS14_MEM2_MSK (0x1ff << \ HGC_RXM_DFX_STATUS14_MEM2_OFF) #define HGC_RXM_DFX_STATUS15 0xaec #define HGC_RXM_DFX_STATUS15_MEM3_OFF 0 #define HGC_RXM_DFX_STATUS15_MEM3_MSK (0x1ff << \ HGC_RXM_DFX_STATUS15_MEM3_OFF) #define AWQOS_AWCACHE_CFG 0xc84 #define ARQOS_ARCACHE_CFG 0xc88 #define HILINK_ERR_DFX 0xe04 #define SAS_GPIO_CFG_0 0x1000 #define SAS_GPIO_CFG_1 0x1004 #define SAS_GPIO_TX_0_1 0x1040 #define SAS_CFG_DRIVE_VLD 0x1070 /* phy registers requiring init */ #define PORT_BASE (0x2000) #define PHY_CFG (PORT_BASE + 0x0) #define HARD_PHY_LINKRATE (PORT_BASE + 0x4) #define PHY_CFG_ENA_OFF 0 #define PHY_CFG_ENA_MSK (0x1 << PHY_CFG_ENA_OFF) #define PHY_CFG_DC_OPT_OFF 2 #define PHY_CFG_DC_OPT_MSK (0x1 << PHY_CFG_DC_OPT_OFF) #define PHY_CFG_PHY_RST_OFF 3 #define PHY_CFG_PHY_RST_MSK (0x1 << PHY_CFG_PHY_RST_OFF) #define 
PROG_PHY_LINK_RATE (PORT_BASE + 0x8) #define CFG_PROG_PHY_LINK_RATE_OFF 0 #define CFG_PROG_PHY_LINK_RATE_MSK (0xff << CFG_PROG_PHY_LINK_RATE_OFF) #define CFG_PROG_OOB_PHY_LINK_RATE_OFF 8 #define CFG_PROG_OOB_PHY_LINK_RATE_MSK (0xf << CFG_PROG_OOB_PHY_LINK_RATE_OFF) #define PHY_CTRL (PORT_BASE + 0x14) #define PHY_CTRL_RESET_OFF 0 #define PHY_CTRL_RESET_MSK (0x1 << PHY_CTRL_RESET_OFF) #define CMD_HDR_PIR_OFF 8 #define CMD_HDR_PIR_MSK (0x1 << CMD_HDR_PIR_OFF) #define SERDES_CFG (PORT_BASE + 0x1c) #define CFG_ALOS_CHK_DISABLE_OFF 9 #define CFG_ALOS_CHK_DISABLE_MSK (0x1 << CFG_ALOS_CHK_DISABLE_OFF) #define SAS_PHY_BIST_CTRL (PORT_BASE + 0x2c) #define CFG_BIST_MODE_SEL_OFF 0 #define CFG_BIST_MODE_SEL_MSK (0xf << CFG_BIST_MODE_SEL_OFF) #define CFG_LOOP_TEST_MODE_OFF 14 #define CFG_LOOP_TEST_MODE_MSK (0x3 << CFG_LOOP_TEST_MODE_OFF) #define CFG_RX_BIST_EN_OFF 16 #define CFG_RX_BIST_EN_MSK (0x1 << CFG_RX_BIST_EN_OFF) #define CFG_TX_BIST_EN_OFF 17 #define CFG_TX_BIST_EN_MSK (0x1 << CFG_TX_BIST_EN_OFF) #define CFG_BIST_TEST_OFF 18 #define CFG_BIST_TEST_MSK (0x1 << CFG_BIST_TEST_OFF) #define SAS_PHY_BIST_CODE (PORT_BASE + 0x30) #define SAS_PHY_BIST_CODE1 (PORT_BASE + 0x34) #define SAS_BIST_ERR_CNT (PORT_BASE + 0x38) #define SL_CFG (PORT_BASE + 0x84) #define AIP_LIMIT (PORT_BASE + 0x90) #define SL_CONTROL (PORT_BASE + 0x94) #define SL_CONTROL_NOTIFY_EN_OFF 0 #define SL_CONTROL_NOTIFY_EN_MSK (0x1 << SL_CONTROL_NOTIFY_EN_OFF) #define SL_CTA_OFF 17 #define SL_CTA_MSK (0x1 << SL_CTA_OFF) #define RX_PRIMS_STATUS (PORT_BASE + 0x98) #define RX_BCAST_CHG_OFF 1 #define RX_BCAST_CHG_MSK (0x1 << RX_BCAST_CHG_OFF) #define TX_ID_DWORD0 (PORT_BASE + 0x9c) #define TX_ID_DWORD1 (PORT_BASE + 0xa0) #define TX_ID_DWORD2 (PORT_BASE + 0xa4) #define TX_ID_DWORD3 (PORT_BASE + 0xa8) #define TX_ID_DWORD4 (PORT_BASE + 0xaC) #define TX_ID_DWORD5 (PORT_BASE + 0xb0) #define TX_ID_DWORD6 (PORT_BASE + 0xb4) #define TXID_AUTO (PORT_BASE + 0xb8) #define CT3_OFF 1 #define CT3_MSK (0x1 << CT3_OFF) #define TX_HARDRST_OFF 2 #define TX_HARDRST_MSK (0x1 << TX_HARDRST_OFF) #define RX_IDAF_DWORD0 (PORT_BASE + 0xc4) #define RXOP_CHECK_CFG_H (PORT_BASE + 0xfc) #define STP_LINK_TIMER (PORT_BASE + 0x120) #define STP_LINK_TIMEOUT_STATE (PORT_BASE + 0x124) #define CON_CFG_DRIVER (PORT_BASE + 0x130) #define SAS_SSP_CON_TIMER_CFG (PORT_BASE + 0x134) #define SAS_SMP_CON_TIMER_CFG (PORT_BASE + 0x138) #define SAS_STP_CON_TIMER_CFG (PORT_BASE + 0x13c) #define CHL_INT0 (PORT_BASE + 0x1b4) #define CHL_INT0_HOTPLUG_TOUT_OFF 0 #define CHL_INT0_HOTPLUG_TOUT_MSK (0x1 << CHL_INT0_HOTPLUG_TOUT_OFF) #define CHL_INT0_SL_RX_BCST_ACK_OFF 1 #define CHL_INT0_SL_RX_BCST_ACK_MSK (0x1 << CHL_INT0_SL_RX_BCST_ACK_OFF) #define CHL_INT0_SL_PHY_ENABLE_OFF 2 #define CHL_INT0_SL_PHY_ENABLE_MSK (0x1 << CHL_INT0_SL_PHY_ENABLE_OFF) #define CHL_INT0_NOT_RDY_OFF 4 #define CHL_INT0_NOT_RDY_MSK (0x1 << CHL_INT0_NOT_RDY_OFF) #define CHL_INT0_PHY_RDY_OFF 5 #define CHL_INT0_PHY_RDY_MSK (0x1 << CHL_INT0_PHY_RDY_OFF) #define CHL_INT1 (PORT_BASE + 0x1b8) #define CHL_INT1_DMAC_TX_ECC_MB_ERR_OFF 15 #define CHL_INT1_DMAC_TX_ECC_1B_ERR_OFF 16 #define CHL_INT1_DMAC_RX_ECC_MB_ERR_OFF 17 #define CHL_INT1_DMAC_RX_ECC_1B_ERR_OFF 18 #define CHL_INT1_DMAC_TX_AXI_WR_ERR_OFF 19 #define CHL_INT1_DMAC_TX_AXI_RD_ERR_OFF 20 #define CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF 21 #define CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF 22 #define CHL_INT1_DMAC_TX_FIFO_ERR_OFF 23 #define CHL_INT1_DMAC_RX_FIFO_ERR_OFF 24 #define CHL_INT1_DMAC_TX_AXI_RUSER_ERR_OFF 26 #define CHL_INT1_DMAC_RX_AXI_RUSER_ERR_OFF 27 #define CHL_INT2 (PORT_BASE + 
0x1bc) #define CHL_INT2_SL_IDAF_TOUT_CONF_OFF 0 #define CHL_INT2_RX_DISP_ERR_OFF 28 #define CHL_INT2_RX_CODE_ERR_OFF 29 #define CHL_INT2_RX_INVLD_DW_OFF 30 #define CHL_INT2_STP_LINK_TIMEOUT_OFF 31 #define CHL_INT0_MSK (PORT_BASE + 0x1c0) #define CHL_INT1_MSK (PORT_BASE + 0x1c4) #define CHL_INT2_MSK (PORT_BASE + 0x1c8) #define SAS_EC_INT_COAL_TIME (PORT_BASE + 0x1cc) #define CHL_INT_COAL_EN (PORT_BASE + 0x1d0) #define SAS_RX_TRAIN_TIMER (PORT_BASE + 0x2a4) #define PHY_CTRL_RDY_MSK (PORT_BASE + 0x2b0) #define PHYCTRL_NOT_RDY_MSK (PORT_BASE + 0x2b4) #define PHYCTRL_DWS_RESET_MSK (PORT_BASE + 0x2b8) #define PHYCTRL_PHY_ENA_MSK (PORT_BASE + 0x2bc) #define SL_RX_BCAST_CHK_MSK (PORT_BASE + 0x2c0) #define PHYCTRL_OOB_RESTART_MSK (PORT_BASE + 0x2c4) #define DMA_TX_STATUS (PORT_BASE + 0x2d0) #define DMA_TX_STATUS_BUSY_OFF 0 #define DMA_TX_STATUS_BUSY_MSK (0x1 << DMA_TX_STATUS_BUSY_OFF) #define DMA_RX_STATUS (PORT_BASE + 0x2e8) #define DMA_RX_STATUS_BUSY_OFF 0 #define DMA_RX_STATUS_BUSY_MSK (0x1 << DMA_RX_STATUS_BUSY_OFF) #define COARSETUNE_TIME (PORT_BASE + 0x304) #define TXDEEMPH_G1 (PORT_BASE + 0x350) #define ERR_CNT_DWS_LOST (PORT_BASE + 0x380) #define ERR_CNT_RESET_PROB (PORT_BASE + 0x384) #define ERR_CNT_INVLD_DW (PORT_BASE + 0x390) #define ERR_CNT_CODE_ERR (PORT_BASE + 0x394) #define ERR_CNT_DISP_ERR (PORT_BASE + 0x398) #define DFX_FIFO_CTRL (PORT_BASE + 0x3a0) #define DFX_FIFO_CTRL_TRIGGER_MODE_OFF 0 #define DFX_FIFO_CTRL_TRIGGER_MODE_MSK (0x7 << DFX_FIFO_CTRL_TRIGGER_MODE_OFF) #define DFX_FIFO_CTRL_DUMP_MODE_OFF 3 #define DFX_FIFO_CTRL_DUMP_MODE_MSK (0x7 << DFX_FIFO_CTRL_DUMP_MODE_OFF) #define DFX_FIFO_CTRL_SIGNAL_SEL_OFF 6 #define DFX_FIFO_CTRL_SIGNAL_SEL_MSK (0xF << DFX_FIFO_CTRL_SIGNAL_SEL_OFF) #define DFX_FIFO_CTRL_DUMP_DISABLE_OFF 10 #define DFX_FIFO_CTRL_DUMP_DISABLE_MSK (0x1 << DFX_FIFO_CTRL_DUMP_DISABLE_OFF) #define DFX_FIFO_TRIGGER (PORT_BASE + 0x3a4) #define DFX_FIFO_TRIGGER_MSK (PORT_BASE + 0x3a8) #define DFX_FIFO_DUMP_MSK (PORT_BASE + 0x3aC) #define DFX_FIFO_RD_DATA (PORT_BASE + 0x3b0) #define DEFAULT_ITCT_HW 2048 /* reset value, not reprogrammed */ #if (HISI_SAS_MAX_DEVICES > DEFAULT_ITCT_HW) #error Max ITCT exceeded #endif #define AXI_MASTER_CFG_BASE (0x5000) #define AM_CTRL_GLOBAL (0x0) #define AM_CTRL_SHUTDOWN_REQ_OFF 0 #define AM_CTRL_SHUTDOWN_REQ_MSK (0x1 << AM_CTRL_SHUTDOWN_REQ_OFF) #define AM_CURR_TRANS_RETURN (0x150) #define AM_CFG_MAX_TRANS (0x5010) #define AM_CFG_SINGLE_PORT_MAX_TRANS (0x5014) #define AXI_CFG (0x5100) #define AM_ROB_ECC_ERR_ADDR (0x510c) #define AM_ROB_ECC_ERR_ADDR_OFF 0 #define AM_ROB_ECC_ERR_ADDR_MSK 0xffffffff /* RAS registers need init */ #define RAS_BASE (0x6000) #define SAS_RAS_INTR0 (RAS_BASE) #define SAS_RAS_INTR1 (RAS_BASE + 0x04) #define SAS_RAS_INTR0_MASK (RAS_BASE + 0x08) #define SAS_RAS_INTR1_MASK (RAS_BASE + 0x0c) #define CFG_SAS_RAS_INTR_MASK (RAS_BASE + 0x1c) #define SAS_RAS_INTR2 (RAS_BASE + 0x20) #define SAS_RAS_INTR2_MASK (RAS_BASE + 0x24) /* HW dma structures */ /* Delivery queue header */ /* dw0 */ #define CMD_HDR_ABORT_FLAG_OFF 0 #define CMD_HDR_ABORT_FLAG_MSK (0x3 << CMD_HDR_ABORT_FLAG_OFF) #define CMD_HDR_ABORT_DEVICE_TYPE_OFF 2 #define CMD_HDR_ABORT_DEVICE_TYPE_MSK (0x1 << CMD_HDR_ABORT_DEVICE_TYPE_OFF) #define CMD_HDR_RESP_REPORT_OFF 5 #define CMD_HDR_RESP_REPORT_MSK (0x1 << CMD_HDR_RESP_REPORT_OFF) #define CMD_HDR_TLR_CTRL_OFF 6 #define CMD_HDR_TLR_CTRL_MSK (0x3 << CMD_HDR_TLR_CTRL_OFF) #define CMD_HDR_PORT_OFF 18 #define CMD_HDR_PORT_MSK (0xf << CMD_HDR_PORT_OFF) #define CMD_HDR_PRIORITY_OFF 27 #define CMD_HDR_PRIORITY_MSK 
(0x1 << CMD_HDR_PRIORITY_OFF) #define CMD_HDR_CMD_OFF 29 #define CMD_HDR_CMD_MSK (0x7 << CMD_HDR_CMD_OFF) /* dw1 */ #define CMD_HDR_UNCON_CMD_OFF 3 #define CMD_HDR_DIR_OFF 5 #define CMD_HDR_DIR_MSK (0x3 << CMD_HDR_DIR_OFF) #define CMD_HDR_RESET_OFF 7 #define CMD_HDR_RESET_MSK (0x1 << CMD_HDR_RESET_OFF) #define CMD_HDR_VDTL_OFF 10 #define CMD_HDR_VDTL_MSK (0x1 << CMD_HDR_VDTL_OFF) #define CMD_HDR_FRAME_TYPE_OFF 11 #define CMD_HDR_FRAME_TYPE_MSK (0x1f << CMD_HDR_FRAME_TYPE_OFF) #define CMD_HDR_DEV_ID_OFF 16 #define CMD_HDR_DEV_ID_MSK (0xffff << CMD_HDR_DEV_ID_OFF) /* dw2 */ #define CMD_HDR_CFL_OFF 0 #define CMD_HDR_CFL_MSK (0x1ff << CMD_HDR_CFL_OFF) #define CMD_HDR_NCQ_TAG_OFF 10 #define CMD_HDR_NCQ_TAG_MSK (0x1f << CMD_HDR_NCQ_TAG_OFF) #define CMD_HDR_MRFL_OFF 15 #define CMD_HDR_MRFL_MSK (0x1ff << CMD_HDR_MRFL_OFF) #define CMD_HDR_SG_MOD_OFF 24 #define CMD_HDR_SG_MOD_MSK (0x3 << CMD_HDR_SG_MOD_OFF) /* dw3 */ #define CMD_HDR_IPTT_OFF 0 #define CMD_HDR_IPTT_MSK (0xffff << CMD_HDR_IPTT_OFF) /* dw6 */ #define CMD_HDR_DIF_SGL_LEN_OFF 0 #define CMD_HDR_DIF_SGL_LEN_MSK (0xffff << CMD_HDR_DIF_SGL_LEN_OFF) #define CMD_HDR_DATA_SGL_LEN_OFF 16 #define CMD_HDR_DATA_SGL_LEN_MSK (0xffff << CMD_HDR_DATA_SGL_LEN_OFF) /* dw7 */ #define CMD_HDR_ADDR_MODE_SEL_OFF 15 #define CMD_HDR_ADDR_MODE_SEL_MSK (1 << CMD_HDR_ADDR_MODE_SEL_OFF) #define CMD_HDR_ABORT_IPTT_OFF 16 #define CMD_HDR_ABORT_IPTT_MSK (0xffff << CMD_HDR_ABORT_IPTT_OFF) /* Completion header */ /* dw0 */ #define CMPLT_HDR_CMPLT_OFF 0 #define CMPLT_HDR_CMPLT_MSK (0x3 << CMPLT_HDR_CMPLT_OFF) #define CMPLT_HDR_ERROR_PHASE_OFF 2 #define CMPLT_HDR_ERROR_PHASE_MSK (0xff << CMPLT_HDR_ERROR_PHASE_OFF) /* bit[9:2] Error Phase */ #define ERR_PHASE_RESPONSE_FRAME_REV_STAGE_OFF \ 8 #define ERR_PHASE_RESPONSE_FRAME_REV_STAGE_MSK \ (0x1 << ERR_PHASE_RESPONSE_FRAME_REV_STAGE_OFF) #define CMPLT_HDR_RSPNS_XFRD_OFF 10 #define CMPLT_HDR_RSPNS_XFRD_MSK (0x1 << CMPLT_HDR_RSPNS_XFRD_OFF) #define CMPLT_HDR_RSPNS_GOOD_OFF 11 #define CMPLT_HDR_RSPNS_GOOD_MSK (0x1 << CMPLT_HDR_RSPNS_GOOD_OFF) #define CMPLT_HDR_ERX_OFF 12 #define CMPLT_HDR_ERX_MSK (0x1 << CMPLT_HDR_ERX_OFF) #define CMPLT_HDR_ABORT_STAT_OFF 13 #define CMPLT_HDR_ABORT_STAT_MSK (0x7 << CMPLT_HDR_ABORT_STAT_OFF) /* abort_stat */ #define STAT_IO_NOT_VALID 0x1 #define STAT_IO_NO_DEVICE 0x2 #define STAT_IO_COMPLETE 0x3 #define STAT_IO_ABORTED 0x4 /* dw1 */ #define CMPLT_HDR_IPTT_OFF 0 #define CMPLT_HDR_IPTT_MSK (0xffff << CMPLT_HDR_IPTT_OFF) #define CMPLT_HDR_DEV_ID_OFF 16 #define CMPLT_HDR_DEV_ID_MSK (0xffff << CMPLT_HDR_DEV_ID_OFF) /* dw3 */ #define SATA_DISK_IN_ERROR_STATUS_OFF 8 #define SATA_DISK_IN_ERROR_STATUS_MSK (0x1 << SATA_DISK_IN_ERROR_STATUS_OFF) #define CMPLT_HDR_SATA_DISK_ERR_OFF 16 #define CMPLT_HDR_SATA_DISK_ERR_MSK (0x1 << CMPLT_HDR_SATA_DISK_ERR_OFF) #define CMPLT_HDR_IO_IN_TARGET_OFF 17 #define CMPLT_HDR_IO_IN_TARGET_MSK (0x1 << CMPLT_HDR_IO_IN_TARGET_OFF) /* bit[23:18] ERR_FIS_ATA_STATUS */ #define FIS_ATA_STATUS_ERR_OFF 18 #define FIS_ATA_STATUS_ERR_MSK (0x1 << FIS_ATA_STATUS_ERR_OFF) #define FIS_TYPE_SDB_OFF 31 #define FIS_TYPE_SDB_MSK (0x1 << FIS_TYPE_SDB_OFF) /* ITCT header */ /* qw0 */ #define ITCT_HDR_DEV_TYPE_OFF 0 #define ITCT_HDR_DEV_TYPE_MSK (0x3 << ITCT_HDR_DEV_TYPE_OFF) #define ITCT_HDR_VALID_OFF 2 #define ITCT_HDR_VALID_MSK (0x1 << ITCT_HDR_VALID_OFF) #define ITCT_HDR_MCR_OFF 5 #define ITCT_HDR_MCR_MSK (0xf << ITCT_HDR_MCR_OFF) #define ITCT_HDR_VLN_OFF 9 #define ITCT_HDR_VLN_MSK (0xf << ITCT_HDR_VLN_OFF) #define ITCT_HDR_SMP_TIMEOUT_OFF 16 #define ITCT_HDR_AWT_CONTINUE_OFF 25 #define 
ITCT_HDR_PORT_ID_OFF 28 #define ITCT_HDR_PORT_ID_MSK (0xf << ITCT_HDR_PORT_ID_OFF) /* qw2 */ #define ITCT_HDR_INLT_OFF 0 #define ITCT_HDR_INLT_MSK (0xffffULL << ITCT_HDR_INLT_OFF) #define ITCT_HDR_RTOLT_OFF 48 #define ITCT_HDR_RTOLT_MSK (0xffffULL << ITCT_HDR_RTOLT_OFF) struct hisi_sas_protect_iu_v3_hw { u32 dw0; u32 lbrtcv; u32 lbrtgv; u32 dw3; u32 dw4; u32 dw5; u32 rsv; }; struct hisi_sas_complete_v3_hdr { __le32 dw0; __le32 dw1; __le32 act; __le32 dw3; }; struct hisi_sas_err_record_v3 { /* dw0 */ __le32 trans_tx_fail_type; /* dw1 */ __le32 trans_rx_fail_type; /* dw2 */ __le16 dma_tx_err_type; __le16 sipc_rx_err_type; /* dw3 */ __le32 dma_rx_err_type; }; #define RX_DATA_LEN_UNDERFLOW_OFF 6 #define RX_DATA_LEN_UNDERFLOW_MSK (1 << RX_DATA_LEN_UNDERFLOW_OFF) #define RX_FIS_STATUS_ERR_OFF 0 #define RX_FIS_STATUS_ERR_MSK (1 << RX_FIS_STATUS_ERR_OFF) #define HISI_SAS_COMMAND_ENTRIES_V3_HW 4096 #define HISI_SAS_MSI_COUNT_V3_HW 32 #define DIR_NO_DATA 0 #define DIR_TO_INI 1 #define DIR_TO_DEVICE 2 #define DIR_RESERVED 3 #define FIS_CMD_IS_UNCONSTRAINED(fis) \ ((fis.command == ATA_CMD_READ_LOG_EXT) || \ (fis.command == ATA_CMD_READ_LOG_DMA_EXT) || \ ((fis.command == ATA_CMD_DEV_RESET) && \ ((fis.control & ATA_SRST) != 0))) #define T10_INSRT_EN_OFF 0 #define T10_INSRT_EN_MSK (1 << T10_INSRT_EN_OFF) #define T10_RMV_EN_OFF 1 #define T10_RMV_EN_MSK (1 << T10_RMV_EN_OFF) #define T10_RPLC_EN_OFF 2 #define T10_RPLC_EN_MSK (1 << T10_RPLC_EN_OFF) #define T10_CHK_EN_OFF 3 #define T10_CHK_EN_MSK (1 << T10_CHK_EN_OFF) #define INCR_LBRT_OFF 5 #define INCR_LBRT_MSK (1 << INCR_LBRT_OFF) #define USR_DATA_BLOCK_SZ_OFF 20 #define USR_DATA_BLOCK_SZ_MSK (0x3 << USR_DATA_BLOCK_SZ_OFF) #define T10_CHK_MSK_OFF 16 #define T10_CHK_REF_TAG_MSK (0xf0 << T10_CHK_MSK_OFF) #define T10_CHK_APP_TAG_MSK (0xc << T10_CHK_MSK_OFF) #define BASE_VECTORS_V3_HW 16 #define MIN_AFFINE_VECTORS_V3_HW (BASE_VECTORS_V3_HW + 1) #define CHNL_INT_STS_MSK 0xeeeeeeee #define CHNL_INT_STS_PHY_MSK 0xe #define CHNL_INT_STS_INT0_MSK BIT(1) #define CHNL_INT_STS_INT1_MSK BIT(2) #define CHNL_INT_STS_INT2_MSK BIT(3) #define CHNL_WIDTH 4 #define BAR_NO_V3_HW 5 enum { DSM_FUNC_ERR_HANDLE_MSI = 0, }; static bool hisi_sas_intr_conv; MODULE_PARM_DESC(intr_conv, "interrupt converge enable (0-1)"); /* permit overriding the host protection capabilities mask (EEDP/T10 PI) */ static int prot_mask; module_param(prot_mask, int, 0444); MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=0x0 "); /* the index of iopoll queues are bigger than interrupt queues' */ static int experimental_iopoll_q_cnt; module_param(experimental_iopoll_q_cnt, int, 0444); MODULE_PARM_DESC(experimental_iopoll_q_cnt, "number of queues to be used as poll mode, def=0"); static void debugfs_work_handler_v3_hw(struct work_struct *work); static void debugfs_snapshot_regs_v3_hw(struct hisi_hba *hisi_hba); static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off) { void __iomem *regs = hisi_hba->regs + off; return readl(regs); } static void hisi_sas_write32(struct hisi_hba *hisi_hba, u32 off, u32 val) { void __iomem *regs = hisi_hba->regs + off; writel(val, regs); } static void hisi_sas_phy_write32(struct hisi_hba *hisi_hba, int phy_no, u32 off, u32 val) { void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off; writel(val, regs); } static u32 hisi_sas_phy_read32(struct hisi_hba *hisi_hba, int phy_no, u32 off) { void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off; return readl(regs); } #define hisi_sas_read32_poll_timeout(off, val, cond, delay_us, \ timeout_us) \ 
({ \ void __iomem *regs = hisi_hba->regs + off; \ readl_poll_timeout(regs, val, cond, delay_us, timeout_us); \ }) #define hisi_sas_read32_poll_timeout_atomic(off, val, cond, delay_us, \ timeout_us) \ ({ \ void __iomem *regs = hisi_hba->regs + off; \ readl_poll_timeout_atomic(regs, val, cond, delay_us, timeout_us);\ }) static void interrupt_enable_v3_hw(struct hisi_hba *hisi_hba) { int i; for (i = 0; i < hisi_hba->queue_count; i++) hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK + 0x4 * i, 0); hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xfefefefe); hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xfefefefe); hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffc220ff); hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0x155555); for (i = 0; i < hisi_hba->n_phy; i++) { hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xf2057fff); hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0xffffbfe); hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0); hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_PHY_ENA_MSK, 0x0); hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x0); } } static void init_reg_v3_hw(struct hisi_hba *hisi_hba) { struct pci_dev *pdev = hisi_hba->pci_dev; int i, j; /* Global registers init */ hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, (u32)((1ULL << hisi_hba->queue_count) - 1)); hisi_sas_write32(hisi_hba, CFG_MAX_TAG, 0xfff0400); /* time / CLK_AHB = 2.5s / 2ns = 0x4A817C80 */ hisi_sas_write32(hisi_hba, TRANS_LOCK_ICT_TIME, 0x4A817C80); hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x108); hisi_sas_write32(hisi_hba, CFG_AGING_TIME, 0x1); hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1); hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1); hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1); hisi_sas_write32(hisi_hba, CQ_INT_CONVERGE_EN, hisi_sas_intr_conv); hisi_sas_write32(hisi_hba, OQ_INT_SRC, 0xffff); hisi_sas_write32(hisi_hba, ENT_INT_SRC1, 0xffffffff); hisi_sas_write32(hisi_hba, ENT_INT_SRC2, 0xffffffff); hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 0xffffffff); hisi_sas_write32(hisi_hba, CHNL_PHYUPDOWN_INT_MSK, 0x0); hisi_sas_write32(hisi_hba, CHNL_ENT_INT_MSK, 0x0); hisi_sas_write32(hisi_hba, HGC_COM_INT_MSK, 0x0); hisi_sas_write32(hisi_hba, AWQOS_AWCACHE_CFG, 0xf0f0); hisi_sas_write32(hisi_hba, ARQOS_ARCACHE_CFG, 0xf0f0); hisi_sas_write32(hisi_hba, HYPER_STREAM_ID_EN_CFG, 1); if (pdev->revision < 0x30) hisi_sas_write32(hisi_hba, SAS_AXI_USER3, 0); interrupt_enable_v3_hw(hisi_hba); for (i = 0; i < hisi_hba->n_phy; i++) { enum sas_linkrate max; struct hisi_sas_phy *phy = &hisi_hba->phy[i]; struct asd_sas_phy *sas_phy = &phy->sas_phy; u32 prog_phy_link_rate = hisi_sas_phy_read32(hisi_hba, i, PROG_PHY_LINK_RATE); prog_phy_link_rate &= ~CFG_PROG_PHY_LINK_RATE_MSK; if (!sas_phy->phy || (sas_phy->phy->maximum_linkrate < SAS_LINK_RATE_1_5_GBPS)) max = SAS_LINK_RATE_12_0_GBPS; else max = sas_phy->phy->maximum_linkrate; prog_phy_link_rate |= hisi_sas_get_prog_phy_linkrate_mask(max); hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE, prog_phy_link_rate); hisi_sas_phy_write32(hisi_hba, i, SAS_RX_TRAIN_TIMER, 0x13e80); hisi_sas_phy_write32(hisi_hba, i, CHL_INT0, 0xffffffff); hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff); hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xffffffff); hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000); hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0); hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_DWS_RESET_MSK, 0x0); hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x1); hisi_sas_phy_write32(hisi_hba, i, STP_LINK_TIMER, 0x7f7a120); 
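		/*
		 * Note on the per-phy programming above: PROG_PHY_LINK_RATE is
		 * updated read-modify-write (the rate field is cleared, then
		 * OR'ed with the mask for the highest link rate the phy
		 * supports, falling back to 12G), and writing all-ones to
		 * CHL_INT0/1/2 acks any channel interrupt status left over
		 * from before the reset.
		 */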
hisi_sas_phy_write32(hisi_hba, i, CON_CFG_DRIVER, 0x2a0a01); hisi_sas_phy_write32(hisi_hba, i, SAS_EC_INT_COAL_TIME, 0x30f4240); hisi_sas_phy_write32(hisi_hba, i, AIP_LIMIT, 0x2ffff); /* set value through firmware for 920B and later version */ if (pdev->revision < 0x30) { hisi_sas_phy_write32(hisi_hba, i, SAS_SSP_CON_TIMER_CFG, 0x32); hisi_sas_phy_write32(hisi_hba, i, SERDES_CFG, 0xffc00); /* used for 12G negotiate */ hisi_sas_phy_write32(hisi_hba, i, COARSETUNE_TIME, 0x1e); } /* get default FFE configuration for BIST */ for (j = 0; j < FFE_CFG_MAX; j++) { u32 val = hisi_sas_phy_read32(hisi_hba, i, TXDEEMPH_G1 + (j * 0x4)); hisi_hba->debugfs_bist_ffe[i][j] = val; } } for (i = 0; i < hisi_hba->queue_count; i++) { /* Delivery queue */ hisi_sas_write32(hisi_hba, DLVRY_Q_0_BASE_ADDR_HI + (i * 0x14), upper_32_bits(hisi_hba->cmd_hdr_dma[i])); hisi_sas_write32(hisi_hba, DLVRY_Q_0_BASE_ADDR_LO + (i * 0x14), lower_32_bits(hisi_hba->cmd_hdr_dma[i])); hisi_sas_write32(hisi_hba, DLVRY_Q_0_DEPTH + (i * 0x14), HISI_SAS_QUEUE_SLOTS); /* Completion queue */ hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_HI + (i * 0x14), upper_32_bits(hisi_hba->complete_hdr_dma[i])); hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_LO + (i * 0x14), lower_32_bits(hisi_hba->complete_hdr_dma[i])); hisi_sas_write32(hisi_hba, COMPL_Q_0_DEPTH + (i * 0x14), HISI_SAS_QUEUE_SLOTS); } /* itct */ hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_LO, lower_32_bits(hisi_hba->itct_dma)); hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_HI, upper_32_bits(hisi_hba->itct_dma)); /* iost */ hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_LO, lower_32_bits(hisi_hba->iost_dma)); hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_HI, upper_32_bits(hisi_hba->iost_dma)); /* breakpoint */ hisi_sas_write32(hisi_hba, IO_BROKEN_MSG_ADDR_LO, lower_32_bits(hisi_hba->breakpoint_dma)); hisi_sas_write32(hisi_hba, IO_BROKEN_MSG_ADDR_HI, upper_32_bits(hisi_hba->breakpoint_dma)); /* SATA broken msg */ hisi_sas_write32(hisi_hba, IO_SATA_BROKEN_MSG_ADDR_LO, lower_32_bits(hisi_hba->sata_breakpoint_dma)); hisi_sas_write32(hisi_hba, IO_SATA_BROKEN_MSG_ADDR_HI, upper_32_bits(hisi_hba->sata_breakpoint_dma)); /* SATA initial fis */ hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_LO, lower_32_bits(hisi_hba->initial_fis_dma)); hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_HI, upper_32_bits(hisi_hba->initial_fis_dma)); /* RAS registers init */ hisi_sas_write32(hisi_hba, SAS_RAS_INTR0_MASK, 0x0); hisi_sas_write32(hisi_hba, SAS_RAS_INTR1_MASK, 0x0); hisi_sas_write32(hisi_hba, SAS_RAS_INTR2_MASK, 0x0); hisi_sas_write32(hisi_hba, CFG_SAS_RAS_INTR_MASK, 0x0); /* LED registers init */ hisi_sas_write32(hisi_hba, SAS_CFG_DRIVE_VLD, 0x80000ff); hisi_sas_write32(hisi_hba, SAS_GPIO_TX_0_1, 0x80808080); hisi_sas_write32(hisi_hba, SAS_GPIO_TX_0_1 + 0x4, 0x80808080); /* Configure blink generator rate A to 1Hz and B to 4Hz */ hisi_sas_write32(hisi_hba, SAS_GPIO_CFG_1, 0x121700); hisi_sas_write32(hisi_hba, SAS_GPIO_CFG_0, 0x800000); } static void config_phy_opt_mode_v3_hw(struct hisi_hba *hisi_hba, int phy_no) { u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG); cfg &= ~PHY_CFG_DC_OPT_MSK; cfg |= 1 << PHY_CFG_DC_OPT_OFF; hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg); } static void config_id_frame_v3_hw(struct hisi_hba *hisi_hba, int phy_no) { struct sas_identify_frame identify_frame; u32 *identify_buffer; memset(&identify_frame, 0, sizeof(identify_frame)); identify_frame.dev_type = SAS_END_DEVICE; identify_frame.frame_type = 0; identify_frame._un1 = 1; identify_frame.initiator_bits = 
SAS_PROTOCOL_ALL; identify_frame.target_bits = SAS_PROTOCOL_NONE; memcpy(&identify_frame._un4_11[0], hisi_hba->sas_addr, SAS_ADDR_SIZE); memcpy(&identify_frame.sas_addr[0], hisi_hba->sas_addr, SAS_ADDR_SIZE); identify_frame.phy_id = phy_no; identify_buffer = (u32 *)(&identify_frame); hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD0, __swab32(identify_buffer[0])); hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD1, __swab32(identify_buffer[1])); hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD2, __swab32(identify_buffer[2])); hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD3, __swab32(identify_buffer[3])); hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD4, __swab32(identify_buffer[4])); hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD5, __swab32(identify_buffer[5])); } static void setup_itct_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_device *sas_dev) { struct domain_device *device = sas_dev->sas_device; struct device *dev = hisi_hba->dev; u64 qw0, device_id = sas_dev->device_id; struct hisi_sas_itct *itct = &hisi_hba->itct[device_id]; struct domain_device *parent_dev = device->parent; struct asd_sas_port *sas_port = device->port; struct hisi_sas_port *port = to_hisi_sas_port(sas_port); u64 sas_addr; memset(itct, 0, sizeof(*itct)); /* qw0 */ qw0 = 0; switch (sas_dev->dev_type) { case SAS_END_DEVICE: case SAS_EDGE_EXPANDER_DEVICE: case SAS_FANOUT_EXPANDER_DEVICE: qw0 = HISI_SAS_DEV_TYPE_SSP << ITCT_HDR_DEV_TYPE_OFF; break; case SAS_SATA_DEV: case SAS_SATA_PENDING: if (parent_dev && dev_is_expander(parent_dev->dev_type)) qw0 = HISI_SAS_DEV_TYPE_STP << ITCT_HDR_DEV_TYPE_OFF; else qw0 = HISI_SAS_DEV_TYPE_SATA << ITCT_HDR_DEV_TYPE_OFF; break; default: dev_warn(dev, "setup itct: unsupported dev type (%d)\n", sas_dev->dev_type); } qw0 |= ((1 << ITCT_HDR_VALID_OFF) | (device->linkrate << ITCT_HDR_MCR_OFF) | (1 << ITCT_HDR_VLN_OFF) | (0xfa << ITCT_HDR_SMP_TIMEOUT_OFF) | (1 << ITCT_HDR_AWT_CONTINUE_OFF) | (port->id << ITCT_HDR_PORT_ID_OFF)); itct->qw0 = cpu_to_le64(qw0); /* qw1 */ memcpy(&sas_addr, device->sas_addr, SAS_ADDR_SIZE); itct->sas_addr = cpu_to_le64(__swab64(sas_addr)); /* qw2 */ if (!dev_is_sata(device)) itct->qw2 = cpu_to_le64((5000ULL << ITCT_HDR_INLT_OFF) | (0x1ULL << ITCT_HDR_RTOLT_OFF)); } static int clear_itct_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_device *sas_dev) { DECLARE_COMPLETION_ONSTACK(completion); u64 dev_id = sas_dev->device_id; struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id]; u32 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3); struct device *dev = hisi_hba->dev; sas_dev->completion = &completion; /* clear the itct interrupt state */ if (ENT_INT_SRC3_ITC_INT_MSK & reg_val) hisi_sas_write32(hisi_hba, ENT_INT_SRC3, ENT_INT_SRC3_ITC_INT_MSK); /* clear the itct table */ reg_val = ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK); hisi_sas_write32(hisi_hba, ITCT_CLR, reg_val); if (!wait_for_completion_timeout(sas_dev->completion, HISI_SAS_CLEAR_ITCT_TIMEOUT)) { dev_warn(dev, "failed to clear ITCT\n"); return -ETIMEDOUT; } memset(itct, 0, sizeof(struct hisi_sas_itct)); return 0; } static void dereg_device_v3_hw(struct hisi_hba *hisi_hba, struct domain_device *device) { struct hisi_sas_slot *slot, *slot2; struct hisi_sas_device *sas_dev = device->lldd_dev; u32 cfg_abt_set_query_iptt; cfg_abt_set_query_iptt = hisi_sas_read32(hisi_hba, CFG_ABT_SET_QUERY_IPTT); spin_lock(&sas_dev->lock); list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry) { cfg_abt_set_query_iptt &= ~CFG_SET_ABORTED_IPTT_MSK; cfg_abt_set_query_iptt |= (1 << CFG_SET_ABORTED_EN_OFF) 
| (slot->idx << CFG_SET_ABORTED_IPTT_OFF); hisi_sas_write32(hisi_hba, CFG_ABT_SET_QUERY_IPTT, cfg_abt_set_query_iptt); } spin_unlock(&sas_dev->lock); cfg_abt_set_query_iptt &= ~(1 << CFG_SET_ABORTED_EN_OFF); hisi_sas_write32(hisi_hba, CFG_ABT_SET_QUERY_IPTT, cfg_abt_set_query_iptt); hisi_sas_write32(hisi_hba, CFG_ABT_SET_IPTT_DONE, 1 << CFG_ABT_SET_IPTT_DONE_OFF); } static int reset_hw_v3_hw(struct hisi_hba *hisi_hba) { struct device *dev = hisi_hba->dev; int ret; u32 val; hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0); /* Disable all of the PHYs */ hisi_sas_stop_phys(hisi_hba); udelay(50); /* Ensure axi bus idle */ ret = hisi_sas_read32_poll_timeout(AXI_CFG, val, !val, 20000, 1000000); if (ret) { dev_err(dev, "axi bus is not idle, ret = %d!\n", ret); return -EIO; } if (ACPI_HANDLE(dev)) { acpi_status s; s = acpi_evaluate_object(ACPI_HANDLE(dev), "_RST", NULL, NULL); if (ACPI_FAILURE(s)) { dev_err(dev, "Reset failed\n"); return -EIO; } } else { dev_err(dev, "no reset method!\n"); return -EINVAL; } return 0; } static int hw_init_v3_hw(struct hisi_hba *hisi_hba) { struct device *dev = hisi_hba->dev; struct acpi_device *acpi_dev; union acpi_object *obj; guid_t guid; int rc; rc = reset_hw_v3_hw(hisi_hba); if (rc) { dev_err(dev, "hisi_sas_reset_hw failed, rc=%d\n", rc); return rc; } msleep(100); init_reg_v3_hw(hisi_hba); if (guid_parse("D5918B4B-37AE-4E10-A99F-E5E8A6EF4C1F", &guid)) { dev_err(dev, "Parse GUID failed\n"); return -EINVAL; } /* * This DSM handles some hardware-related configurations: * 1. Switch over to MSI error handling in kernel * 2. BIOS *may* reset some register values through this method */ obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &guid, 0, DSM_FUNC_ERR_HANDLE_MSI, NULL); if (!obj) dev_warn(dev, "can not find DSM method, ignore\n"); else ACPI_FREE(obj); acpi_dev = ACPI_COMPANION(dev); if (!acpi_device_power_manageable(acpi_dev)) dev_notice(dev, "neither _PS0 nor _PR0 is defined\n"); return 0; } static void enable_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no) { u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG); cfg |= PHY_CFG_ENA_MSK; cfg &= ~PHY_CFG_PHY_RST_MSK; hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg); } static void disable_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no) { u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG); u32 irq_msk = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2_MSK); static const u32 msk = BIT(CHL_INT2_RX_DISP_ERR_OFF) | BIT(CHL_INT2_RX_CODE_ERR_OFF) | BIT(CHL_INT2_RX_INVLD_DW_OFF); u32 state; hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2_MSK, msk | irq_msk); cfg &= ~PHY_CFG_ENA_MSK; hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg); mdelay(50); state = hisi_sas_read32(hisi_hba, PHY_STATE); if (state & BIT(phy_no)) { cfg |= PHY_CFG_PHY_RST_MSK; hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg); } udelay(1); hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_INVLD_DW); hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_DISP_ERR); hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_CODE_ERR); hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2, msk); hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2_MSK, irq_msk); } static void start_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no) { config_id_frame_v3_hw(hisi_hba, phy_no); config_phy_opt_mode_v3_hw(hisi_hba, phy_no); enable_phy_v3_hw(hisi_hba, phy_no); } static void phy_hard_reset_v3_hw(struct hisi_hba *hisi_hba, int phy_no) { struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; u32 txid_auto; hisi_sas_phy_enable(hisi_hba, phy_no, 0); if (phy->identify.device_type == SAS_END_DEVICE) { 
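		/*
		 * For a directly attached end device, TX_HARDRST is set in
		 * TXID_AUTO below (read-modify-write) so that a hard reset is
		 * signalled on the link once the phy is re-enabled after the
		 * 100 ms delay.
		 */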
txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO); hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO, txid_auto | TX_HARDRST_MSK); } msleep(100); hisi_sas_phy_enable(hisi_hba, phy_no, 1); } static enum sas_linkrate phy_get_max_linkrate_v3_hw(void) { return SAS_LINK_RATE_12_0_GBPS; } static void phys_init_v3_hw(struct hisi_hba *hisi_hba) { int i; for (i = 0; i < hisi_hba->n_phy; i++) { struct hisi_sas_phy *phy = &hisi_hba->phy[i]; struct asd_sas_phy *sas_phy = &phy->sas_phy; if (!sas_phy->phy->enabled) continue; hisi_sas_phy_enable(hisi_hba, i, 1); } } static void sl_notify_ssp_v3_hw(struct hisi_hba *hisi_hba, int phy_no) { u32 sl_control; sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL); sl_control |= SL_CONTROL_NOTIFY_EN_MSK; hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control); msleep(1); sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL); sl_control &= ~SL_CONTROL_NOTIFY_EN_MSK; hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control); } static int get_wideport_bitmap_v3_hw(struct hisi_hba *hisi_hba, int port_id) { int i, bitmap = 0; u32 phy_port_num_ma = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA); u32 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE); for (i = 0; i < hisi_hba->n_phy; i++) if (phy_state & BIT(i)) if (((phy_port_num_ma >> (i * 4)) & 0xf) == port_id) bitmap |= BIT(i); return bitmap; } static void start_delivery_v3_hw(struct hisi_sas_dq *dq) { struct hisi_hba *hisi_hba = dq->hisi_hba; struct hisi_sas_slot *s, *s1, *s2 = NULL; int dlvry_queue = dq->id; int wp; list_for_each_entry_safe(s, s1, &dq->list, delivery) { if (!s->ready) break; s2 = s; list_del(&s->delivery); } if (!s2) return; /* * Ensure that memories for slots built on other CPUs is observed. */ smp_rmb(); wp = (s2->dlvry_queue_slot + 1) % HISI_SAS_QUEUE_SLOTS; hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14), wp); } static void prep_prd_sge_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot, struct hisi_sas_cmd_hdr *hdr, struct scatterlist *scatter, int n_elem) { struct hisi_sas_sge_page *sge_page = hisi_sas_sge_addr_mem(slot); struct scatterlist *sg; int i; for_each_sg(scatter, sg, n_elem, i) { struct hisi_sas_sge *entry = &sge_page->sge[i]; entry->addr = cpu_to_le64(sg_dma_address(sg)); entry->page_ctrl_0 = entry->page_ctrl_1 = 0; entry->data_len = cpu_to_le32(sg_dma_len(sg)); entry->data_off = 0; } hdr->prd_table_addr = cpu_to_le64(hisi_sas_sge_addr_dma(slot)); hdr->sg_len |= cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF); } static void prep_prd_sge_dif_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot, struct hisi_sas_cmd_hdr *hdr, struct scatterlist *scatter, int n_elem) { struct hisi_sas_sge_dif_page *sge_dif_page; struct scatterlist *sg; int i; sge_dif_page = hisi_sas_sge_dif_addr_mem(slot); for_each_sg(scatter, sg, n_elem, i) { struct hisi_sas_sge *entry = &sge_dif_page->sge[i]; entry->addr = cpu_to_le64(sg_dma_address(sg)); entry->page_ctrl_0 = 0; entry->page_ctrl_1 = 0; entry->data_len = cpu_to_le32(sg_dma_len(sg)); entry->data_off = 0; } hdr->dif_prd_table_addr = cpu_to_le64(hisi_sas_sge_dif_addr_dma(slot)); hdr->sg_len |= cpu_to_le32(n_elem << CMD_HDR_DIF_SGL_LEN_OFF); } static u32 get_prot_chk_msk_v3_hw(struct scsi_cmnd *scsi_cmnd) { unsigned char prot_flags = scsi_cmnd->prot_flags; if (prot_flags & SCSI_PROT_REF_CHECK) return T10_CHK_APP_TAG_MSK; return T10_CHK_REF_TAG_MSK | T10_CHK_APP_TAG_MSK; } static void fill_prot_v3_hw(struct scsi_cmnd *scsi_cmnd, struct hisi_sas_protect_iu_v3_hw *prot) { unsigned char 
prot_op = scsi_get_prot_op(scsi_cmnd); unsigned int interval = scsi_prot_interval(scsi_cmnd); u32 lbrt_chk_val = t10_pi_ref_tag(scsi_cmd_to_rq(scsi_cmnd)); switch (prot_op) { case SCSI_PROT_READ_INSERT: prot->dw0 |= T10_INSRT_EN_MSK; prot->lbrtgv = lbrt_chk_val; break; case SCSI_PROT_READ_STRIP: prot->dw0 |= (T10_RMV_EN_MSK | T10_CHK_EN_MSK); prot->lbrtcv = lbrt_chk_val; prot->dw4 |= get_prot_chk_msk_v3_hw(scsi_cmnd); break; case SCSI_PROT_READ_PASS: prot->dw0 |= T10_CHK_EN_MSK; prot->lbrtcv = lbrt_chk_val; prot->dw4 |= get_prot_chk_msk_v3_hw(scsi_cmnd); break; case SCSI_PROT_WRITE_INSERT: prot->dw0 |= T10_INSRT_EN_MSK; prot->lbrtgv = lbrt_chk_val; break; case SCSI_PROT_WRITE_STRIP: prot->dw0 |= (T10_RMV_EN_MSK | T10_CHK_EN_MSK); prot->lbrtcv = lbrt_chk_val; break; case SCSI_PROT_WRITE_PASS: prot->dw0 |= T10_CHK_EN_MSK; prot->lbrtcv = lbrt_chk_val; prot->dw4 |= get_prot_chk_msk_v3_hw(scsi_cmnd); break; default: WARN(1, "prot_op(0x%x) is not valid\n", prot_op); break; } switch (interval) { case 512: break; case 4096: prot->dw0 |= (0x1 << USR_DATA_BLOCK_SZ_OFF); break; case 520: prot->dw0 |= (0x2 << USR_DATA_BLOCK_SZ_OFF); break; default: WARN(1, "protection interval (0x%x) invalid\n", interval); break; } prot->dw0 |= INCR_LBRT_MSK; } static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot) { struct sas_task *task = slot->task; struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr; struct domain_device *device = task->dev; struct hisi_sas_device *sas_dev = device->lldd_dev; struct hisi_sas_port *port = slot->port; struct sas_ssp_task *ssp_task = &task->ssp_task; struct scsi_cmnd *scsi_cmnd = ssp_task->cmd; struct sas_tmf_task *tmf = slot->tmf; int has_data = 0, priority = !!tmf; unsigned char prot_op; u8 *buf_cmd; u32 dw1 = 0, dw2 = 0, len = 0; hdr->dw0 = cpu_to_le32((1 << CMD_HDR_RESP_REPORT_OFF) | (2 << CMD_HDR_TLR_CTRL_OFF) | (port->id << CMD_HDR_PORT_OFF) | (priority << CMD_HDR_PRIORITY_OFF) | (1 << CMD_HDR_CMD_OFF)); /* ssp */ dw1 = 1 << CMD_HDR_VDTL_OFF; if (tmf) { dw1 |= 2 << CMD_HDR_FRAME_TYPE_OFF; dw1 |= DIR_NO_DATA << CMD_HDR_DIR_OFF; } else { prot_op = scsi_get_prot_op(scsi_cmnd); dw1 |= 1 << CMD_HDR_FRAME_TYPE_OFF; switch (scsi_cmnd->sc_data_direction) { case DMA_TO_DEVICE: has_data = 1; dw1 |= DIR_TO_DEVICE << CMD_HDR_DIR_OFF; break; case DMA_FROM_DEVICE: has_data = 1; dw1 |= DIR_TO_INI << CMD_HDR_DIR_OFF; break; default: dw1 &= ~CMD_HDR_DIR_MSK; } } /* map itct entry */ dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF; dw2 = (((sizeof(struct ssp_command_iu) + sizeof(struct ssp_frame_hdr) + 3) / 4) << CMD_HDR_CFL_OFF) | ((HISI_SAS_MAX_SSP_RESP_SZ / 4) << CMD_HDR_MRFL_OFF) | (2 << CMD_HDR_SG_MOD_OFF); hdr->dw2 = cpu_to_le32(dw2); hdr->transfer_tags = cpu_to_le32(slot->idx); if (has_data) { prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter, slot->n_elem); if (scsi_prot_sg_count(scsi_cmnd)) prep_prd_sge_dif_v3_hw(hisi_hba, slot, hdr, scsi_prot_sglist(scsi_cmnd), slot->n_elem_dif); } hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot)); hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot)); buf_cmd = hisi_sas_cmd_hdr_addr_mem(slot) + sizeof(struct ssp_frame_hdr); memcpy(buf_cmd, &task->ssp_task.LUN, 8); if (!tmf) { buf_cmd[9] = ssp_task->task_attr; memcpy(buf_cmd + 12, scsi_cmnd->cmnd, scsi_cmnd->cmd_len); } else { buf_cmd[10] = tmf->tmf; switch (tmf->tmf) { case TMF_ABORT_TASK: case TMF_QUERY_TASK: buf_cmd[12] = (tmf->tag_of_task_to_be_managed >> 8) & 0xff; buf_cmd[13] = tmf->tag_of_task_to_be_managed & 0xff; break; default: break; 
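		/*
		 * Command IU layout built above: bytes 0-7 carry the LUN and,
		 * for a normal command, byte 9 the task attribute and bytes
		 * 12+ the CDB; for a TMF, byte 10 holds the TMF code and
		 * bytes 12-13 the big-endian tag of the task to be managed.
		 */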
} } if (has_data && (prot_op != SCSI_PROT_NORMAL)) { struct hisi_sas_protect_iu_v3_hw prot; u8 *buf_cmd_prot; hdr->dw7 |= cpu_to_le32(1 << CMD_HDR_ADDR_MODE_SEL_OFF); dw1 |= CMD_HDR_PIR_MSK; buf_cmd_prot = hisi_sas_cmd_hdr_addr_mem(slot) + sizeof(struct ssp_frame_hdr) + sizeof(struct ssp_command_iu); memset(&prot, 0, sizeof(struct hisi_sas_protect_iu_v3_hw)); fill_prot_v3_hw(scsi_cmnd, &prot); memcpy(buf_cmd_prot, &prot, sizeof(struct hisi_sas_protect_iu_v3_hw)); /* * For READ, we need length of info read to memory, while for * WRITE we need length of data written to the disk. */ if (prot_op == SCSI_PROT_WRITE_INSERT || prot_op == SCSI_PROT_READ_INSERT || prot_op == SCSI_PROT_WRITE_PASS || prot_op == SCSI_PROT_READ_PASS) { unsigned int interval = scsi_prot_interval(scsi_cmnd); unsigned int ilog2_interval = ilog2(interval); len = (task->total_xfer_len >> ilog2_interval) * 8; } } hdr->dw1 = cpu_to_le32(dw1); hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len + len); } static void prep_smp_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot) { struct sas_task *task = slot->task; struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr; struct domain_device *device = task->dev; struct hisi_sas_port *port = slot->port; struct scatterlist *sg_req; struct hisi_sas_device *sas_dev = device->lldd_dev; dma_addr_t req_dma_addr; unsigned int req_len; /* req */ sg_req = &task->smp_task.smp_req; req_len = sg_dma_len(sg_req); req_dma_addr = sg_dma_address(sg_req); /* create header */ /* dw0 */ hdr->dw0 = cpu_to_le32((port->id << CMD_HDR_PORT_OFF) | (1 << CMD_HDR_PRIORITY_OFF) | /* high pri */ (2 << CMD_HDR_CMD_OFF)); /* smp */ /* map itct entry */ hdr->dw1 = cpu_to_le32((sas_dev->device_id << CMD_HDR_DEV_ID_OFF) | (1 << CMD_HDR_FRAME_TYPE_OFF) | (DIR_NO_DATA << CMD_HDR_DIR_OFF)); /* dw2 */ hdr->dw2 = cpu_to_le32((((req_len - 4) / 4) << CMD_HDR_CFL_OFF) | (HISI_SAS_MAX_SMP_RESP_SZ / 4 << CMD_HDR_MRFL_OFF)); hdr->transfer_tags = cpu_to_le32(slot->idx << CMD_HDR_IPTT_OFF); hdr->cmd_table_addr = cpu_to_le64(req_dma_addr); hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot)); } static void prep_ata_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot) { struct sas_task *task = slot->task; struct domain_device *device = task->dev; struct domain_device *parent_dev = device->parent; struct hisi_sas_device *sas_dev = device->lldd_dev; struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr; struct asd_sas_port *sas_port = device->port; struct hisi_sas_port *port = to_hisi_sas_port(sas_port); u8 *buf_cmd; int has_data = 0, hdr_tag = 0; u32 dw1 = 0, dw2 = 0; hdr->dw0 = cpu_to_le32(port->id << CMD_HDR_PORT_OFF); if (parent_dev && dev_is_expander(parent_dev->dev_type)) hdr->dw0 |= cpu_to_le32(3 << CMD_HDR_CMD_OFF); else hdr->dw0 |= cpu_to_le32(4U << CMD_HDR_CMD_OFF); switch (task->data_dir) { case DMA_TO_DEVICE: has_data = 1; dw1 |= DIR_TO_DEVICE << CMD_HDR_DIR_OFF; break; case DMA_FROM_DEVICE: has_data = 1; dw1 |= DIR_TO_INI << CMD_HDR_DIR_OFF; break; default: dw1 &= ~CMD_HDR_DIR_MSK; } if ((task->ata_task.fis.command == ATA_CMD_DEV_RESET) && (task->ata_task.fis.control & ATA_SRST)) dw1 |= 1 << CMD_HDR_RESET_OFF; dw1 |= (hisi_sas_get_ata_protocol( &task->ata_task.fis, task->data_dir)) << CMD_HDR_FRAME_TYPE_OFF; dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF; if (FIS_CMD_IS_UNCONSTRAINED(task->ata_task.fis)) dw1 |= 1 << CMD_HDR_UNCON_CMD_OFF; hdr->dw1 = cpu_to_le32(dw1); /* dw2 */ if (task->ata_task.use_ncq) { struct ata_queued_cmd *qc = task->uldd_task; hdr_tag = qc->tag; 
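		/*
		 * Illustrative note: the NCQ tag is carried in bits 7:3 of the
		 * FIS sector count field and in the NCQ_TAG field of dw2
		 * below; e.g. tag 5 becomes sector_count |= (5 << 3) == 0x28.
		 */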
task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3); dw2 |= hdr_tag << CMD_HDR_NCQ_TAG_OFF; } dw2 |= (HISI_SAS_MAX_STP_RESP_SZ / 4) << CMD_HDR_CFL_OFF | 2 << CMD_HDR_SG_MOD_OFF; hdr->dw2 = cpu_to_le32(dw2); /* dw3 */ hdr->transfer_tags = cpu_to_le32(slot->idx); if (has_data) prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter, slot->n_elem); hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len); hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot)); hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot)); buf_cmd = hisi_sas_cmd_hdr_addr_mem(slot); if (likely(!task->ata_task.device_control_reg_update)) task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */ /* fill in command FIS */ memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis)); } static void prep_abort_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot) { struct sas_task *task = slot->task; struct sas_internal_abort_task *abort = &task->abort_task; struct domain_device *dev = task->dev; struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr; struct hisi_sas_port *port = slot->port; struct hisi_sas_device *sas_dev = dev->lldd_dev; bool sata = dev_is_sata(dev); /* dw0 */ hdr->dw0 = cpu_to_le32((5U << CMD_HDR_CMD_OFF) | /* abort */ (port->id << CMD_HDR_PORT_OFF) | (sata << CMD_HDR_ABORT_DEVICE_TYPE_OFF) | (abort->type << CMD_HDR_ABORT_FLAG_OFF)); /* dw1 */ hdr->dw1 = cpu_to_le32(sas_dev->device_id << CMD_HDR_DEV_ID_OFF); /* dw7 */ hdr->dw7 = cpu_to_le32(abort->tag << CMD_HDR_ABORT_IPTT_OFF); hdr->transfer_tags = cpu_to_le32(slot->idx); } static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba) { int i; irqreturn_t res; u32 context, port_id, link_rate; struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; struct asd_sas_phy *sas_phy = &phy->sas_phy; struct device *dev = hisi_hba->dev; hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1); port_id = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA); port_id = (port_id >> (4 * phy_no)) & 0xf; link_rate = hisi_sas_read32(hisi_hba, PHY_CONN_RATE); link_rate = (link_rate >> (phy_no * 4)) & 0xf; if (port_id == 0xf) { dev_err(dev, "phyup: phy%d invalid portid\n", phy_no); res = IRQ_NONE; goto end; } sas_phy->linkrate = link_rate; phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA); /* Check for SATA dev */ context = hisi_sas_read32(hisi_hba, PHY_CONTEXT); if (context & (1 << phy_no)) { struct hisi_sas_initial_fis *initial_fis; struct dev_to_host_fis *fis; u8 attached_sas_addr[SAS_ADDR_SIZE] = {0}; struct Scsi_Host *shost = hisi_hba->shost; dev_info(dev, "phyup: phy%d link_rate=%d(sata)\n", phy_no, link_rate); initial_fis = &hisi_hba->initial_fis[phy_no]; fis = &initial_fis->fis; /* check ERR bit of Status Register */ if (fis->status & ATA_ERR) { dev_warn(dev, "sata int: phy%d FIS status: 0x%x\n", phy_no, fis->status); hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET); res = IRQ_NONE; goto end; } sas_phy->oob_mode = SATA_OOB_MODE; attached_sas_addr[0] = 0x50; attached_sas_addr[6] = shost->host_no; attached_sas_addr[7] = phy_no; memcpy(sas_phy->attached_sas_addr, attached_sas_addr, SAS_ADDR_SIZE); memcpy(sas_phy->frame_rcvd, fis, sizeof(struct dev_to_host_fis)); phy->phy_type |= PORT_TYPE_SATA; phy->identify.device_type = SAS_SATA_DEV; phy->frame_rcvd_size = sizeof(struct dev_to_host_fis); phy->identify.target_port_protocols = SAS_PROTOCOL_SATA; } else { u32 *frame_rcvd = (u32 *)sas_phy->frame_rcvd; struct sas_identify_frame *id = (struct sas_identify_frame *)frame_rcvd; dev_info(dev, "phyup: phy%d link_rate=%d\n", 
			 phy_no, link_rate);

		for (i = 0; i < 6; i++) {
			u32 idaf = hisi_sas_phy_read32(hisi_hba, phy_no,
						       RX_IDAF_DWORD0 + (i * 4));
			frame_rcvd[i] = __swab32(idaf);
		}
		sas_phy->oob_mode = SAS_OOB_MODE;
		memcpy(sas_phy->attached_sas_addr,
		       &id->sas_addr, SAS_ADDR_SIZE);
		phy->phy_type |= PORT_TYPE_SAS;
		phy->identify.device_type = id->dev_type;
		phy->frame_rcvd_size = sizeof(struct sas_identify_frame);
		if (phy->identify.device_type == SAS_END_DEVICE)
			phy->identify.target_port_protocols =
				SAS_PROTOCOL_SSP;
		else if (phy->identify.device_type != SAS_PHY_UNUSED)
			phy->identify.target_port_protocols =
				SAS_PROTOCOL_SMP;
	}

	phy->port_id = port_id;

	/*
	 * Call pm_runtime_get_noresume() which pairs with
	 * hisi_sas_phyup_pm_work() -> pm_runtime_put_sync().
	 * For failure call pm_runtime_put() as we are in a hardirq context.
	 */
	pm_runtime_get_noresume(dev);
	res = hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP_PM);
	if (!res)
		pm_runtime_put(dev);

	res = IRQ_HANDLED;

	spin_lock(&phy->lock);
	/* Delete timer and set phy_attached atomically */
	del_timer(&phy->timer);
	phy->phy_attached = 1;
	spin_unlock(&phy->lock);
end:
	if (phy->reset_completion)
		complete(phy->reset_completion);
	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
			     CHL_INT0_SL_PHY_ENABLE_MSK);
	hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 0);

	return res;
}

static irqreturn_t phy_down_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	u32 phy_state, sl_ctrl, txid_auto;
	struct device *dev = hisi_hba->dev;

	atomic_inc(&phy->down_cnt);

	del_timer(&phy->timer);
	hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1);

	phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
	dev_info(dev, "phydown: phy%d phy_state=0x%x\n", phy_no, phy_state);
	hisi_sas_phy_down(hisi_hba, phy_no, (phy_state & 1 << phy_no) ?
1 : 0, GFP_ATOMIC); sl_ctrl = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL); hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_ctrl&(~SL_CTA_MSK)); txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO); hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO, txid_auto | CT3_MSK); hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, CHL_INT0_NOT_RDY_MSK); hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 0); return IRQ_HANDLED; } static irqreturn_t phy_bcast_v3_hw(int phy_no, struct hisi_hba *hisi_hba) { struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; u32 bcast_status; hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1); bcast_status = hisi_sas_phy_read32(hisi_hba, phy_no, RX_PRIMS_STATUS); if (bcast_status & RX_BCAST_CHG_MSK) hisi_sas_phy_bcast(phy); hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, CHL_INT0_SL_RX_BCST_ACK_MSK); hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0); return IRQ_HANDLED; } static irqreturn_t int_phy_up_down_bcast_v3_hw(int irq_no, void *p) { struct hisi_hba *hisi_hba = p; u32 irq_msk; int phy_no = 0; irqreturn_t res = IRQ_NONE; irq_msk = hisi_sas_read32(hisi_hba, CHNL_INT_STATUS) & 0x11111111; while (irq_msk) { if (irq_msk & 1) { u32 irq_value = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT0); u32 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE); int rdy = phy_state & (1 << phy_no); if (rdy) { if (irq_value & CHL_INT0_SL_PHY_ENABLE_MSK) /* phy up */ if (phy_up_v3_hw(phy_no, hisi_hba) == IRQ_HANDLED) res = IRQ_HANDLED; if (irq_value & CHL_INT0_SL_RX_BCST_ACK_MSK) /* phy bcast */ if (phy_bcast_v3_hw(phy_no, hisi_hba) == IRQ_HANDLED) res = IRQ_HANDLED; } else { if (irq_value & CHL_INT0_NOT_RDY_MSK) /* phy down */ if (phy_down_v3_hw(phy_no, hisi_hba) == IRQ_HANDLED) res = IRQ_HANDLED; } } irq_msk >>= 4; phy_no++; } return res; } static const struct hisi_sas_hw_error port_axi_error[] = { { .irq_msk = BIT(CHL_INT1_DMAC_TX_ECC_MB_ERR_OFF), .msg = "dmac_tx_ecc_bad_err", }, { .irq_msk = BIT(CHL_INT1_DMAC_RX_ECC_MB_ERR_OFF), .msg = "dmac_rx_ecc_bad_err", }, { .irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_WR_ERR_OFF), .msg = "dma_tx_axi_wr_err", }, { .irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_RD_ERR_OFF), .msg = "dma_tx_axi_rd_err", }, { .irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF), .msg = "dma_rx_axi_wr_err", }, { .irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF), .msg = "dma_rx_axi_rd_err", }, { .irq_msk = BIT(CHL_INT1_DMAC_TX_FIFO_ERR_OFF), .msg = "dma_tx_fifo_err", }, { .irq_msk = BIT(CHL_INT1_DMAC_RX_FIFO_ERR_OFF), .msg = "dma_rx_fifo_err", }, { .irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_RUSER_ERR_OFF), .msg = "dma_tx_axi_ruser_err", }, { .irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_RUSER_ERR_OFF), .msg = "dma_rx_axi_ruser_err", }, }; static void handle_chl_int1_v3_hw(struct hisi_hba *hisi_hba, int phy_no) { u32 irq_value = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT1); u32 irq_msk = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT1_MSK); struct device *dev = hisi_hba->dev; int i; irq_value &= ~irq_msk; if (!irq_value) { dev_warn(dev, "phy%d channel int 1 received with status bits cleared\n", phy_no); return; } for (i = 0; i < ARRAY_SIZE(port_axi_error); i++) { const struct hisi_sas_hw_error *error = &port_axi_error[i]; if (!(irq_value & error->irq_msk)) continue; dev_err(dev, "%s error (phy%d 0x%x) found!\n", error->msg, phy_no, irq_value); queue_work(hisi_hba->wq, &hisi_hba->rst_work); } hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT1, irq_value); } static void phy_get_events_v3_hw(struct hisi_hba *hisi_hba, int phy_no) { struct hisi_sas_phy *phy = 
&hisi_hba->phy[phy_no]; struct asd_sas_phy *sas_phy = &phy->sas_phy; struct sas_phy *sphy = sas_phy->phy; unsigned long flags; u32 reg_value; spin_lock_irqsave(&phy->lock, flags); /* loss dword sync */ reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_DWS_LOST); sphy->loss_of_dword_sync_count += reg_value; /* phy reset problem */ reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_RESET_PROB); sphy->phy_reset_problem_count += reg_value; /* invalid dword */ reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_INVLD_DW); sphy->invalid_dword_count += reg_value; /* disparity err */ reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_DISP_ERR); sphy->running_disparity_error_count += reg_value; /* code violation error */ reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_CODE_ERR); phy->code_violation_err_count += reg_value; spin_unlock_irqrestore(&phy->lock, flags); } static void handle_chl_int2_v3_hw(struct hisi_hba *hisi_hba, int phy_no) { u32 irq_msk = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2_MSK); u32 irq_value = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2); struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; struct pci_dev *pci_dev = hisi_hba->pci_dev; struct device *dev = hisi_hba->dev; static const u32 msk = BIT(CHL_INT2_RX_DISP_ERR_OFF) | BIT(CHL_INT2_RX_CODE_ERR_OFF) | BIT(CHL_INT2_RX_INVLD_DW_OFF); irq_value &= ~irq_msk; if (!irq_value) { dev_warn(dev, "phy%d channel int 2 received with status bits cleared\n", phy_no); return; } if (irq_value & BIT(CHL_INT2_SL_IDAF_TOUT_CONF_OFF)) { dev_warn(dev, "phy%d identify timeout\n", phy_no); hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET); } if (irq_value & BIT(CHL_INT2_STP_LINK_TIMEOUT_OFF)) { u32 reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, STP_LINK_TIMEOUT_STATE); dev_warn(dev, "phy%d stp link timeout (0x%x)\n", phy_no, reg_value); if (reg_value & BIT(4)) hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET); } if (pci_dev->revision > 0x20 && (irq_value & msk)) { struct asd_sas_phy *sas_phy = &phy->sas_phy; struct sas_phy *sphy = sas_phy->phy; phy_get_events_v3_hw(hisi_hba, phy_no); if (irq_value & BIT(CHL_INT2_RX_INVLD_DW_OFF)) dev_info(dev, "phy%d invalid dword cnt: %u\n", phy_no, sphy->invalid_dword_count); if (irq_value & BIT(CHL_INT2_RX_CODE_ERR_OFF)) dev_info(dev, "phy%d code violation cnt: %u\n", phy_no, phy->code_violation_err_count); if (irq_value & BIT(CHL_INT2_RX_DISP_ERR_OFF)) dev_info(dev, "phy%d disparity error cnt: %u\n", phy_no, sphy->running_disparity_error_count); } if ((irq_value & BIT(CHL_INT2_RX_INVLD_DW_OFF)) && (pci_dev->revision == 0x20)) { u32 reg_value; int rc; rc = hisi_sas_read32_poll_timeout_atomic( HILINK_ERR_DFX, reg_value, !((reg_value >> 8) & BIT(phy_no)), 1000, 10000); if (rc) hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET); } hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2, irq_value); } static void handle_chl_int0_v3_hw(struct hisi_hba *hisi_hba, int phy_no) { u32 irq_value0 = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT0); if (irq_value0 & CHL_INT0_PHY_RDY_MSK) hisi_sas_phy_oob_ready(hisi_hba, phy_no); hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, irq_value0 & (~CHL_INT0_SL_RX_BCST_ACK_MSK) & (~CHL_INT0_SL_PHY_ENABLE_MSK) & (~CHL_INT0_NOT_RDY_MSK)); } static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p) { struct hisi_hba *hisi_hba = p; u32 irq_msk; int phy_no = 0; irq_msk = hisi_sas_read32(hisi_hba, CHNL_INT_STATUS) & CHNL_INT_STS_MSK; while (irq_msk) { if (irq_msk & (CHNL_INT_STS_INT0_MSK << (phy_no * CHNL_WIDTH))) 
handle_chl_int0_v3_hw(hisi_hba, phy_no); if (irq_msk & (CHNL_INT_STS_INT1_MSK << (phy_no * CHNL_WIDTH))) handle_chl_int1_v3_hw(hisi_hba, phy_no); if (irq_msk & (CHNL_INT_STS_INT2_MSK << (phy_no * CHNL_WIDTH))) handle_chl_int2_v3_hw(hisi_hba, phy_no); irq_msk &= ~(CHNL_INT_STS_PHY_MSK << (phy_no * CHNL_WIDTH)); phy_no++; } return IRQ_HANDLED; } static const struct hisi_sas_hw_error multi_bit_ecc_errors[] = { { .irq_msk = BIT(SAS_ECC_INTR_DQE_ECC_MB_OFF), .msk = HGC_DQE_ECC_MB_ADDR_MSK, .shift = HGC_DQE_ECC_MB_ADDR_OFF, .msg = "hgc_dqe_eccbad_intr", .reg = HGC_DQE_ECC_ADDR, }, { .irq_msk = BIT(SAS_ECC_INTR_IOST_ECC_MB_OFF), .msk = HGC_IOST_ECC_MB_ADDR_MSK, .shift = HGC_IOST_ECC_MB_ADDR_OFF, .msg = "hgc_iost_eccbad_intr", .reg = HGC_IOST_ECC_ADDR, }, { .irq_msk = BIT(SAS_ECC_INTR_ITCT_ECC_MB_OFF), .msk = HGC_ITCT_ECC_MB_ADDR_MSK, .shift = HGC_ITCT_ECC_MB_ADDR_OFF, .msg = "hgc_itct_eccbad_intr", .reg = HGC_ITCT_ECC_ADDR, }, { .irq_msk = BIT(SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF), .msk = HGC_LM_DFX_STATUS2_IOSTLIST_MSK, .shift = HGC_LM_DFX_STATUS2_IOSTLIST_OFF, .msg = "hgc_iostl_eccbad_intr", .reg = HGC_LM_DFX_STATUS2, }, { .irq_msk = BIT(SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF), .msk = HGC_LM_DFX_STATUS2_ITCTLIST_MSK, .shift = HGC_LM_DFX_STATUS2_ITCTLIST_OFF, .msg = "hgc_itctl_eccbad_intr", .reg = HGC_LM_DFX_STATUS2, }, { .irq_msk = BIT(SAS_ECC_INTR_CQE_ECC_MB_OFF), .msk = HGC_CQE_ECC_MB_ADDR_MSK, .shift = HGC_CQE_ECC_MB_ADDR_OFF, .msg = "hgc_cqe_eccbad_intr", .reg = HGC_CQE_ECC_ADDR, }, { .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF), .msk = HGC_RXM_DFX_STATUS14_MEM0_MSK, .shift = HGC_RXM_DFX_STATUS14_MEM0_OFF, .msg = "rxm_mem0_eccbad_intr", .reg = HGC_RXM_DFX_STATUS14, }, { .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF), .msk = HGC_RXM_DFX_STATUS14_MEM1_MSK, .shift = HGC_RXM_DFX_STATUS14_MEM1_OFF, .msg = "rxm_mem1_eccbad_intr", .reg = HGC_RXM_DFX_STATUS14, }, { .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF), .msk = HGC_RXM_DFX_STATUS14_MEM2_MSK, .shift = HGC_RXM_DFX_STATUS14_MEM2_OFF, .msg = "rxm_mem2_eccbad_intr", .reg = HGC_RXM_DFX_STATUS14, }, { .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF), .msk = HGC_RXM_DFX_STATUS15_MEM3_MSK, .shift = HGC_RXM_DFX_STATUS15_MEM3_OFF, .msg = "rxm_mem3_eccbad_intr", .reg = HGC_RXM_DFX_STATUS15, }, { .irq_msk = BIT(SAS_ECC_INTR_OOO_RAM_ECC_MB_OFF), .msk = AM_ROB_ECC_ERR_ADDR_MSK, .shift = AM_ROB_ECC_ERR_ADDR_OFF, .msg = "ooo_ram_eccbad_intr", .reg = AM_ROB_ECC_ERR_ADDR, }, }; static void multi_bit_ecc_error_process_v3_hw(struct hisi_hba *hisi_hba, u32 irq_value) { struct device *dev = hisi_hba->dev; const struct hisi_sas_hw_error *ecc_error; u32 val; int i; for (i = 0; i < ARRAY_SIZE(multi_bit_ecc_errors); i++) { ecc_error = &multi_bit_ecc_errors[i]; if (irq_value & ecc_error->irq_msk) { val = hisi_sas_read32(hisi_hba, ecc_error->reg); val &= ecc_error->msk; val >>= ecc_error->shift; dev_err(dev, "%s (0x%x) found: mem addr is 0x%08X\n", ecc_error->msg, irq_value, val); queue_work(hisi_hba->wq, &hisi_hba->rst_work); } } } static void fatal_ecc_int_v3_hw(struct hisi_hba *hisi_hba) { u32 irq_value, irq_msk; irq_msk = hisi_sas_read32(hisi_hba, SAS_ECC_INTR_MSK); hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xffffffff); irq_value = hisi_sas_read32(hisi_hba, SAS_ECC_INTR); if (irq_value) multi_bit_ecc_error_process_v3_hw(hisi_hba, irq_value); hisi_sas_write32(hisi_hba, SAS_ECC_INTR, irq_value); hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, irq_msk); } static const struct hisi_sas_hw_error axi_error[] = { { .msk = BIT(0), .msg = "IOST_AXI_W_ERR" }, { .msk = BIT(1), 
.msg = "IOST_AXI_R_ERR" }, { .msk = BIT(2), .msg = "ITCT_AXI_W_ERR" }, { .msk = BIT(3), .msg = "ITCT_AXI_R_ERR" }, { .msk = BIT(4), .msg = "SATA_AXI_W_ERR" }, { .msk = BIT(5), .msg = "SATA_AXI_R_ERR" }, { .msk = BIT(6), .msg = "DQE_AXI_R_ERR" }, { .msk = BIT(7), .msg = "CQE_AXI_W_ERR" }, {} }; static const struct hisi_sas_hw_error fifo_error[] = { { .msk = BIT(8), .msg = "CQE_WINFO_FIFO" }, { .msk = BIT(9), .msg = "CQE_MSG_FIFIO" }, { .msk = BIT(10), .msg = "GETDQE_FIFO" }, { .msk = BIT(11), .msg = "CMDP_FIFO" }, { .msk = BIT(12), .msg = "AWTCTRL_FIFO" }, {} }; static const struct hisi_sas_hw_error fatal_axi_error[] = { { .irq_msk = BIT(ENT_INT_SRC3_WP_DEPTH_OFF), .msg = "write pointer and depth", }, { .irq_msk = BIT(ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF), .msg = "iptt no match slot", }, { .irq_msk = BIT(ENT_INT_SRC3_RP_DEPTH_OFF), .msg = "read pointer and depth", }, { .irq_msk = BIT(ENT_INT_SRC3_AXI_OFF), .reg = HGC_AXI_FIFO_ERR_INFO, .sub = axi_error, }, { .irq_msk = BIT(ENT_INT_SRC3_FIFO_OFF), .reg = HGC_AXI_FIFO_ERR_INFO, .sub = fifo_error, }, { .irq_msk = BIT(ENT_INT_SRC3_LM_OFF), .msg = "LM add/fetch list", }, { .irq_msk = BIT(ENT_INT_SRC3_ABT_OFF), .msg = "SAS_HGC_ABT fetch LM list", }, { .irq_msk = BIT(ENT_INT_SRC3_DQE_POISON_OFF), .msg = "read dqe poison", }, { .irq_msk = BIT(ENT_INT_SRC3_IOST_POISON_OFF), .msg = "read iost poison", }, { .irq_msk = BIT(ENT_INT_SRC3_ITCT_POISON_OFF), .msg = "read itct poison", }, { .irq_msk = BIT(ENT_INT_SRC3_ITCT_NCQ_POISON_OFF), .msg = "read itct ncq poison", }, }; static irqreturn_t fatal_axi_int_v3_hw(int irq_no, void *p) { u32 irq_value, irq_msk; struct hisi_hba *hisi_hba = p; struct device *dev = hisi_hba->dev; struct pci_dev *pdev = hisi_hba->pci_dev; int i; irq_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3); hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk | 0x1df00); irq_value = hisi_sas_read32(hisi_hba, ENT_INT_SRC3); irq_value &= ~irq_msk; for (i = 0; i < ARRAY_SIZE(fatal_axi_error); i++) { const struct hisi_sas_hw_error *error = &fatal_axi_error[i]; if (!(irq_value & error->irq_msk)) continue; if (error->sub) { const struct hisi_sas_hw_error *sub = error->sub; u32 err_value = hisi_sas_read32(hisi_hba, error->reg); for (; sub->msk || sub->msg; sub++) { if (!(err_value & sub->msk)) continue; dev_err(dev, "%s error (0x%x) found!\n", sub->msg, irq_value); queue_work(hisi_hba->wq, &hisi_hba->rst_work); } } else { dev_err(dev, "%s error (0x%x) found!\n", error->msg, irq_value); queue_work(hisi_hba->wq, &hisi_hba->rst_work); } if (pdev->revision < 0x21) { u32 reg_val; reg_val = hisi_sas_read32(hisi_hba, AXI_MASTER_CFG_BASE + AM_CTRL_GLOBAL); reg_val |= AM_CTRL_SHUTDOWN_REQ_MSK; hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE + AM_CTRL_GLOBAL, reg_val); } } fatal_ecc_int_v3_hw(hisi_hba); if (irq_value & BIT(ENT_INT_SRC3_ITC_INT_OFF)) { u32 reg_val = hisi_sas_read32(hisi_hba, ITCT_CLR); u32 dev_id = reg_val & ITCT_DEV_MSK; struct hisi_sas_device *sas_dev = &hisi_hba->devices[dev_id]; hisi_sas_write32(hisi_hba, ITCT_CLR, 0); dev_dbg(dev, "clear ITCT ok\n"); complete(sas_dev->completion); } hisi_sas_write32(hisi_hba, ENT_INT_SRC3, irq_value & 0x1df00); hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk); return IRQ_HANDLED; } static bool is_ncq_err_v3_hw(struct hisi_sas_complete_v3_hdr *complete_hdr) { u32 dw0, dw3; dw0 = le32_to_cpu(complete_hdr->dw0); dw3 = le32_to_cpu(complete_hdr->dw3); return (dw0 & ERR_PHASE_RESPONSE_FRAME_REV_STAGE_MSK) && (dw3 & FIS_TYPE_SDB_MSK) && (dw3 & FIS_ATA_STATUS_ERR_MSK); } static bool slot_err_v3_hw(struct 
hisi_hba *hisi_hba, struct sas_task *task, struct hisi_sas_slot *slot) { struct task_status_struct *ts = &task->task_status; struct hisi_sas_complete_v3_hdr *complete_queue = hisi_hba->complete_hdr[slot->cmplt_queue]; struct hisi_sas_complete_v3_hdr *complete_hdr = &complete_queue[slot->cmplt_queue_slot]; struct hisi_sas_err_record_v3 *record = hisi_sas_status_buf_addr_mem(slot); u32 dma_rx_err_type = le32_to_cpu(record->dma_rx_err_type); u32 trans_tx_fail_type = le32_to_cpu(record->trans_tx_fail_type); u16 sipc_rx_err_type = le16_to_cpu(record->sipc_rx_err_type); u32 dw3 = le32_to_cpu(complete_hdr->dw3); u32 dw0 = le32_to_cpu(complete_hdr->dw0); switch (task->task_proto) { case SAS_PROTOCOL_SSP: if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) { /* * If returned response frame is incorrect because of data underflow, * but I/O information has been written to the host memory, we examine * response IU. */ if (!(dw0 & CMPLT_HDR_RSPNS_GOOD_MSK) && (dw0 & CMPLT_HDR_RSPNS_XFRD_MSK)) return false; ts->residual = trans_tx_fail_type; ts->stat = SAS_DATA_UNDERRUN; } else if (dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) { ts->stat = SAS_QUEUE_FULL; slot->abort = 1; } else { ts->stat = SAS_OPEN_REJECT; ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; } break; case SAS_PROTOCOL_SATA: case SAS_PROTOCOL_STP: case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: if ((dw0 & CMPLT_HDR_RSPNS_XFRD_MSK) && (sipc_rx_err_type & RX_FIS_STATUS_ERR_MSK)) { ts->stat = SAS_PROTO_RESPONSE; } else if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) { ts->residual = trans_tx_fail_type; ts->stat = SAS_DATA_UNDERRUN; } else if ((dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) || (dw3 & SATA_DISK_IN_ERROR_STATUS_MSK)) { ts->stat = SAS_PHY_DOWN; slot->abort = 1; } else { ts->stat = SAS_OPEN_REJECT; ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; } if (dw0 & CMPLT_HDR_RSPNS_XFRD_MSK) hisi_sas_sata_done(task, slot); break; case SAS_PROTOCOL_SMP: ts->stat = SAS_SAM_STAT_CHECK_CONDITION; break; default: break; } return true; } static void slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot) { struct sas_task *task = slot->task; struct hisi_sas_device *sas_dev; struct device *dev = hisi_hba->dev; struct task_status_struct *ts; struct domain_device *device; struct sas_ha_struct *ha; struct hisi_sas_complete_v3_hdr *complete_queue = hisi_hba->complete_hdr[slot->cmplt_queue]; struct hisi_sas_complete_v3_hdr *complete_hdr = &complete_queue[slot->cmplt_queue_slot]; unsigned long flags; bool is_internal = slot->is_internal; u32 dw0, dw1, dw3; if (unlikely(!task || !task->lldd_task || !task->dev)) return; ts = &task->task_status; device = task->dev; ha = device->port->ha; sas_dev = device->lldd_dev; spin_lock_irqsave(&task->task_state_lock, flags); task->task_state_flags &= ~SAS_TASK_STATE_PENDING; spin_unlock_irqrestore(&task->task_state_lock, flags); memset(ts, 0, sizeof(*ts)); ts->resp = SAS_TASK_COMPLETE; if (unlikely(!sas_dev)) { dev_dbg(dev, "slot complete: port has not device\n"); ts->stat = SAS_PHY_DOWN; goto out; } dw0 = le32_to_cpu(complete_hdr->dw0); dw1 = le32_to_cpu(complete_hdr->dw1); dw3 = le32_to_cpu(complete_hdr->dw3); /* * Use SAS+TMF status codes */ switch ((dw0 & CMPLT_HDR_ABORT_STAT_MSK) >> CMPLT_HDR_ABORT_STAT_OFF) { case STAT_IO_ABORTED: /* this IO has been aborted by abort command */ ts->stat = SAS_ABORTED_TASK; goto out; case STAT_IO_COMPLETE: /* internal abort command complete */ ts->stat = TMF_RESP_FUNC_SUCC; goto out; case STAT_IO_NO_DEVICE: ts->stat = TMF_RESP_FUNC_COMPLETE; goto out; case STAT_IO_NOT_VALID: /* * abort single IO, the 
controller can't find the IO */ ts->stat = TMF_RESP_FUNC_FAILED; goto out; default: break; } /* check for erroneous completion */ if ((dw0 & CMPLT_HDR_CMPLT_MSK) == 0x3) { u32 *error_info = hisi_sas_status_buf_addr_mem(slot); if (slot_err_v3_hw(hisi_hba, task, slot)) { if (ts->stat != SAS_DATA_UNDERRUN) dev_info(dev, "erroneous completion iptt=%d task=%pK dev id=%d addr=%016llx CQ hdr: 0x%x 0x%x 0x%x 0x%x Error info: 0x%x 0x%x 0x%x 0x%x\n", slot->idx, task, sas_dev->device_id, SAS_ADDR(device->sas_addr), dw0, dw1, complete_hdr->act, dw3, error_info[0], error_info[1], error_info[2], error_info[3]); if (unlikely(slot->abort)) { if (dev_is_sata(device) && task->ata_task.use_ncq) sas_ata_device_link_abort(device, true); else sas_task_abort(task); return; } goto out; } } switch (task->task_proto) { case SAS_PROTOCOL_SSP: { struct ssp_response_iu *iu = hisi_sas_status_buf_addr_mem(slot) + sizeof(struct hisi_sas_err_record); sas_ssp_task_response(dev, task, iu); break; } case SAS_PROTOCOL_SMP: { struct scatterlist *sg_resp = &task->smp_task.smp_resp; void *to = page_address(sg_page(sg_resp)); ts->stat = SAS_SAM_STAT_GOOD; memcpy(to + sg_resp->offset, hisi_sas_status_buf_addr_mem(slot) + sizeof(struct hisi_sas_err_record), sg_resp->length); break; } case SAS_PROTOCOL_SATA: case SAS_PROTOCOL_STP: case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: ts->stat = SAS_SAM_STAT_GOOD; if (dw0 & CMPLT_HDR_RSPNS_XFRD_MSK) hisi_sas_sata_done(task, slot); break; default: ts->stat = SAS_SAM_STAT_CHECK_CONDITION; break; } if (!slot->port->port_attached) { dev_warn(dev, "slot complete: port %d has removed\n", slot->port->sas_port.id); ts->stat = SAS_PHY_DOWN; } out: spin_lock_irqsave(&task->task_state_lock, flags); if (task->task_state_flags & SAS_TASK_STATE_ABORTED) { spin_unlock_irqrestore(&task->task_state_lock, flags); dev_info(dev, "slot complete: task(%pK) aborted\n", task); return; } task->task_state_flags |= SAS_TASK_STATE_DONE; spin_unlock_irqrestore(&task->task_state_lock, flags); hisi_sas_slot_task_free(hisi_hba, task, slot, true); if (!is_internal && (task->task_proto != SAS_PROTOCOL_SMP)) { spin_lock_irqsave(&device->done_lock, flags); if (test_bit(SAS_HA_FROZEN, &ha->state)) { spin_unlock_irqrestore(&device->done_lock, flags); dev_info(dev, "slot complete: task(%pK) ignored\n ", task); return; } spin_unlock_irqrestore(&device->done_lock, flags); } if (task->task_done) task->task_done(task); } static int complete_v3_hw(struct hisi_sas_cq *cq) { struct hisi_sas_complete_v3_hdr *complete_queue; struct hisi_hba *hisi_hba = cq->hisi_hba; u32 rd_point, wr_point; int queue = cq->id; int completed; rd_point = cq->rd_point; complete_queue = hisi_hba->complete_hdr[queue]; wr_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_WR_PTR + (0x14 * queue)); completed = (wr_point + HISI_SAS_QUEUE_SLOTS - rd_point) % HISI_SAS_QUEUE_SLOTS; while (rd_point != wr_point) { struct hisi_sas_complete_v3_hdr *complete_hdr; struct device *dev = hisi_hba->dev; struct hisi_sas_slot *slot; u32 dw0, dw1, dw3; int iptt; complete_hdr = &complete_queue[rd_point]; dw0 = le32_to_cpu(complete_hdr->dw0); dw1 = le32_to_cpu(complete_hdr->dw1); dw3 = le32_to_cpu(complete_hdr->dw3); iptt = dw1 & CMPLT_HDR_IPTT_MSK; if (unlikely((dw0 & CMPLT_HDR_CMPLT_MSK) == 0x3) && (dw3 & CMPLT_HDR_SATA_DISK_ERR_MSK)) { int device_id = (dw1 & CMPLT_HDR_DEV_ID_MSK) >> CMPLT_HDR_DEV_ID_OFF; struct hisi_sas_itct *itct = &hisi_hba->itct[device_id]; struct hisi_sas_device *sas_dev = &hisi_hba->devices[device_id]; struct domain_device *device = sas_dev->sas_device; dev_err(dev, 
"erroneous completion disk err dev id=%d sas_addr=0x%llx CQ hdr: 0x%x 0x%x 0x%x 0x%x\n", device_id, itct->sas_addr, dw0, dw1, complete_hdr->act, dw3); if (is_ncq_err_v3_hw(complete_hdr)) sas_dev->dev_status = HISI_SAS_DEV_NCQ_ERR; sas_ata_device_link_abort(device, true); } else if (likely(iptt < HISI_SAS_COMMAND_ENTRIES_V3_HW)) { slot = &hisi_hba->slot_info[iptt]; slot->cmplt_queue_slot = rd_point; slot->cmplt_queue = queue; slot_complete_v3_hw(hisi_hba, slot); } else dev_err(dev, "IPTT %d is invalid, discard it.\n", iptt); if (++rd_point >= HISI_SAS_QUEUE_SLOTS) rd_point = 0; } /* update rd_point */ cq->rd_point = rd_point; hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point); return completed; } static int queue_complete_v3_hw(struct Scsi_Host *shost, unsigned int queue) { struct hisi_hba *hisi_hba = shost_priv(shost); struct hisi_sas_cq *cq = &hisi_hba->cq[queue]; int completed; spin_lock(&cq->poll_lock); completed = complete_v3_hw(cq); spin_unlock(&cq->poll_lock); return completed; } static irqreturn_t cq_thread_v3_hw(int irq_no, void *p) { struct hisi_sas_cq *cq = p; complete_v3_hw(cq); return IRQ_HANDLED; } static irqreturn_t cq_interrupt_v3_hw(int irq_no, void *p) { struct hisi_sas_cq *cq = p; struct hisi_hba *hisi_hba = cq->hisi_hba; int queue = cq->id; hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue); return IRQ_WAKE_THREAD; } static void hisi_sas_v3_free_vectors(void *data) { struct pci_dev *pdev = data; pci_free_irq_vectors(pdev); } static int interrupt_preinit_v3_hw(struct hisi_hba *hisi_hba) { /* Allocate all MSI vectors to avoid re-insertion issue */ int max_msi = HISI_SAS_MSI_COUNT_V3_HW; int vectors, min_msi; struct Scsi_Host *shost = hisi_hba->shost; struct pci_dev *pdev = hisi_hba->pci_dev; struct irq_affinity desc = { .pre_vectors = BASE_VECTORS_V3_HW, }; min_msi = MIN_AFFINE_VECTORS_V3_HW; vectors = pci_alloc_irq_vectors_affinity(pdev, min_msi, max_msi, PCI_IRQ_MSI | PCI_IRQ_AFFINITY, &desc); if (vectors < 0) return -ENOENT; hisi_hba->cq_nvecs = vectors - BASE_VECTORS_V3_HW - hisi_hba->iopoll_q_cnt; shost->nr_hw_queues = hisi_hba->cq_nvecs + hisi_hba->iopoll_q_cnt; return devm_add_action(&pdev->dev, hisi_sas_v3_free_vectors, pdev); } static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba) { struct device *dev = hisi_hba->dev; struct pci_dev *pdev = hisi_hba->pci_dev; int rc, i; rc = devm_request_irq(dev, pci_irq_vector(pdev, 1), int_phy_up_down_bcast_v3_hw, 0, DRV_NAME " phy", hisi_hba); if (rc) { dev_err(dev, "could not request phy interrupt, rc=%d\n", rc); return -ENOENT; } rc = devm_request_irq(dev, pci_irq_vector(pdev, 2), int_chnl_int_v3_hw, 0, DRV_NAME " channel", hisi_hba); if (rc) { dev_err(dev, "could not request chnl interrupt, rc=%d\n", rc); return -ENOENT; } rc = devm_request_irq(dev, pci_irq_vector(pdev, 11), fatal_axi_int_v3_hw, 0, DRV_NAME " fatal", hisi_hba); if (rc) { dev_err(dev, "could not request fatal interrupt, rc=%d\n", rc); return -ENOENT; } if (hisi_sas_intr_conv) dev_info(dev, "Enable interrupt converge\n"); for (i = 0; i < hisi_hba->cq_nvecs; i++) { struct hisi_sas_cq *cq = &hisi_hba->cq[i]; int nr = hisi_sas_intr_conv ? 16 : 16 + i; unsigned long irqflags = hisi_sas_intr_conv ? 
IRQF_SHARED : IRQF_ONESHOT; cq->irq_no = pci_irq_vector(pdev, nr); rc = devm_request_threaded_irq(dev, cq->irq_no, cq_interrupt_v3_hw, cq_thread_v3_hw, irqflags, DRV_NAME " cq", cq); if (rc) { dev_err(dev, "could not request cq%d interrupt, rc=%d\n", i, rc); return -ENOENT; } cq->irq_mask = pci_irq_get_affinity(pdev, i + BASE_VECTORS_V3_HW); if (!cq->irq_mask) { dev_err(dev, "could not get cq%d irq affinity!\n", i); return -ENOENT; } } return 0; } static int hisi_sas_v3_init(struct hisi_hba *hisi_hba) { int rc; rc = hw_init_v3_hw(hisi_hba); if (rc) return rc; rc = interrupt_init_v3_hw(hisi_hba); if (rc) return rc; return 0; } static void phy_set_linkrate_v3_hw(struct hisi_hba *hisi_hba, int phy_no, struct sas_phy_linkrates *r) { enum sas_linkrate max = r->maximum_linkrate; u32 prog_phy_link_rate = hisi_sas_phy_read32(hisi_hba, phy_no, PROG_PHY_LINK_RATE); prog_phy_link_rate &= ~CFG_PROG_PHY_LINK_RATE_MSK; prog_phy_link_rate |= hisi_sas_get_prog_phy_linkrate_mask(max); hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE, prog_phy_link_rate); } static void interrupt_disable_v3_hw(struct hisi_hba *hisi_hba) { struct pci_dev *pdev = hisi_hba->pci_dev; int i; synchronize_irq(pci_irq_vector(pdev, 1)); synchronize_irq(pci_irq_vector(pdev, 2)); synchronize_irq(pci_irq_vector(pdev, 11)); for (i = 0; i < hisi_hba->queue_count; i++) hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK + 0x4 * i, 0x1); for (i = 0; i < hisi_hba->cq_nvecs; i++) synchronize_irq(pci_irq_vector(pdev, i + 16)); hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xffffffff); hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xffffffff); hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffffffff); hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xffffffff); for (i = 0; i < hisi_hba->n_phy; i++) { hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff); hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0xffffffff); hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x1); hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_PHY_ENA_MSK, 0x1); hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x1); } } static u32 get_phys_state_v3_hw(struct hisi_hba *hisi_hba) { return hisi_sas_read32(hisi_hba, PHY_STATE); } static int disable_host_v3_hw(struct hisi_hba *hisi_hba) { struct device *dev = hisi_hba->dev; u32 status, reg_val; int rc; hisi_sas_sync_poll_cqs(hisi_hba); hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0); hisi_sas_stop_phys(hisi_hba); mdelay(10); reg_val = hisi_sas_read32(hisi_hba, AXI_MASTER_CFG_BASE + AM_CTRL_GLOBAL); reg_val |= AM_CTRL_SHUTDOWN_REQ_MSK; hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE + AM_CTRL_GLOBAL, reg_val); /* wait until bus idle */ rc = hisi_sas_read32_poll_timeout(AXI_MASTER_CFG_BASE + AM_CURR_TRANS_RETURN, status, status == 0x3, 10, 100); if (rc) { dev_err(dev, "axi bus is not idle, rc=%d\n", rc); return rc; } return 0; } static int soft_reset_v3_hw(struct hisi_hba *hisi_hba) { struct device *dev = hisi_hba->dev; int rc; interrupt_disable_v3_hw(hisi_hba); rc = disable_host_v3_hw(hisi_hba); if (rc) { dev_err(dev, "soft reset: disable host failed rc=%d\n", rc); return rc; } hisi_sas_init_mem(hisi_hba); return hw_init_v3_hw(hisi_hba); } static int write_gpio_v3_hw(struct hisi_hba *hisi_hba, u8 reg_type, u8 reg_index, u8 reg_count, u8 *write_data) { struct device *dev = hisi_hba->dev; u32 *data = (u32 *)write_data; int i; switch (reg_type) { case SAS_GPIO_REG_TX: if ((reg_index + reg_count) > ((hisi_hba->n_phy + 3) / 4)) { dev_err(dev, "write gpio: invalid reg range[%d, %d]\n", reg_index, reg_index + reg_count - 1); 
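/* Each 32-bit SAS_GPIO_TX register carries the SGPIO TX data of up to four phys, so only DIV_ROUND_UP(n_phy, 4) registers exist and any range beyond that is rejected. */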
return -EINVAL; } for (i = 0; i < reg_count; i++) hisi_sas_write32(hisi_hba, SAS_GPIO_TX_0_1 + (reg_index + i) * 4, data[i]); break; default: dev_err(dev, "write gpio: unsupported or bad reg type %d\n", reg_type); return -EINVAL; } return 0; } static void wait_cmds_complete_timeout_v3_hw(struct hisi_hba *hisi_hba, int delay_ms, int timeout_ms) { struct device *dev = hisi_hba->dev; int entries, entries_old = 0, time; for (time = 0; time < timeout_ms; time += delay_ms) { entries = hisi_sas_read32(hisi_hba, CQE_SEND_CNT); if (entries == entries_old) break; entries_old = entries; msleep(delay_ms); } if (time >= timeout_ms) { dev_dbg(dev, "Wait commands complete timeout!\n"); return; } dev_dbg(dev, "wait commands complete %dms\n", time); } static ssize_t intr_conv_v3_hw_show(struct device *dev, struct device_attribute *attr, char *buf) { return scnprintf(buf, PAGE_SIZE, "%u\n", hisi_sas_intr_conv); } static DEVICE_ATTR_RO(intr_conv_v3_hw); static void config_intr_coal_v3_hw(struct hisi_hba *hisi_hba) { /* config those registers between enable and disable PHYs */ hisi_sas_stop_phys(hisi_hba); if (hisi_hba->intr_coal_ticks == 0 || hisi_hba->intr_coal_count == 0) { hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1); hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1); hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1); } else { hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x3); hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, hisi_hba->intr_coal_ticks); hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, hisi_hba->intr_coal_count); } phys_init_v3_hw(hisi_hba); } static ssize_t intr_coal_ticks_v3_hw_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct hisi_hba *hisi_hba = shost_priv(shost); return scnprintf(buf, PAGE_SIZE, "%u\n", hisi_hba->intr_coal_ticks); } static ssize_t intr_coal_ticks_v3_hw_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(dev); struct hisi_hba *hisi_hba = shost_priv(shost); u32 intr_coal_ticks; int ret; ret = kstrtou32(buf, 10, &intr_coal_ticks); if (ret) { dev_err(dev, "Input data of interrupt coalesce unmatch\n"); return -EINVAL; } if (intr_coal_ticks >= BIT(24)) { dev_err(dev, "intr_coal_ticks must be less than 2^24!\n"); return -EINVAL; } hisi_hba->intr_coal_ticks = intr_coal_ticks; config_intr_coal_v3_hw(hisi_hba); return count; } static DEVICE_ATTR_RW(intr_coal_ticks_v3_hw); static ssize_t intr_coal_count_v3_hw_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct hisi_hba *hisi_hba = shost_priv(shost); return scnprintf(buf, PAGE_SIZE, "%u\n", hisi_hba->intr_coal_count); } static ssize_t intr_coal_count_v3_hw_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(dev); struct hisi_hba *hisi_hba = shost_priv(shost); u32 intr_coal_count; int ret; ret = kstrtou32(buf, 10, &intr_coal_count); if (ret) { dev_err(dev, "Input data of interrupt coalesce unmatch\n"); return -EINVAL; } if (intr_coal_count >= BIT(8)) { dev_err(dev, "intr_coal_count must be less than 2^8!\n"); return -EINVAL; } hisi_hba->intr_coal_count = intr_coal_count; config_intr_coal_v3_hw(hisi_hba); return count; } static DEVICE_ATTR_RW(intr_coal_count_v3_hw); static ssize_t iopoll_q_cnt_v3_hw_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct hisi_hba *hisi_hba = 
shost_priv(shost); return scnprintf(buf, PAGE_SIZE, "%u\n", hisi_hba->iopoll_q_cnt); } static DEVICE_ATTR_RO(iopoll_q_cnt_v3_hw); static int slave_configure_v3_hw(struct scsi_device *sdev) { struct Scsi_Host *shost = dev_to_shost(&sdev->sdev_gendev); struct hisi_hba *hisi_hba = shost_priv(shost); int ret = hisi_sas_slave_configure(sdev); struct device *dev = hisi_hba->dev; if (ret) return ret; if (sdev->type == TYPE_ENCLOSURE) return 0; if (!device_link_add(&sdev->sdev_gendev, dev, DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE)) { if (pm_runtime_enabled(dev)) { dev_info(dev, "add device link failed, disable runtime PM for the host\n"); pm_runtime_disable(dev); } } return 0; } static struct attribute *host_v3_hw_attrs[] = { &dev_attr_phy_event_threshold.attr, &dev_attr_intr_conv_v3_hw.attr, &dev_attr_intr_coal_ticks_v3_hw.attr, &dev_attr_intr_coal_count_v3_hw.attr, &dev_attr_iopoll_q_cnt_v3_hw.attr, NULL }; ATTRIBUTE_GROUPS(host_v3_hw); #define HISI_SAS_DEBUGFS_REG(x) {#x, x} struct hisi_sas_debugfs_reg_lu { char *name; int off; }; struct hisi_sas_debugfs_reg { const struct hisi_sas_debugfs_reg_lu *lu; int count; int base_off; }; static const struct hisi_sas_debugfs_reg_lu debugfs_port_reg_lu[] = { HISI_SAS_DEBUGFS_REG(PHY_CFG), HISI_SAS_DEBUGFS_REG(HARD_PHY_LINKRATE), HISI_SAS_DEBUGFS_REG(PROG_PHY_LINK_RATE), HISI_SAS_DEBUGFS_REG(PHY_CTRL), HISI_SAS_DEBUGFS_REG(SL_CFG), HISI_SAS_DEBUGFS_REG(AIP_LIMIT), HISI_SAS_DEBUGFS_REG(SL_CONTROL), HISI_SAS_DEBUGFS_REG(RX_PRIMS_STATUS), HISI_SAS_DEBUGFS_REG(TX_ID_DWORD0), HISI_SAS_DEBUGFS_REG(TX_ID_DWORD1), HISI_SAS_DEBUGFS_REG(TX_ID_DWORD2), HISI_SAS_DEBUGFS_REG(TX_ID_DWORD3), HISI_SAS_DEBUGFS_REG(TX_ID_DWORD4), HISI_SAS_DEBUGFS_REG(TX_ID_DWORD5), HISI_SAS_DEBUGFS_REG(TX_ID_DWORD6), HISI_SAS_DEBUGFS_REG(TXID_AUTO), HISI_SAS_DEBUGFS_REG(RX_IDAF_DWORD0), HISI_SAS_DEBUGFS_REG(RXOP_CHECK_CFG_H), HISI_SAS_DEBUGFS_REG(STP_LINK_TIMER), HISI_SAS_DEBUGFS_REG(STP_LINK_TIMEOUT_STATE), HISI_SAS_DEBUGFS_REG(CON_CFG_DRIVER), HISI_SAS_DEBUGFS_REG(SAS_SSP_CON_TIMER_CFG), HISI_SAS_DEBUGFS_REG(SAS_SMP_CON_TIMER_CFG), HISI_SAS_DEBUGFS_REG(SAS_STP_CON_TIMER_CFG), HISI_SAS_DEBUGFS_REG(CHL_INT0), HISI_SAS_DEBUGFS_REG(CHL_INT1), HISI_SAS_DEBUGFS_REG(CHL_INT2), HISI_SAS_DEBUGFS_REG(CHL_INT0_MSK), HISI_SAS_DEBUGFS_REG(CHL_INT1_MSK), HISI_SAS_DEBUGFS_REG(CHL_INT2_MSK), HISI_SAS_DEBUGFS_REG(SAS_EC_INT_COAL_TIME), HISI_SAS_DEBUGFS_REG(CHL_INT_COAL_EN), HISI_SAS_DEBUGFS_REG(SAS_RX_TRAIN_TIMER), HISI_SAS_DEBUGFS_REG(PHY_CTRL_RDY_MSK), HISI_SAS_DEBUGFS_REG(PHYCTRL_NOT_RDY_MSK), HISI_SAS_DEBUGFS_REG(PHYCTRL_DWS_RESET_MSK), HISI_SAS_DEBUGFS_REG(PHYCTRL_PHY_ENA_MSK), HISI_SAS_DEBUGFS_REG(SL_RX_BCAST_CHK_MSK), HISI_SAS_DEBUGFS_REG(PHYCTRL_OOB_RESTART_MSK), HISI_SAS_DEBUGFS_REG(DMA_TX_STATUS), HISI_SAS_DEBUGFS_REG(DMA_RX_STATUS), HISI_SAS_DEBUGFS_REG(COARSETUNE_TIME), HISI_SAS_DEBUGFS_REG(ERR_CNT_DWS_LOST), HISI_SAS_DEBUGFS_REG(ERR_CNT_RESET_PROB), HISI_SAS_DEBUGFS_REG(ERR_CNT_INVLD_DW), HISI_SAS_DEBUGFS_REG(ERR_CNT_CODE_ERR), HISI_SAS_DEBUGFS_REG(ERR_CNT_DISP_ERR), {} }; static const struct hisi_sas_debugfs_reg debugfs_port_reg = { .lu = debugfs_port_reg_lu, .count = 0x100, .base_off = PORT_BASE, }; static const struct hisi_sas_debugfs_reg_lu debugfs_global_reg_lu[] = { HISI_SAS_DEBUGFS_REG(DLVRY_QUEUE_ENABLE), HISI_SAS_DEBUGFS_REG(PHY_CONTEXT), HISI_SAS_DEBUGFS_REG(PHY_STATE), HISI_SAS_DEBUGFS_REG(PHY_PORT_NUM_MA), HISI_SAS_DEBUGFS_REG(PHY_CONN_RATE), HISI_SAS_DEBUGFS_REG(ITCT_CLR), HISI_SAS_DEBUGFS_REG(IO_SATA_BROKEN_MSG_ADDR_LO), HISI_SAS_DEBUGFS_REG(IO_SATA_BROKEN_MSG_ADDR_HI), 
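/* SATA initial D2H FIS store addresses, tag limits and connection/retry timers follow */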
HISI_SAS_DEBUGFS_REG(SATA_INITI_D2H_STORE_ADDR_LO), HISI_SAS_DEBUGFS_REG(SATA_INITI_D2H_STORE_ADDR_HI), HISI_SAS_DEBUGFS_REG(CFG_MAX_TAG), HISI_SAS_DEBUGFS_REG(TRANS_LOCK_ICT_TIME), HISI_SAS_DEBUGFS_REG(HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL), HISI_SAS_DEBUGFS_REG(HGC_SAS_TXFAIL_RETRY_CTRL), HISI_SAS_DEBUGFS_REG(HGC_GET_ITV_TIME), HISI_SAS_DEBUGFS_REG(DEVICE_MSG_WORK_MODE), HISI_SAS_DEBUGFS_REG(OPENA_WT_CONTI_TIME), HISI_SAS_DEBUGFS_REG(I_T_NEXUS_LOSS_TIME), HISI_SAS_DEBUGFS_REG(MAX_CON_TIME_LIMIT_TIME), HISI_SAS_DEBUGFS_REG(BUS_INACTIVE_LIMIT_TIME), HISI_SAS_DEBUGFS_REG(REJECT_TO_OPEN_LIMIT_TIME), HISI_SAS_DEBUGFS_REG(CQ_INT_CONVERGE_EN), HISI_SAS_DEBUGFS_REG(CFG_AGING_TIME), HISI_SAS_DEBUGFS_REG(HGC_DFX_CFG2), HISI_SAS_DEBUGFS_REG(CFG_ABT_SET_QUERY_IPTT), HISI_SAS_DEBUGFS_REG(CFG_ABT_SET_IPTT_DONE), HISI_SAS_DEBUGFS_REG(HGC_IOMB_PROC1_STATUS), HISI_SAS_DEBUGFS_REG(CHNL_INT_STATUS), HISI_SAS_DEBUGFS_REG(HGC_AXI_FIFO_ERR_INFO), HISI_SAS_DEBUGFS_REG(INT_COAL_EN), HISI_SAS_DEBUGFS_REG(OQ_INT_COAL_TIME), HISI_SAS_DEBUGFS_REG(OQ_INT_COAL_CNT), HISI_SAS_DEBUGFS_REG(ENT_INT_COAL_TIME), HISI_SAS_DEBUGFS_REG(ENT_INT_COAL_CNT), HISI_SAS_DEBUGFS_REG(OQ_INT_SRC), HISI_SAS_DEBUGFS_REG(OQ_INT_SRC_MSK), HISI_SAS_DEBUGFS_REG(ENT_INT_SRC1), HISI_SAS_DEBUGFS_REG(ENT_INT_SRC2), HISI_SAS_DEBUGFS_REG(ENT_INT_SRC3), HISI_SAS_DEBUGFS_REG(ENT_INT_SRC_MSK1), HISI_SAS_DEBUGFS_REG(ENT_INT_SRC_MSK2), HISI_SAS_DEBUGFS_REG(ENT_INT_SRC_MSK3), HISI_SAS_DEBUGFS_REG(CHNL_PHYUPDOWN_INT_MSK), HISI_SAS_DEBUGFS_REG(CHNL_ENT_INT_MSK), HISI_SAS_DEBUGFS_REG(HGC_COM_INT_MSK), HISI_SAS_DEBUGFS_REG(SAS_ECC_INTR), HISI_SAS_DEBUGFS_REG(SAS_ECC_INTR_MSK), HISI_SAS_DEBUGFS_REG(HGC_ERR_STAT_EN), HISI_SAS_DEBUGFS_REG(CQE_SEND_CNT), HISI_SAS_DEBUGFS_REG(DLVRY_Q_0_DEPTH), HISI_SAS_DEBUGFS_REG(DLVRY_Q_0_WR_PTR), HISI_SAS_DEBUGFS_REG(DLVRY_Q_0_RD_PTR), HISI_SAS_DEBUGFS_REG(HYPER_STREAM_ID_EN_CFG), HISI_SAS_DEBUGFS_REG(OQ0_INT_SRC_MSK), HISI_SAS_DEBUGFS_REG(COMPL_Q_0_DEPTH), HISI_SAS_DEBUGFS_REG(COMPL_Q_0_WR_PTR), HISI_SAS_DEBUGFS_REG(COMPL_Q_0_RD_PTR), HISI_SAS_DEBUGFS_REG(AWQOS_AWCACHE_CFG), HISI_SAS_DEBUGFS_REG(ARQOS_ARCACHE_CFG), HISI_SAS_DEBUGFS_REG(HILINK_ERR_DFX), HISI_SAS_DEBUGFS_REG(SAS_GPIO_CFG_0), HISI_SAS_DEBUGFS_REG(SAS_GPIO_CFG_1), HISI_SAS_DEBUGFS_REG(SAS_GPIO_TX_0_1), HISI_SAS_DEBUGFS_REG(SAS_CFG_DRIVE_VLD), {} }; static const struct hisi_sas_debugfs_reg debugfs_global_reg = { .lu = debugfs_global_reg_lu, .count = 0x800, }; static const struct hisi_sas_debugfs_reg_lu debugfs_axi_reg_lu[] = { HISI_SAS_DEBUGFS_REG(AM_CFG_MAX_TRANS), HISI_SAS_DEBUGFS_REG(AM_CFG_SINGLE_PORT_MAX_TRANS), HISI_SAS_DEBUGFS_REG(AXI_CFG), HISI_SAS_DEBUGFS_REG(AM_ROB_ECC_ERR_ADDR), {} }; static const struct hisi_sas_debugfs_reg debugfs_axi_reg = { .lu = debugfs_axi_reg_lu, .count = 0x61, .base_off = AXI_MASTER_CFG_BASE, }; static const struct hisi_sas_debugfs_reg_lu debugfs_ras_reg_lu[] = { HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR0), HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR1), HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR0_MASK), HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR1_MASK), HISI_SAS_DEBUGFS_REG(CFG_SAS_RAS_INTR_MASK), HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR2), HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR2_MASK), {} }; static const struct hisi_sas_debugfs_reg debugfs_ras_reg = { .lu = debugfs_ras_reg_lu, .count = 0x10, .base_off = RAS_BASE, }; static void debugfs_snapshot_prepare_v3_hw(struct hisi_hba *hisi_hba) { struct Scsi_Host *shost = hisi_hba->shost; scsi_block_requests(shost); wait_cmds_complete_timeout_v3_hw(hisi_hba, 100, 5000); set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); 
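/* New commands are now rejected; drain in-flight completion processing before the delivery queues are disabled for the register snapshot. */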
hisi_sas_sync_cqs(hisi_hba); hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0); } static void debugfs_snapshot_restore_v3_hw(struct hisi_hba *hisi_hba) { struct Scsi_Host *shost = hisi_hba->shost; hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, (u32)((1ULL << hisi_hba->queue_count) - 1)); clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); scsi_unblock_requests(shost); } static void read_iost_itct_cache_v3_hw(struct hisi_hba *hisi_hba, enum hisi_sas_debugfs_cache_type type, u32 *cache) { u32 cache_dw_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * HISI_SAS_IOST_ITCT_CACHE_NUM; struct device *dev = hisi_hba->dev; u32 *buf = cache; u32 i, val; hisi_sas_write32(hisi_hba, TAB_RD_TYPE, type); for (i = 0; i < HISI_SAS_IOST_ITCT_CACHE_DW_SZ; i++) { val = hisi_sas_read32(hisi_hba, TAB_DFX); if (val == 0xffffffff) break; } if (val != 0xffffffff) { dev_err(dev, "Issue occurred in reading IOST/ITCT cache!\n"); return; } memset(buf, 0, cache_dw_size * 4); buf[0] = val; for (i = 1; i < cache_dw_size; i++) buf[i] = hisi_sas_read32(hisi_hba, TAB_DFX); } static void hisi_sas_bist_test_prep_v3_hw(struct hisi_hba *hisi_hba) { u32 reg_val; int phy_no = hisi_hba->debugfs_bist_phy_no; int i; /* disable PHY */ hisi_sas_phy_enable(hisi_hba, phy_no, 0); /* update FFE */ for (i = 0; i < FFE_CFG_MAX; i++) hisi_sas_phy_write32(hisi_hba, phy_no, TXDEEMPH_G1 + (i * 0x4), hisi_hba->debugfs_bist_ffe[phy_no][i]); /* disable ALOS */ reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, SERDES_CFG); reg_val |= CFG_ALOS_CHK_DISABLE_MSK; hisi_sas_phy_write32(hisi_hba, phy_no, SERDES_CFG, reg_val); } static void hisi_sas_bist_test_restore_v3_hw(struct hisi_hba *hisi_hba) { u32 reg_val; int phy_no = hisi_hba->debugfs_bist_phy_no; /* disable loopback */ reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, SAS_PHY_BIST_CTRL); reg_val &= ~(CFG_RX_BIST_EN_MSK | CFG_TX_BIST_EN_MSK | CFG_BIST_TEST_MSK); hisi_sas_phy_write32(hisi_hba, phy_no, SAS_PHY_BIST_CTRL, reg_val); /* enable ALOS */ reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, SERDES_CFG); reg_val &= ~CFG_ALOS_CHK_DISABLE_MSK; hisi_sas_phy_write32(hisi_hba, phy_no, SERDES_CFG, reg_val); /* restore the linkrate */ reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, PROG_PHY_LINK_RATE); /* init OOB link rate as 1.5 Gbits */ reg_val &= ~CFG_PROG_OOB_PHY_LINK_RATE_MSK; reg_val |= (0x8 << CFG_PROG_OOB_PHY_LINK_RATE_OFF); hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE, reg_val); /* enable PHY */ hisi_sas_phy_enable(hisi_hba, phy_no, 1); } #define SAS_PHY_BIST_CODE_INIT 0x1 #define SAS_PHY_BIST_CODE1_INIT 0X80 static int debugfs_set_bist_v3_hw(struct hisi_hba *hisi_hba, bool enable) { u32 reg_val, mode_tmp; u32 linkrate = hisi_hba->debugfs_bist_linkrate; u32 phy_no = hisi_hba->debugfs_bist_phy_no; u32 *ffe = hisi_hba->debugfs_bist_ffe[phy_no]; u32 code_mode = hisi_hba->debugfs_bist_code_mode; u32 path_mode = hisi_hba->debugfs_bist_mode; u32 *fix_code = &hisi_hba->debugfs_bist_fixed_code[0]; struct device *dev = hisi_hba->dev; dev_info(dev, "BIST info:phy%d link_rate=%d code_mode=%d path_mode=%d ffe={0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x} fixed_code={0x%x, 0x%x}\n", phy_no, linkrate, code_mode, path_mode, ffe[FFE_SAS_1_5_GBPS], ffe[FFE_SAS_3_0_GBPS], ffe[FFE_SAS_6_0_GBPS], ffe[FFE_SAS_12_0_GBPS], ffe[FFE_SATA_1_5_GBPS], ffe[FFE_SATA_3_0_GBPS], ffe[FFE_SATA_6_0_GBPS], fix_code[FIXED_CODE], fix_code[FIXED_CODE_1]); mode_tmp = path_mode ? 
2 : 1; if (enable) { /* some preparations before bist test */ hisi_sas_bist_test_prep_v3_hw(hisi_hba); /* set linkrate of bit test*/ reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, PROG_PHY_LINK_RATE); reg_val &= ~CFG_PROG_OOB_PHY_LINK_RATE_MSK; reg_val |= (linkrate << CFG_PROG_OOB_PHY_LINK_RATE_OFF); hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE, reg_val); /* set code mode of bit test */ reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, SAS_PHY_BIST_CTRL); reg_val &= ~(CFG_BIST_MODE_SEL_MSK | CFG_LOOP_TEST_MODE_MSK | CFG_RX_BIST_EN_MSK | CFG_TX_BIST_EN_MSK | CFG_BIST_TEST_MSK); reg_val |= ((code_mode << CFG_BIST_MODE_SEL_OFF) | (mode_tmp << CFG_LOOP_TEST_MODE_OFF) | CFG_BIST_TEST_MSK); hisi_sas_phy_write32(hisi_hba, phy_no, SAS_PHY_BIST_CTRL, reg_val); /* set the bist init value */ if (code_mode == HISI_SAS_BIST_CODE_MODE_FIXED_DATA) { reg_val = hisi_hba->debugfs_bist_fixed_code[0]; hisi_sas_phy_write32(hisi_hba, phy_no, SAS_PHY_BIST_CODE, reg_val); reg_val = hisi_hba->debugfs_bist_fixed_code[1]; hisi_sas_phy_write32(hisi_hba, phy_no, SAS_PHY_BIST_CODE1, reg_val); } else { hisi_sas_phy_write32(hisi_hba, phy_no, SAS_PHY_BIST_CODE, SAS_PHY_BIST_CODE_INIT); hisi_sas_phy_write32(hisi_hba, phy_no, SAS_PHY_BIST_CODE1, SAS_PHY_BIST_CODE1_INIT); } mdelay(100); reg_val |= (CFG_RX_BIST_EN_MSK | CFG_TX_BIST_EN_MSK); hisi_sas_phy_write32(hisi_hba, phy_no, SAS_PHY_BIST_CTRL, reg_val); /* clear error bit */ mdelay(100); hisi_sas_phy_read32(hisi_hba, phy_no, SAS_BIST_ERR_CNT); } else { /* disable bist test and recover it */ hisi_hba->debugfs_bist_cnt += hisi_sas_phy_read32(hisi_hba, phy_no, SAS_BIST_ERR_CNT); hisi_sas_bist_test_restore_v3_hw(hisi_hba); } return 0; } static void hisi_sas_map_queues(struct Scsi_Host *shost) { struct hisi_hba *hisi_hba = shost_priv(shost); struct blk_mq_queue_map *qmap; int i, qoff; for (i = 0, qoff = 0; i < shost->nr_maps; i++) { qmap = &shost->tag_set.map[i]; if (i == HCTX_TYPE_DEFAULT) { qmap->nr_queues = hisi_hba->cq_nvecs; } else if (i == HCTX_TYPE_POLL) { qmap->nr_queues = hisi_hba->iopoll_q_cnt; } else { qmap->nr_queues = 0; continue; } /* At least one interrupt hardware queue */ if (!qmap->nr_queues) WARN_ON(i == HCTX_TYPE_DEFAULT); qmap->queue_offset = qoff; if (i == HCTX_TYPE_POLL) blk_mq_map_queues(qmap); else blk_mq_pci_map_queues(qmap, hisi_hba->pci_dev, BASE_VECTORS_V3_HW); qoff += qmap->nr_queues; } } static const struct scsi_host_template sht_v3_hw = { .name = DRV_NAME, .proc_name = DRV_NAME, .module = THIS_MODULE, .queuecommand = sas_queuecommand, .dma_need_drain = ata_scsi_dma_need_drain, .target_alloc = sas_target_alloc, .slave_configure = slave_configure_v3_hw, .scan_finished = hisi_sas_scan_finished, .scan_start = hisi_sas_scan_start, .map_queues = hisi_sas_map_queues, .change_queue_depth = sas_change_queue_depth, .bios_param = sas_bios_param, .this_id = -1, .sg_tablesize = HISI_SAS_SGE_PAGE_CNT, .sg_prot_tablesize = HISI_SAS_SGE_PAGE_CNT, .max_sectors = SCSI_DEFAULT_MAX_SECTORS, .eh_device_reset_handler = sas_eh_device_reset_handler, .eh_target_reset_handler = sas_eh_target_reset_handler, .slave_alloc = hisi_sas_slave_alloc, .target_destroy = sas_target_destroy, .ioctl = sas_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = sas_ioctl, #endif .shost_groups = host_v3_hw_groups, .tag_alloc_policy = BLK_TAG_ALLOC_RR, .host_reset = hisi_sas_host_reset, .host_tagset = 1, .mq_poll = queue_complete_v3_hw, }; static const struct hisi_sas_hw hisi_sas_v3_hw = { .setup_itct = setup_itct_v3_hw, .get_wideport_bitmap = get_wideport_bitmap_v3_hw, 
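/* completion header geometry, ITCT teardown and per-protocol command preparation hooks */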
.complete_hdr_size = sizeof(struct hisi_sas_complete_v3_hdr), .clear_itct = clear_itct_v3_hw, .sl_notify_ssp = sl_notify_ssp_v3_hw, .prep_ssp = prep_ssp_v3_hw, .prep_smp = prep_smp_v3_hw, .prep_stp = prep_ata_v3_hw, .prep_abort = prep_abort_v3_hw, .start_delivery = start_delivery_v3_hw, .phys_init = phys_init_v3_hw, .phy_start = start_phy_v3_hw, .phy_disable = disable_phy_v3_hw, .phy_hard_reset = phy_hard_reset_v3_hw, .phy_get_max_linkrate = phy_get_max_linkrate_v3_hw, .phy_set_linkrate = phy_set_linkrate_v3_hw, .dereg_device = dereg_device_v3_hw, .soft_reset = soft_reset_v3_hw, .get_phys_state = get_phys_state_v3_hw, .get_events = phy_get_events_v3_hw, .write_gpio = write_gpio_v3_hw, .wait_cmds_complete_timeout = wait_cmds_complete_timeout_v3_hw, .debugfs_snapshot_regs = debugfs_snapshot_regs_v3_hw, }; static struct Scsi_Host * hisi_sas_shost_alloc_pci(struct pci_dev *pdev) { struct Scsi_Host *shost; struct hisi_hba *hisi_hba; struct device *dev = &pdev->dev; shost = scsi_host_alloc(&sht_v3_hw, sizeof(*hisi_hba)); if (!shost) { dev_err(dev, "shost alloc failed\n"); return NULL; } hisi_hba = shost_priv(shost); INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler); INIT_WORK(&hisi_hba->debugfs_work, debugfs_work_handler_v3_hw); hisi_hba->hw = &hisi_sas_v3_hw; hisi_hba->pci_dev = pdev; hisi_hba->dev = dev; hisi_hba->shost = shost; SHOST_TO_SAS_HA(shost) = &hisi_hba->sha; if (prot_mask & ~HISI_SAS_PROT_MASK) dev_err(dev, "unsupported protection mask 0x%x, using default (0x0)\n", prot_mask); else hisi_hba->prot_mask = prot_mask; if (hisi_sas_get_fw_info(hisi_hba) < 0) goto err_out; if (experimental_iopoll_q_cnt < 0 || experimental_iopoll_q_cnt >= hisi_hba->queue_count) dev_err(dev, "iopoll queue count %d cannot exceed or equal 16, using default 0\n", experimental_iopoll_q_cnt); else hisi_hba->iopoll_q_cnt = experimental_iopoll_q_cnt; if (hisi_sas_alloc(hisi_hba)) { hisi_sas_free(hisi_hba); goto err_out; } return shost; err_out: scsi_host_put(shost); dev_err(dev, "shost alloc failed\n"); return NULL; } static void debugfs_snapshot_cq_reg_v3_hw(struct hisi_hba *hisi_hba) { int queue_entry_size = hisi_hba->hw->complete_hdr_size; int dump_index = hisi_hba->debugfs_dump_index; int i; for (i = 0; i < hisi_hba->queue_count; i++) memcpy(hisi_hba->debugfs_cq[dump_index][i].complete_hdr, hisi_hba->complete_hdr[i], HISI_SAS_QUEUE_SLOTS * queue_entry_size); } static void debugfs_snapshot_dq_reg_v3_hw(struct hisi_hba *hisi_hba) { int queue_entry_size = sizeof(struct hisi_sas_cmd_hdr); int dump_index = hisi_hba->debugfs_dump_index; int i; for (i = 0; i < hisi_hba->queue_count; i++) { struct hisi_sas_cmd_hdr *debugfs_cmd_hdr, *cmd_hdr; int j; debugfs_cmd_hdr = hisi_hba->debugfs_dq[dump_index][i].hdr; cmd_hdr = hisi_hba->cmd_hdr[i]; for (j = 0; j < HISI_SAS_QUEUE_SLOTS; j++) memcpy(&debugfs_cmd_hdr[j], &cmd_hdr[j], queue_entry_size); } } static void debugfs_snapshot_port_reg_v3_hw(struct hisi_hba *hisi_hba) { int dump_index = hisi_hba->debugfs_dump_index; const struct hisi_sas_debugfs_reg *port = &debugfs_port_reg; int i, phy_cnt; u32 offset; u32 *databuf; for (phy_cnt = 0; phy_cnt < hisi_hba->n_phy; phy_cnt++) { databuf = hisi_hba->debugfs_port_reg[dump_index][phy_cnt].data; for (i = 0; i < port->count; i++, databuf++) { offset = port->base_off + 4 * i; *databuf = hisi_sas_phy_read32(hisi_hba, phy_cnt, offset); } } } static void debugfs_snapshot_global_reg_v3_hw(struct hisi_hba *hisi_hba) { int dump_index = hisi_hba->debugfs_dump_index; u32 *databuf = 
hisi_hba->debugfs_regs[dump_index][DEBUGFS_GLOBAL].data; int i; for (i = 0; i < debugfs_global_reg.count; i++, databuf++) *databuf = hisi_sas_read32(hisi_hba, 4 * i); } static void debugfs_snapshot_axi_reg_v3_hw(struct hisi_hba *hisi_hba) { int dump_index = hisi_hba->debugfs_dump_index; u32 *databuf = hisi_hba->debugfs_regs[dump_index][DEBUGFS_AXI].data; const struct hisi_sas_debugfs_reg *axi = &debugfs_axi_reg; int i; for (i = 0; i < axi->count; i++, databuf++) *databuf = hisi_sas_read32(hisi_hba, 4 * i + axi->base_off); } static void debugfs_snapshot_ras_reg_v3_hw(struct hisi_hba *hisi_hba) { int dump_index = hisi_hba->debugfs_dump_index; u32 *databuf = hisi_hba->debugfs_regs[dump_index][DEBUGFS_RAS].data; const struct hisi_sas_debugfs_reg *ras = &debugfs_ras_reg; int i; for (i = 0; i < ras->count; i++, databuf++) *databuf = hisi_sas_read32(hisi_hba, 4 * i + ras->base_off); } static void debugfs_snapshot_itct_reg_v3_hw(struct hisi_hba *hisi_hba) { int dump_index = hisi_hba->debugfs_dump_index; void *cachebuf = hisi_hba->debugfs_itct_cache[dump_index].cache; void *databuf = hisi_hba->debugfs_itct[dump_index].itct; struct hisi_sas_itct *itct; int i; read_iost_itct_cache_v3_hw(hisi_hba, HISI_SAS_ITCT_CACHE, cachebuf); itct = hisi_hba->itct; for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, itct++) { memcpy(databuf, itct, sizeof(struct hisi_sas_itct)); databuf += sizeof(struct hisi_sas_itct); } } static void debugfs_snapshot_iost_reg_v3_hw(struct hisi_hba *hisi_hba) { int dump_index = hisi_hba->debugfs_dump_index; int max_command_entries = HISI_SAS_MAX_COMMANDS; void *cachebuf = hisi_hba->debugfs_iost_cache[dump_index].cache; void *databuf = hisi_hba->debugfs_iost[dump_index].iost; struct hisi_sas_iost *iost; int i; read_iost_itct_cache_v3_hw(hisi_hba, HISI_SAS_IOST_CACHE, cachebuf); iost = hisi_hba->iost; for (i = 0; i < max_command_entries; i++, iost++) { memcpy(databuf, iost, sizeof(struct hisi_sas_iost)); databuf += sizeof(struct hisi_sas_iost); } } static const char * debugfs_to_reg_name_v3_hw(int off, int base_off, const struct hisi_sas_debugfs_reg_lu *lu) { for (; lu->name; lu++) { if (off == lu->off - base_off) return lu->name; } return NULL; } static void debugfs_print_reg_v3_hw(u32 *regs_val, struct seq_file *s, const struct hisi_sas_debugfs_reg *reg) { int i; for (i = 0; i < reg->count; i++) { int off = i * 4; const char *name; name = debugfs_to_reg_name_v3_hw(off, reg->base_off, reg->lu); if (name) seq_printf(s, "0x%08x 0x%08x %s\n", off, regs_val[i], name); else seq_printf(s, "0x%08x 0x%08x\n", off, regs_val[i]); } } static int debugfs_global_v3_hw_show(struct seq_file *s, void *p) { struct hisi_sas_debugfs_regs *global = s->private; debugfs_print_reg_v3_hw(global->data, s, &debugfs_global_reg); return 0; } DEFINE_SHOW_ATTRIBUTE(debugfs_global_v3_hw); static int debugfs_axi_v3_hw_show(struct seq_file *s, void *p) { struct hisi_sas_debugfs_regs *axi = s->private; debugfs_print_reg_v3_hw(axi->data, s, &debugfs_axi_reg); return 0; } DEFINE_SHOW_ATTRIBUTE(debugfs_axi_v3_hw); static int debugfs_ras_v3_hw_show(struct seq_file *s, void *p) { struct hisi_sas_debugfs_regs *ras = s->private; debugfs_print_reg_v3_hw(ras->data, s, &debugfs_ras_reg); return 0; } DEFINE_SHOW_ATTRIBUTE(debugfs_ras_v3_hw); static int debugfs_port_v3_hw_show(struct seq_file *s, void *p) { struct hisi_sas_debugfs_port *port = s->private; const struct hisi_sas_debugfs_reg *reg_port = &debugfs_port_reg; debugfs_print_reg_v3_hw(port->data, s, reg_port); return 0; } DEFINE_SHOW_ATTRIBUTE(debugfs_port_v3_hw); static void
debugfs_show_row_64_v3_hw(struct seq_file *s, int index, int sz, __le64 *ptr) { int i; /* completion header size not fixed per HW version */ seq_printf(s, "index %04d:\n\t", index); for (i = 1; i <= sz / 8; i++, ptr++) { seq_printf(s, " 0x%016llx", le64_to_cpu(*ptr)); if (!(i % 2)) seq_puts(s, "\n\t"); } seq_puts(s, "\n"); } static void debugfs_show_row_32_v3_hw(struct seq_file *s, int index, int sz, __le32 *ptr) { int i; /* completion header size not fixed per HW version */ seq_printf(s, "index %04d:\n\t", index); for (i = 1; i <= sz / 4; i++, ptr++) { seq_printf(s, " 0x%08x", le32_to_cpu(*ptr)); if (!(i % 4)) seq_puts(s, "\n\t"); } seq_puts(s, "\n"); } static void debugfs_cq_show_slot_v3_hw(struct seq_file *s, int slot, struct hisi_sas_debugfs_cq *debugfs_cq) { struct hisi_sas_cq *cq = debugfs_cq->cq; struct hisi_hba *hisi_hba = cq->hisi_hba; __le32 *complete_hdr = debugfs_cq->complete_hdr + (hisi_hba->hw->complete_hdr_size * slot); debugfs_show_row_32_v3_hw(s, slot, hisi_hba->hw->complete_hdr_size, complete_hdr); } static int debugfs_cq_v3_hw_show(struct seq_file *s, void *p) { struct hisi_sas_debugfs_cq *debugfs_cq = s->private; int slot; for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++) debugfs_cq_show_slot_v3_hw(s, slot, debugfs_cq); return 0; } DEFINE_SHOW_ATTRIBUTE(debugfs_cq_v3_hw); static void debugfs_dq_show_slot_v3_hw(struct seq_file *s, int slot, void *dq_ptr) { struct hisi_sas_debugfs_dq *debugfs_dq = dq_ptr; void *cmd_queue = debugfs_dq->hdr; __le32 *cmd_hdr = cmd_queue + sizeof(struct hisi_sas_cmd_hdr) * slot; debugfs_show_row_32_v3_hw(s, slot, sizeof(struct hisi_sas_cmd_hdr), cmd_hdr); } static int debugfs_dq_v3_hw_show(struct seq_file *s, void *p) { int slot; for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++) debugfs_dq_show_slot_v3_hw(s, slot, s->private); return 0; } DEFINE_SHOW_ATTRIBUTE(debugfs_dq_v3_hw); static int debugfs_iost_v3_hw_show(struct seq_file *s, void *p) { struct hisi_sas_debugfs_iost *debugfs_iost = s->private; struct hisi_sas_iost *iost = debugfs_iost->iost; int i, max_command_entries = HISI_SAS_MAX_COMMANDS; for (i = 0; i < max_command_entries; i++, iost++) { __le64 *data = &iost->qw0; debugfs_show_row_64_v3_hw(s, i, sizeof(*iost), data); } return 0; } DEFINE_SHOW_ATTRIBUTE(debugfs_iost_v3_hw); static int debugfs_iost_cache_v3_hw_show(struct seq_file *s, void *p) { struct hisi_sas_debugfs_iost_cache *debugfs_iost_cache = s->private; struct hisi_sas_iost_itct_cache *iost_cache = debugfs_iost_cache->cache; u32 cache_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * 4; int i, tab_idx; __le64 *iost; for (i = 0; i < HISI_SAS_IOST_ITCT_CACHE_NUM; i++, iost_cache++) { /* * Data struct of IOST cache: * Data[1]: BIT0~15: Table index * Bit16: Valid mask * Data[2]~[9]: IOST table */ tab_idx = (iost_cache->data[1] & 0xffff); iost = (__le64 *)iost_cache; debugfs_show_row_64_v3_hw(s, tab_idx, cache_size, iost); } return 0; } DEFINE_SHOW_ATTRIBUTE(debugfs_iost_cache_v3_hw); static int debugfs_itct_v3_hw_show(struct seq_file *s, void *p) { int i; struct hisi_sas_debugfs_itct *debugfs_itct = s->private; struct hisi_sas_itct *itct = debugfs_itct->itct; for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, itct++) { __le64 *data = &itct->qw0; debugfs_show_row_64_v3_hw(s, i, sizeof(*itct), data); } return 0; } DEFINE_SHOW_ATTRIBUTE(debugfs_itct_v3_hw); static int debugfs_itct_cache_v3_hw_show(struct seq_file *s, void *p) { struct hisi_sas_debugfs_itct_cache *debugfs_itct_cache = s->private; struct hisi_sas_iost_itct_cache *itct_cache = debugfs_itct_cache->cache; u32 cache_size = 
HISI_SAS_IOST_ITCT_CACHE_DW_SZ * 4; int i, tab_idx; __le64 *itct; for (i = 0; i < HISI_SAS_IOST_ITCT_CACHE_NUM; i++, itct_cache++) { /* * Data struct of ITCT cache: * Data[1]: BIT0~15: Table index * Bit16: Valid mask * Data[2]~[9]: ITCT table */ tab_idx = itct_cache->data[1] & 0xffff; itct = (__le64 *)itct_cache; debugfs_show_row_64_v3_hw(s, tab_idx, cache_size, itct); } return 0; } DEFINE_SHOW_ATTRIBUTE(debugfs_itct_cache_v3_hw); static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba) { u64 *debugfs_timestamp; int dump_index = hisi_hba->debugfs_dump_index; struct dentry *dump_dentry; struct dentry *dentry; char name[256]; int p; int c; int d; snprintf(name, 256, "%d", dump_index); dump_dentry = debugfs_create_dir(name, hisi_hba->debugfs_dump_dentry); debugfs_timestamp = &hisi_hba->debugfs_timestamp[dump_index]; debugfs_create_u64("timestamp", 0400, dump_dentry, debugfs_timestamp); debugfs_create_file("global", 0400, dump_dentry, &hisi_hba->debugfs_regs[dump_index][DEBUGFS_GLOBAL], &debugfs_global_v3_hw_fops); /* Create port dir and files */ dentry = debugfs_create_dir("port", dump_dentry); for (p = 0; p < hisi_hba->n_phy; p++) { snprintf(name, 256, "%d", p); debugfs_create_file(name, 0400, dentry, &hisi_hba->debugfs_port_reg[dump_index][p], &debugfs_port_v3_hw_fops); } /* Create CQ dir and files */ dentry = debugfs_create_dir("cq", dump_dentry); for (c = 0; c < hisi_hba->queue_count; c++) { snprintf(name, 256, "%d", c); debugfs_create_file(name, 0400, dentry, &hisi_hba->debugfs_cq[dump_index][c], &debugfs_cq_v3_hw_fops); } /* Create DQ dir and files */ dentry = debugfs_create_dir("dq", dump_dentry); for (d = 0; d < hisi_hba->queue_count; d++) { snprintf(name, 256, "%d", d); debugfs_create_file(name, 0400, dentry, &hisi_hba->debugfs_dq[dump_index][d], &debugfs_dq_v3_hw_fops); } debugfs_create_file("iost", 0400, dump_dentry, &hisi_hba->debugfs_iost[dump_index], &debugfs_iost_v3_hw_fops); debugfs_create_file("iost_cache", 0400, dump_dentry, &hisi_hba->debugfs_iost_cache[dump_index], &debugfs_iost_cache_v3_hw_fops); debugfs_create_file("itct", 0400, dump_dentry, &hisi_hba->debugfs_itct[dump_index], &debugfs_itct_v3_hw_fops); debugfs_create_file("itct_cache", 0400, dump_dentry, &hisi_hba->debugfs_itct_cache[dump_index], &debugfs_itct_cache_v3_hw_fops); debugfs_create_file("axi", 0400, dump_dentry, &hisi_hba->debugfs_regs[dump_index][DEBUGFS_AXI], &debugfs_axi_v3_hw_fops); debugfs_create_file("ras", 0400, dump_dentry, &hisi_hba->debugfs_regs[dump_index][DEBUGFS_RAS], &debugfs_ras_v3_hw_fops); } static void debugfs_snapshot_regs_v3_hw(struct hisi_hba *hisi_hba) { int debugfs_dump_index = hisi_hba->debugfs_dump_index; struct device *dev = hisi_hba->dev; u64 timestamp = local_clock(); if (debugfs_dump_index >= hisi_sas_debugfs_dump_count) { dev_warn(dev, "dump count exceeded!\n"); return; } do_div(timestamp, NSEC_PER_MSEC); hisi_hba->debugfs_timestamp[debugfs_dump_index] = timestamp; debugfs_snapshot_prepare_v3_hw(hisi_hba); debugfs_snapshot_global_reg_v3_hw(hisi_hba); debugfs_snapshot_port_reg_v3_hw(hisi_hba); debugfs_snapshot_axi_reg_v3_hw(hisi_hba); debugfs_snapshot_ras_reg_v3_hw(hisi_hba); debugfs_snapshot_cq_reg_v3_hw(hisi_hba); debugfs_snapshot_dq_reg_v3_hw(hisi_hba); debugfs_snapshot_itct_reg_v3_hw(hisi_hba); debugfs_snapshot_iost_reg_v3_hw(hisi_hba); debugfs_create_files_v3_hw(hisi_hba); debugfs_snapshot_restore_v3_hw(hisi_hba); hisi_hba->debugfs_dump_index++; } static ssize_t debugfs_trigger_dump_v3_hw_write(struct file *file, const char __user *user_buf, size_t count, loff_t 
*ppos) { struct hisi_hba *hisi_hba = file->f_inode->i_private; char buf[8]; if (hisi_hba->debugfs_dump_index >= hisi_sas_debugfs_dump_count) return -EFAULT; if (count > 8) return -EFAULT; if (copy_from_user(buf, user_buf, count)) return -EFAULT; if (buf[0] != '1') return -EFAULT; queue_work(hisi_hba->wq, &hisi_hba->debugfs_work); return count; } static const struct file_operations debugfs_trigger_dump_v3_hw_fops = { .write = &debugfs_trigger_dump_v3_hw_write, .owner = THIS_MODULE, }; enum { HISI_SAS_BIST_LOOPBACK_MODE_DIGITAL = 0, HISI_SAS_BIST_LOOPBACK_MODE_SERDES, HISI_SAS_BIST_LOOPBACK_MODE_REMOTE, }; static const struct { int value; char *name; } debugfs_loop_linkrate_v3_hw[] = { { SAS_LINK_RATE_1_5_GBPS, "1.5 Gbit" }, { SAS_LINK_RATE_3_0_GBPS, "3.0 Gbit" }, { SAS_LINK_RATE_6_0_GBPS, "6.0 Gbit" }, { SAS_LINK_RATE_12_0_GBPS, "12.0 Gbit" }, }; static int debugfs_bist_linkrate_v3_hw_show(struct seq_file *s, void *p) { struct hisi_hba *hisi_hba = s->private; int i; for (i = 0; i < ARRAY_SIZE(debugfs_loop_linkrate_v3_hw); i++) { int match = (hisi_hba->debugfs_bist_linkrate == debugfs_loop_linkrate_v3_hw[i].value); seq_printf(s, "%s%s%s ", match ? "[" : "", debugfs_loop_linkrate_v3_hw[i].name, match ? "]" : ""); } seq_puts(s, "\n"); return 0; } static ssize_t debugfs_bist_linkrate_v3_hw_write(struct file *filp, const char __user *buf, size_t count, loff_t *ppos) { struct seq_file *m = filp->private_data; struct hisi_hba *hisi_hba = m->private; char kbuf[16] = {}, *pkbuf; bool found = false; int i; if (hisi_hba->debugfs_bist_enable) return -EPERM; if (count >= sizeof(kbuf)) return -EOVERFLOW; if (copy_from_user(kbuf, buf, count)) return -EINVAL; pkbuf = strstrip(kbuf); for (i = 0; i < ARRAY_SIZE(debugfs_loop_linkrate_v3_hw); i++) { if (!strncmp(debugfs_loop_linkrate_v3_hw[i].name, pkbuf, 16)) { hisi_hba->debugfs_bist_linkrate = debugfs_loop_linkrate_v3_hw[i].value; found = true; break; } } if (!found) return -EINVAL; return count; } static int debugfs_bist_linkrate_v3_hw_open(struct inode *inode, struct file *filp) { return single_open(filp, debugfs_bist_linkrate_v3_hw_show, inode->i_private); } static const struct file_operations debugfs_bist_linkrate_v3_hw_fops = { .open = debugfs_bist_linkrate_v3_hw_open, .read = seq_read, .write = debugfs_bist_linkrate_v3_hw_write, .llseek = seq_lseek, .release = single_release, .owner = THIS_MODULE, }; static const struct { int value; char *name; } debugfs_loop_code_mode_v3_hw[] = { { HISI_SAS_BIST_CODE_MODE_PRBS7, "PRBS7" }, { HISI_SAS_BIST_CODE_MODE_PRBS23, "PRBS23" }, { HISI_SAS_BIST_CODE_MODE_PRBS31, "PRBS31" }, { HISI_SAS_BIST_CODE_MODE_JTPAT, "JTPAT" }, { HISI_SAS_BIST_CODE_MODE_CJTPAT, "CJTPAT" }, { HISI_SAS_BIST_CODE_MODE_SCRAMBED_0, "SCRAMBED_0" }, { HISI_SAS_BIST_CODE_MODE_TRAIN, "TRAIN" }, { HISI_SAS_BIST_CODE_MODE_TRAIN_DONE, "TRAIN_DONE" }, { HISI_SAS_BIST_CODE_MODE_HFTP, "HFTP" }, { HISI_SAS_BIST_CODE_MODE_MFTP, "MFTP" }, { HISI_SAS_BIST_CODE_MODE_LFTP, "LFTP" }, { HISI_SAS_BIST_CODE_MODE_FIXED_DATA, "FIXED_DATA" }, }; static int debugfs_bist_code_mode_v3_hw_show(struct seq_file *s, void *p) { struct hisi_hba *hisi_hba = s->private; int i; for (i = 0; i < ARRAY_SIZE(debugfs_loop_code_mode_v3_hw); i++) { int match = (hisi_hba->debugfs_bist_code_mode == debugfs_loop_code_mode_v3_hw[i].value); seq_printf(s, "%s%s%s ", match ? "[" : "", debugfs_loop_code_mode_v3_hw[i].name, match ? 
"]" : ""); } seq_puts(s, "\n"); return 0; } static ssize_t debugfs_bist_code_mode_v3_hw_write(struct file *filp, const char __user *buf, size_t count, loff_t *ppos) { struct seq_file *m = filp->private_data; struct hisi_hba *hisi_hba = m->private; char kbuf[16] = {}, *pkbuf; bool found = false; int i; if (hisi_hba->debugfs_bist_enable) return -EPERM; if (count >= sizeof(kbuf)) return -EINVAL; if (copy_from_user(kbuf, buf, count)) return -EOVERFLOW; pkbuf = strstrip(kbuf); for (i = 0; i < ARRAY_SIZE(debugfs_loop_code_mode_v3_hw); i++) { if (!strncmp(debugfs_loop_code_mode_v3_hw[i].name, pkbuf, 16)) { hisi_hba->debugfs_bist_code_mode = debugfs_loop_code_mode_v3_hw[i].value; found = true; break; } } if (!found) return -EINVAL; return count; } static int debugfs_bist_code_mode_v3_hw_open(struct inode *inode, struct file *filp) { return single_open(filp, debugfs_bist_code_mode_v3_hw_show, inode->i_private); } static const struct file_operations debugfs_bist_code_mode_v3_hw_fops = { .open = debugfs_bist_code_mode_v3_hw_open, .read = seq_read, .write = debugfs_bist_code_mode_v3_hw_write, .llseek = seq_lseek, .release = single_release, .owner = THIS_MODULE, }; static ssize_t debugfs_bist_phy_v3_hw_write(struct file *filp, const char __user *buf, size_t count, loff_t *ppos) { struct seq_file *m = filp->private_data; struct hisi_hba *hisi_hba = m->private; unsigned int phy_no; int val; if (hisi_hba->debugfs_bist_enable) return -EPERM; val = kstrtouint_from_user(buf, count, 0, &phy_no); if (val) return val; if (phy_no >= hisi_hba->n_phy) return -EINVAL; hisi_hba->debugfs_bist_phy_no = phy_no; return count; } static int debugfs_bist_phy_v3_hw_show(struct seq_file *s, void *p) { struct hisi_hba *hisi_hba = s->private; seq_printf(s, "%d\n", hisi_hba->debugfs_bist_phy_no); return 0; } static int debugfs_bist_phy_v3_hw_open(struct inode *inode, struct file *filp) { return single_open(filp, debugfs_bist_phy_v3_hw_show, inode->i_private); } static const struct file_operations debugfs_bist_phy_v3_hw_fops = { .open = debugfs_bist_phy_v3_hw_open, .read = seq_read, .write = debugfs_bist_phy_v3_hw_write, .llseek = seq_lseek, .release = single_release, .owner = THIS_MODULE, }; static ssize_t debugfs_bist_cnt_v3_hw_write(struct file *filp, const char __user *buf, size_t count, loff_t *ppos) { struct seq_file *m = filp->private_data; struct hisi_hba *hisi_hba = m->private; unsigned int cnt; int val; if (hisi_hba->debugfs_bist_enable) return -EPERM; val = kstrtouint_from_user(buf, count, 0, &cnt); if (val) return val; if (cnt) return -EINVAL; hisi_hba->debugfs_bist_cnt = 0; return count; } static int debugfs_bist_cnt_v3_hw_show(struct seq_file *s, void *p) { struct hisi_hba *hisi_hba = s->private; seq_printf(s, "%u\n", hisi_hba->debugfs_bist_cnt); return 0; } static int debugfs_bist_cnt_v3_hw_open(struct inode *inode, struct file *filp) { return single_open(filp, debugfs_bist_cnt_v3_hw_show, inode->i_private); } static const struct file_operations debugfs_bist_cnt_v3_hw_ops = { .open = debugfs_bist_cnt_v3_hw_open, .read = seq_read, .write = debugfs_bist_cnt_v3_hw_write, .llseek = seq_lseek, .release = single_release, .owner = THIS_MODULE, }; static const struct { int value; char *name; } debugfs_loop_modes_v3_hw[] = { { HISI_SAS_BIST_LOOPBACK_MODE_DIGITAL, "digital" }, { HISI_SAS_BIST_LOOPBACK_MODE_SERDES, "serdes" }, { HISI_SAS_BIST_LOOPBACK_MODE_REMOTE, "remote" }, }; static int debugfs_bist_mode_v3_hw_show(struct seq_file *s, void *p) { struct hisi_hba *hisi_hba = s->private; int i; for (i = 0; i < 
ARRAY_SIZE(debugfs_loop_modes_v3_hw); i++) { int match = (hisi_hba->debugfs_bist_mode == debugfs_loop_modes_v3_hw[i].value); seq_printf(s, "%s%s%s ", match ? "[" : "", debugfs_loop_modes_v3_hw[i].name, match ? "]" : ""); } seq_puts(s, "\n"); return 0; } static ssize_t debugfs_bist_mode_v3_hw_write(struct file *filp, const char __user *buf, size_t count, loff_t *ppos) { struct seq_file *m = filp->private_data; struct hisi_hba *hisi_hba = m->private; char kbuf[16] = {}, *pkbuf; bool found = false; int i; if (hisi_hba->debugfs_bist_enable) return -EPERM; if (count >= sizeof(kbuf)) return -EINVAL; if (copy_from_user(kbuf, buf, count)) return -EOVERFLOW; pkbuf = strstrip(kbuf); for (i = 0; i < ARRAY_SIZE(debugfs_loop_modes_v3_hw); i++) { if (!strncmp(debugfs_loop_modes_v3_hw[i].name, pkbuf, 16)) { hisi_hba->debugfs_bist_mode = debugfs_loop_modes_v3_hw[i].value; found = true; break; } } if (!found) return -EINVAL; return count; } static int debugfs_bist_mode_v3_hw_open(struct inode *inode, struct file *filp) { return single_open(filp, debugfs_bist_mode_v3_hw_show, inode->i_private); } static const struct file_operations debugfs_bist_mode_v3_hw_fops = { .open = debugfs_bist_mode_v3_hw_open, .read = seq_read, .write = debugfs_bist_mode_v3_hw_write, .llseek = seq_lseek, .release = single_release, .owner = THIS_MODULE, }; static ssize_t debugfs_bist_enable_v3_hw_write(struct file *filp, const char __user *buf, size_t count, loff_t *ppos) { struct seq_file *m = filp->private_data; struct hisi_hba *hisi_hba = m->private; unsigned int enable; int val; val = kstrtouint_from_user(buf, count, 0, &enable); if (val) return val; if (enable > 1) return -EINVAL; if (enable == hisi_hba->debugfs_bist_enable) return count; val = debugfs_set_bist_v3_hw(hisi_hba, enable); if (val < 0) return val; hisi_hba->debugfs_bist_enable = enable; return count; } static int debugfs_bist_enable_v3_hw_show(struct seq_file *s, void *p) { struct hisi_hba *hisi_hba = s->private; seq_printf(s, "%d\n", hisi_hba->debugfs_bist_enable); return 0; } static int debugfs_bist_enable_v3_hw_open(struct inode *inode, struct file *filp) { return single_open(filp, debugfs_bist_enable_v3_hw_show, inode->i_private); } static const struct file_operations debugfs_bist_enable_v3_hw_fops = { .open = debugfs_bist_enable_v3_hw_open, .read = seq_read, .write = debugfs_bist_enable_v3_hw_write, .llseek = seq_lseek, .release = single_release, .owner = THIS_MODULE, }; static const struct { char *name; } debugfs_ffe_name_v3_hw[FFE_CFG_MAX] = { { "SAS_1_5_GBPS" }, { "SAS_3_0_GBPS" }, { "SAS_6_0_GBPS" }, { "SAS_12_0_GBPS" }, { "FFE_RESV" }, { "SATA_1_5_GBPS" }, { "SATA_3_0_GBPS" }, { "SATA_6_0_GBPS" }, }; static ssize_t debugfs_v3_hw_write(struct file *filp, const char __user *buf, size_t count, loff_t *ppos) { struct seq_file *m = filp->private_data; u32 *val = m->private; int res; res = kstrtouint_from_user(buf, count, 0, val); if (res) return res; return count; } static int debugfs_v3_hw_show(struct seq_file *s, void *p) { u32 *val = s->private; seq_printf(s, "0x%x\n", *val); return 0; } static int debugfs_v3_hw_open(struct inode *inode, struct file *filp) { return single_open(filp, debugfs_v3_hw_show, inode->i_private); } static const struct file_operations debugfs_v3_hw_fops = { .open = debugfs_v3_hw_open, .read = seq_read, .write = debugfs_v3_hw_write, .llseek = seq_lseek, .release = single_release, .owner = THIS_MODULE, }; static ssize_t debugfs_phy_down_cnt_v3_hw_write(struct file *filp, const char __user *buf, size_t count, loff_t *ppos) { struct 
seq_file *s = filp->private_data; struct hisi_sas_phy *phy = s->private; unsigned int set_val; int res; res = kstrtouint_from_user(buf, count, 0, &set_val); if (res) return res; if (set_val > 0) return -EINVAL; atomic_set(&phy->down_cnt, 0); return count; } static int debugfs_phy_down_cnt_v3_hw_show(struct seq_file *s, void *p) { struct hisi_sas_phy *phy = s->private; seq_printf(s, "%d\n", atomic_read(&phy->down_cnt)); return 0; } static int debugfs_phy_down_cnt_v3_hw_open(struct inode *inode, struct file *filp) { return single_open(filp, debugfs_phy_down_cnt_v3_hw_show, inode->i_private); } static const struct file_operations debugfs_phy_down_cnt_v3_hw_fops = { .open = debugfs_phy_down_cnt_v3_hw_open, .read = seq_read, .write = debugfs_phy_down_cnt_v3_hw_write, .llseek = seq_lseek, .release = single_release, .owner = THIS_MODULE, }; enum fifo_dump_mode_v3_hw { FIFO_DUMP_FORVER = (1U << 0), FIFO_DUMP_AFTER_TRIGGER = (1U << 1), FIFO_DUMP_UNTILL_TRIGGER = (1U << 2), }; enum fifo_trigger_mode_v3_hw { FIFO_TRIGGER_EDGE = (1U << 0), FIFO_TRIGGER_SAME_LEVEL = (1U << 1), FIFO_TRIGGER_DIFF_LEVEL = (1U << 2), }; static int debugfs_is_fifo_config_valid_v3_hw(struct hisi_sas_phy *phy) { struct hisi_hba *hisi_hba = phy->hisi_hba; if (phy->fifo.signal_sel > 0xf) { dev_info(hisi_hba->dev, "Invalid signal select: %u\n", phy->fifo.signal_sel); return -EINVAL; } switch (phy->fifo.dump_mode) { case FIFO_DUMP_FORVER: case FIFO_DUMP_AFTER_TRIGGER: case FIFO_DUMP_UNTILL_TRIGGER: break; default: dev_info(hisi_hba->dev, "Invalid dump mode: %u\n", phy->fifo.dump_mode); return -EINVAL; } /* when FIFO_DUMP_FORVER, no need to check trigger_mode */ if (phy->fifo.dump_mode == FIFO_DUMP_FORVER) return 0; switch (phy->fifo.trigger_mode) { case FIFO_TRIGGER_EDGE: case FIFO_TRIGGER_SAME_LEVEL: case FIFO_TRIGGER_DIFF_LEVEL: break; default: dev_info(hisi_hba->dev, "Invalid trigger mode: %u\n", phy->fifo.trigger_mode); return -EINVAL; } return 0; } static int debugfs_update_fifo_config_v3_hw(struct hisi_sas_phy *phy) { u32 trigger_mode = phy->fifo.trigger_mode; u32 signal_sel = phy->fifo.signal_sel; u32 dump_mode = phy->fifo.dump_mode; struct hisi_hba *hisi_hba = phy->hisi_hba; int phy_no = phy->sas_phy.id; u32 reg_val; int res; /* Check the validity of trace FIFO configuration */ res = debugfs_is_fifo_config_valid_v3_hw(phy); if (res) return res; reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, DFX_FIFO_CTRL); /* Disable trace FIFO before update configuration */ reg_val |= DFX_FIFO_CTRL_DUMP_DISABLE_MSK; /* Update trace FIFO configuration */ reg_val &= ~(DFX_FIFO_CTRL_DUMP_MODE_MSK | DFX_FIFO_CTRL_SIGNAL_SEL_MSK | DFX_FIFO_CTRL_TRIGGER_MODE_MSK); reg_val |= ((trigger_mode << DFX_FIFO_CTRL_TRIGGER_MODE_OFF) | (dump_mode << DFX_FIFO_CTRL_DUMP_MODE_OFF) | (signal_sel << DFX_FIFO_CTRL_SIGNAL_SEL_OFF)); hisi_sas_phy_write32(hisi_hba, phy_no, DFX_FIFO_CTRL, reg_val); hisi_sas_phy_write32(hisi_hba, phy_no, DFX_FIFO_DUMP_MSK, phy->fifo.dump_msk); hisi_sas_phy_write32(hisi_hba, phy_no, DFX_FIFO_TRIGGER, phy->fifo.trigger); hisi_sas_phy_write32(hisi_hba, phy_no, DFX_FIFO_TRIGGER_MSK, phy->fifo.trigger_msk); /* Enable trace FIFO after updated configuration */ reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, DFX_FIFO_CTRL); reg_val &= ~DFX_FIFO_CTRL_DUMP_DISABLE_MSK; hisi_sas_phy_write32(hisi_hba, phy_no, DFX_FIFO_CTRL, reg_val); return 0; } static ssize_t debugfs_fifo_update_cfg_v3_hw_write(struct file *filp, const char __user *buf, size_t count, loff_t *ppos) { struct hisi_sas_phy *phy = filp->private_data; bool update; int val; val = 
kstrtobool_from_user(buf, count, &update); if (val) return val; if (update != 1) return -EINVAL; val = debugfs_update_fifo_config_v3_hw(phy); if (val) return val; return count; } static const struct file_operations debugfs_fifo_update_cfg_v3_hw_fops = { .open = simple_open, .write = debugfs_fifo_update_cfg_v3_hw_write, .owner = THIS_MODULE, }; static void debugfs_read_fifo_data_v3_hw(struct hisi_sas_phy *phy) { struct hisi_hba *hisi_hba = phy->hisi_hba; u32 *buf = phy->fifo.rd_data; int phy_no = phy->sas_phy.id; u32 val; int i; memset(buf, 0, sizeof(phy->fifo.rd_data)); /* Disable trace FIFO before read data */ val = hisi_sas_phy_read32(hisi_hba, phy_no, DFX_FIFO_CTRL); val |= DFX_FIFO_CTRL_DUMP_DISABLE_MSK; hisi_sas_phy_write32(hisi_hba, phy_no, DFX_FIFO_CTRL, val); for (i = 0; i < HISI_SAS_FIFO_DATA_DW_SIZE; i++) { val = hisi_sas_phy_read32(hisi_hba, phy_no, DFX_FIFO_RD_DATA); buf[i] = val; } /* Enable trace FIFO after read data */ val = hisi_sas_phy_read32(hisi_hba, phy_no, DFX_FIFO_CTRL); val &= ~DFX_FIFO_CTRL_DUMP_DISABLE_MSK; hisi_sas_phy_write32(hisi_hba, phy_no, DFX_FIFO_CTRL, val); } static int debugfs_fifo_data_v3_hw_show(struct seq_file *s, void *p) { struct hisi_sas_phy *phy = s->private; debugfs_read_fifo_data_v3_hw(phy); debugfs_show_row_32_v3_hw(s, 0, HISI_SAS_FIFO_DATA_DW_SIZE * 4, (__le32 *)phy->fifo.rd_data); return 0; } DEFINE_SHOW_ATTRIBUTE(debugfs_fifo_data_v3_hw); static void debugfs_fifo_init_v3_hw(struct hisi_hba *hisi_hba) { int phy_no; hisi_hba->debugfs_fifo_dentry = debugfs_create_dir("fifo", hisi_hba->debugfs_dir); for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) { struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; struct dentry *port_dentry; char name[256]; u32 val; /* get default configuration for trace FIFO */ val = hisi_sas_phy_read32(hisi_hba, phy_no, DFX_FIFO_CTRL); val &= DFX_FIFO_CTRL_DUMP_MODE_MSK; val >>= DFX_FIFO_CTRL_DUMP_MODE_OFF; phy->fifo.dump_mode = val; val = hisi_sas_phy_read32(hisi_hba, phy_no, DFX_FIFO_CTRL); val &= DFX_FIFO_CTRL_TRIGGER_MODE_MSK; val >>= DFX_FIFO_CTRL_TRIGGER_MODE_OFF; phy->fifo.trigger_mode = val; val = hisi_sas_phy_read32(hisi_hba, phy_no, DFX_FIFO_CTRL); val &= DFX_FIFO_CTRL_SIGNAL_SEL_MSK; val >>= DFX_FIFO_CTRL_SIGNAL_SEL_OFF; phy->fifo.signal_sel = val; val = hisi_sas_phy_read32(hisi_hba, phy_no, DFX_FIFO_DUMP_MSK); phy->fifo.dump_msk = val; val = hisi_sas_phy_read32(hisi_hba, phy_no, DFX_FIFO_TRIGGER); phy->fifo.trigger = val; val = hisi_sas_phy_read32(hisi_hba, phy_no, DFX_FIFO_TRIGGER_MSK); phy->fifo.trigger_msk = val; snprintf(name, 256, "%d", phy_no); port_dentry = debugfs_create_dir(name, hisi_hba->debugfs_fifo_dentry); debugfs_create_file("update_config", 0200, port_dentry, phy, &debugfs_fifo_update_cfg_v3_hw_fops); debugfs_create_file("signal_sel", 0600, port_dentry, &phy->fifo.signal_sel, &debugfs_v3_hw_fops); debugfs_create_file("dump_msk", 0600, port_dentry, &phy->fifo.dump_msk, &debugfs_v3_hw_fops); debugfs_create_file("dump_mode", 0600, port_dentry, &phy->fifo.dump_mode, &debugfs_v3_hw_fops); debugfs_create_file("trigger_mode", 0600, port_dentry, &phy->fifo.trigger_mode, &debugfs_v3_hw_fops); debugfs_create_file("trigger", 0600, port_dentry, &phy->fifo.trigger, &debugfs_v3_hw_fops); debugfs_create_file("trigger_msk", 0600, port_dentry, &phy->fifo.trigger_msk, &debugfs_v3_hw_fops); debugfs_create_file("fifo_data", 0400, port_dentry, phy, &debugfs_fifo_data_v3_hw_fops); } } static void debugfs_work_handler_v3_hw(struct work_struct *work) { struct hisi_hba *hisi_hba = container_of(work, struct hisi_hba, 
debugfs_work); debugfs_snapshot_regs_v3_hw(hisi_hba); } static void debugfs_release_v3_hw(struct hisi_hba *hisi_hba, int dump_index) { struct device *dev = hisi_hba->dev; int i; devm_kfree(dev, hisi_hba->debugfs_iost_cache[dump_index].cache); devm_kfree(dev, hisi_hba->debugfs_itct_cache[dump_index].cache); devm_kfree(dev, hisi_hba->debugfs_iost[dump_index].iost); devm_kfree(dev, hisi_hba->debugfs_itct[dump_index].itct); for (i = 0; i < hisi_hba->queue_count; i++) devm_kfree(dev, hisi_hba->debugfs_dq[dump_index][i].hdr); for (i = 0; i < hisi_hba->queue_count; i++) devm_kfree(dev, hisi_hba->debugfs_cq[dump_index][i].complete_hdr); for (i = 0; i < DEBUGFS_REGS_NUM; i++) devm_kfree(dev, hisi_hba->debugfs_regs[dump_index][i].data); for (i = 0; i < hisi_hba->n_phy; i++) devm_kfree(dev, hisi_hba->debugfs_port_reg[dump_index][i].data); } static const struct hisi_sas_debugfs_reg *debugfs_reg_array_v3_hw[DEBUGFS_REGS_NUM] = { [DEBUGFS_GLOBAL] = &debugfs_global_reg, [DEBUGFS_AXI] = &debugfs_axi_reg, [DEBUGFS_RAS] = &debugfs_ras_reg, }; static int debugfs_alloc_v3_hw(struct hisi_hba *hisi_hba, int dump_index) { const struct hisi_sas_hw *hw = hisi_hba->hw; struct device *dev = hisi_hba->dev; int p, c, d, r, i; size_t sz; for (r = 0; r < DEBUGFS_REGS_NUM; r++) { struct hisi_sas_debugfs_regs *regs = &hisi_hba->debugfs_regs[dump_index][r]; sz = debugfs_reg_array_v3_hw[r]->count * 4; regs->data = devm_kmalloc(dev, sz, GFP_KERNEL); if (!regs->data) goto fail; regs->hisi_hba = hisi_hba; } sz = debugfs_port_reg.count * 4; for (p = 0; p < hisi_hba->n_phy; p++) { struct hisi_sas_debugfs_port *port = &hisi_hba->debugfs_port_reg[dump_index][p]; port->data = devm_kmalloc(dev, sz, GFP_KERNEL); if (!port->data) goto fail; port->phy = &hisi_hba->phy[p]; } sz = hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS; for (c = 0; c < hisi_hba->queue_count; c++) { struct hisi_sas_debugfs_cq *cq = &hisi_hba->debugfs_cq[dump_index][c]; cq->complete_hdr = devm_kmalloc(dev, sz, GFP_KERNEL); if (!cq->complete_hdr) goto fail; cq->cq = &hisi_hba->cq[c]; } sz = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS; for (d = 0; d < hisi_hba->queue_count; d++) { struct hisi_sas_debugfs_dq *dq = &hisi_hba->debugfs_dq[dump_index][d]; dq->hdr = devm_kmalloc(dev, sz, GFP_KERNEL); if (!dq->hdr) goto fail; dq->dq = &hisi_hba->dq[d]; } sz = HISI_SAS_MAX_COMMANDS * sizeof(struct hisi_sas_iost); hisi_hba->debugfs_iost[dump_index].iost = devm_kmalloc(dev, sz, GFP_KERNEL); if (!hisi_hba->debugfs_iost[dump_index].iost) goto fail; sz = HISI_SAS_IOST_ITCT_CACHE_NUM * sizeof(struct hisi_sas_iost_itct_cache); hisi_hba->debugfs_iost_cache[dump_index].cache = devm_kmalloc(dev, sz, GFP_KERNEL); if (!hisi_hba->debugfs_iost_cache[dump_index].cache) goto fail; sz = HISI_SAS_IOST_ITCT_CACHE_NUM * sizeof(struct hisi_sas_iost_itct_cache); hisi_hba->debugfs_itct_cache[dump_index].cache = devm_kmalloc(dev, sz, GFP_KERNEL); if (!hisi_hba->debugfs_itct_cache[dump_index].cache) goto fail; /* New memory allocation must be locate before itct */ sz = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct); hisi_hba->debugfs_itct[dump_index].itct = devm_kmalloc(dev, sz, GFP_KERNEL); if (!hisi_hba->debugfs_itct[dump_index].itct) goto fail; return 0; fail: for (i = 0; i < hisi_sas_debugfs_dump_count; i++) debugfs_release_v3_hw(hisi_hba, i); return -ENOMEM; } static void debugfs_phy_down_cnt_init_v3_hw(struct hisi_hba *hisi_hba) { struct dentry *dir = debugfs_create_dir("phy_down_cnt", hisi_hba->debugfs_dir); char name[16]; int phy_no; for (phy_no = 0; phy_no < 
hisi_hba->n_phy; phy_no++) { snprintf(name, 16, "%d", phy_no); debugfs_create_file(name, 0600, dir, &hisi_hba->phy[phy_no], &debugfs_phy_down_cnt_v3_hw_fops); } } static void debugfs_bist_init_v3_hw(struct hisi_hba *hisi_hba) { struct dentry *ports_dentry; int phy_no; hisi_hba->debugfs_bist_dentry = debugfs_create_dir("bist", hisi_hba->debugfs_dir); debugfs_create_file("link_rate", 0600, hisi_hba->debugfs_bist_dentry, hisi_hba, &debugfs_bist_linkrate_v3_hw_fops); debugfs_create_file("code_mode", 0600, hisi_hba->debugfs_bist_dentry, hisi_hba, &debugfs_bist_code_mode_v3_hw_fops); debugfs_create_file("fixed_code", 0600, hisi_hba->debugfs_bist_dentry, &hisi_hba->debugfs_bist_fixed_code[0], &debugfs_v3_hw_fops); debugfs_create_file("fixed_code_1", 0600, hisi_hba->debugfs_bist_dentry, &hisi_hba->debugfs_bist_fixed_code[1], &debugfs_v3_hw_fops); debugfs_create_file("phy_id", 0600, hisi_hba->debugfs_bist_dentry, hisi_hba, &debugfs_bist_phy_v3_hw_fops); debugfs_create_file("cnt", 0600, hisi_hba->debugfs_bist_dentry, hisi_hba, &debugfs_bist_cnt_v3_hw_ops); debugfs_create_file("loopback_mode", 0600, hisi_hba->debugfs_bist_dentry, hisi_hba, &debugfs_bist_mode_v3_hw_fops); debugfs_create_file("enable", 0600, hisi_hba->debugfs_bist_dentry, hisi_hba, &debugfs_bist_enable_v3_hw_fops); ports_dentry = debugfs_create_dir("port", hisi_hba->debugfs_bist_dentry); for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) { struct dentry *port_dentry; struct dentry *ffe_dentry; char name[256]; int i; snprintf(name, 256, "%d", phy_no); port_dentry = debugfs_create_dir(name, ports_dentry); ffe_dentry = debugfs_create_dir("ffe", port_dentry); for (i = 0; i < FFE_CFG_MAX; i++) { if (i == FFE_RESV) continue; debugfs_create_file(debugfs_ffe_name_v3_hw[i].name, 0600, ffe_dentry, &hisi_hba->debugfs_bist_ffe[phy_no][i], &debugfs_v3_hw_fops); } } hisi_hba->debugfs_bist_linkrate = SAS_LINK_RATE_1_5_GBPS; } static void debugfs_init_v3_hw(struct hisi_hba *hisi_hba) { struct device *dev = hisi_hba->dev; int i; hisi_hba->debugfs_dir = debugfs_create_dir(dev_name(dev), hisi_sas_debugfs_dir); debugfs_create_file("trigger_dump", 0200, hisi_hba->debugfs_dir, hisi_hba, &debugfs_trigger_dump_v3_hw_fops); /* create bist structures */ debugfs_bist_init_v3_hw(hisi_hba); hisi_hba->debugfs_dump_dentry = debugfs_create_dir("dump", hisi_hba->debugfs_dir); debugfs_phy_down_cnt_init_v3_hw(hisi_hba); debugfs_fifo_init_v3_hw(hisi_hba); for (i = 0; i < hisi_sas_debugfs_dump_count; i++) { if (debugfs_alloc_v3_hw(hisi_hba, i)) { debugfs_remove_recursive(hisi_hba->debugfs_dir); dev_dbg(dev, "failed to init debugfs!\n"); break; } } } static void debugfs_exit_v3_hw(struct hisi_hba *hisi_hba) { debugfs_remove_recursive(hisi_hba->debugfs_dir); } static int hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct Scsi_Host *shost; struct hisi_hba *hisi_hba; struct device *dev = &pdev->dev; struct asd_sas_phy **arr_phy; struct asd_sas_port **arr_port; struct sas_ha_struct *sha; int rc, phy_nr, port_nr, i; rc = pcim_enable_device(pdev); if (rc) goto err_out; pci_set_master(pdev); rc = pcim_iomap_regions(pdev, 1 << BAR_NO_V3_HW, DRV_NAME); if (rc) goto err_out; rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (rc) { dev_err(dev, "No usable DMA addressing method\n"); rc = -ENODEV; goto err_out; } shost = hisi_sas_shost_alloc_pci(pdev); if (!shost) { rc = -ENOMEM; goto err_out; } sha = SHOST_TO_SAS_HA(shost); hisi_hba = shost_priv(shost); dev_set_drvdata(dev, sha); hisi_hba->regs = pcim_iomap_table(pdev)[BAR_NO_V3_HW]; if 
(!hisi_hba->regs) { dev_err(dev, "cannot map register\n"); rc = -ENOMEM; goto err_out_free_host; } phy_nr = port_nr = hisi_hba->n_phy; arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL); arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL); if (!arr_phy || !arr_port) { rc = -ENOMEM; goto err_out_free_host; } sha->sas_phy = arr_phy; sha->sas_port = arr_port; sha->shost = shost; sha->lldd_ha = hisi_hba; shost->transportt = hisi_sas_stt; shost->max_id = HISI_SAS_MAX_DEVICES; shost->max_lun = ~0; shost->max_channel = 1; shost->max_cmd_len = 16; shost->can_queue = HISI_SAS_UNRESERVED_IPTT; shost->cmd_per_lun = HISI_SAS_UNRESERVED_IPTT; if (hisi_hba->iopoll_q_cnt) shost->nr_maps = 3; else shost->nr_maps = 1; sha->sas_ha_name = DRV_NAME; sha->dev = dev; sha->sas_addr = &hisi_hba->sas_addr[0]; sha->num_phys = hisi_hba->n_phy; for (i = 0; i < hisi_hba->n_phy; i++) { sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy; sha->sas_port[i] = &hisi_hba->port[i].sas_port; } if (hisi_hba->prot_mask) { dev_info(dev, "Registering for DIF/DIX prot_mask=0x%x\n", prot_mask); scsi_host_set_prot(hisi_hba->shost, prot_mask); if (hisi_hba->prot_mask & HISI_SAS_DIX_PROT_MASK) scsi_host_set_guard(hisi_hba->shost, SHOST_DIX_GUARD_CRC); } if (hisi_sas_debugfs_enable) debugfs_init_v3_hw(hisi_hba); rc = interrupt_preinit_v3_hw(hisi_hba); if (rc) goto err_out_undo_debugfs; rc = scsi_add_host(shost, dev); if (rc) goto err_out_undo_debugfs; rc = sas_register_ha(sha); if (rc) goto err_out_remove_host; rc = hisi_sas_v3_init(hisi_hba); if (rc) goto err_out_unregister_ha; scsi_scan_host(shost); pm_runtime_set_autosuspend_delay(dev, 5000); pm_runtime_use_autosuspend(dev); /* * For the situation that there are ATA disks connected with SAS * controller, it additionally creates ata_port which will affect the * child_count of hisi_hba->dev. Even if suspended all the disks, * ata_port is still and the child_count of hisi_hba->dev is not 0. * So use pm_suspend_ignore_children() to ignore the effect to * hisi_hba->dev. */ pm_suspend_ignore_children(dev, true); pm_runtime_put_noidle(&pdev->dev); return 0; err_out_unregister_ha: sas_unregister_ha(sha); err_out_remove_host: scsi_remove_host(shost); err_out_undo_debugfs: debugfs_exit_v3_hw(hisi_hba); err_out_free_host: hisi_sas_free(hisi_hba); scsi_host_put(shost); err_out: return rc; } static void hisi_sas_v3_destroy_irqs(struct pci_dev *pdev, struct hisi_hba *hisi_hba) { int i; devm_free_irq(&pdev->dev, pci_irq_vector(pdev, 1), hisi_hba); devm_free_irq(&pdev->dev, pci_irq_vector(pdev, 2), hisi_hba); devm_free_irq(&pdev->dev, pci_irq_vector(pdev, 11), hisi_hba); for (i = 0; i < hisi_hba->cq_nvecs; i++) { struct hisi_sas_cq *cq = &hisi_hba->cq[i]; int nr = hisi_sas_intr_conv ? 
16 : 16 + i; devm_free_irq(&pdev->dev, pci_irq_vector(pdev, nr), cq); } } static void hisi_sas_v3_remove(struct pci_dev *pdev) { struct device *dev = &pdev->dev; struct sas_ha_struct *sha = dev_get_drvdata(dev); struct hisi_hba *hisi_hba = sha->lldd_ha; struct Scsi_Host *shost = sha->shost; pm_runtime_get_noresume(dev); del_timer_sync(&hisi_hba->timer); sas_unregister_ha(sha); flush_workqueue(hisi_hba->wq); sas_remove_host(shost); hisi_sas_v3_destroy_irqs(pdev, hisi_hba); hisi_sas_free(hisi_hba); debugfs_exit_v3_hw(hisi_hba); scsi_host_put(shost); } static void hisi_sas_reset_prepare_v3_hw(struct pci_dev *pdev) { struct sas_ha_struct *sha = pci_get_drvdata(pdev); struct hisi_hba *hisi_hba = sha->lldd_ha; struct device *dev = hisi_hba->dev; int rc; dev_info(dev, "FLR prepare\n"); down(&hisi_hba->sem); set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags); hisi_sas_controller_reset_prepare(hisi_hba); interrupt_disable_v3_hw(hisi_hba); rc = disable_host_v3_hw(hisi_hba); if (rc) dev_err(dev, "FLR: disable host failed rc=%d\n", rc); } static void hisi_sas_reset_done_v3_hw(struct pci_dev *pdev) { struct sas_ha_struct *sha = pci_get_drvdata(pdev); struct hisi_hba *hisi_hba = sha->lldd_ha; struct device *dev = hisi_hba->dev; int rc; hisi_sas_init_mem(hisi_hba); rc = hw_init_v3_hw(hisi_hba); if (rc) { dev_err(dev, "FLR: hw init failed rc=%d\n", rc); return; } hisi_sas_controller_reset_done(hisi_hba); dev_info(dev, "FLR done\n"); } enum { /* instances of the controller */ hip08, }; static void enable_host_v3_hw(struct hisi_hba *hisi_hba) { u32 reg_val; hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, (u32)((1ULL << hisi_hba->queue_count) - 1)); phys_init_v3_hw(hisi_hba); reg_val = hisi_sas_read32(hisi_hba, AXI_MASTER_CFG_BASE + AM_CTRL_GLOBAL); reg_val &= ~AM_CTRL_SHUTDOWN_REQ_MSK; hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE + AM_CTRL_GLOBAL, reg_val); } static int _suspend_v3_hw(struct device *device) { struct pci_dev *pdev = to_pci_dev(device); struct sas_ha_struct *sha = pci_get_drvdata(pdev); struct hisi_hba *hisi_hba = sha->lldd_ha; struct device *dev = hisi_hba->dev; struct Scsi_Host *shost = hisi_hba->shost; int rc; if (!pdev->pm_cap) { dev_err(dev, "PCI PM not supported\n"); return -ENODEV; } if (test_and_set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) return -1; dev_warn(dev, "entering suspend state\n"); scsi_block_requests(shost); set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); flush_workqueue(hisi_hba->wq); interrupt_disable_v3_hw(hisi_hba); #ifdef CONFIG_PM if (atomic_read(&device->power.usage_count)) { dev_err(dev, "PM suspend: host status cannot be suspended\n"); rc = -EBUSY; goto err_out; } #endif rc = disable_host_v3_hw(hisi_hba); if (rc) { dev_err(dev, "PM suspend: disable host failed rc=%d\n", rc); goto err_out_recover_host; } hisi_sas_init_mem(hisi_hba); hisi_sas_release_tasks(hisi_hba); sas_suspend_ha(sha); dev_warn(dev, "end of suspending controller\n"); return 0; err_out_recover_host: enable_host_v3_hw(hisi_hba); #ifdef CONFIG_PM err_out: #endif interrupt_enable_v3_hw(hisi_hba); clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags); scsi_unblock_requests(shost); return rc; } static int _resume_v3_hw(struct device *device) { struct pci_dev *pdev = to_pci_dev(device); struct sas_ha_struct *sha = pci_get_drvdata(pdev); struct hisi_hba *hisi_hba = sha->lldd_ha; struct Scsi_Host *shost = hisi_hba->shost; struct device *dev = hisi_hba->dev; unsigned int rc; pci_power_t device_state = pdev->current_state; dev_warn(dev, "resuming 
from operating state [D%d]\n", device_state); scsi_unblock_requests(shost); clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); sas_prep_resume_ha(sha); rc = hw_init_v3_hw(hisi_hba); if (rc) { scsi_remove_host(shost); return rc; } phys_init_v3_hw(hisi_hba); /* * If a directly-attached disk is removed during suspend, a deadlock * may occur, as the PHYE_RESUME_TIMEOUT processing will require the * hisi_hba->device to be active, which can only happen when resume * completes. So don't wait for the HA event workqueue to drain upon * resume. */ sas_resume_ha_no_sync(sha); clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags); dev_warn(dev, "end of resuming controller\n"); return 0; } static int __maybe_unused suspend_v3_hw(struct device *device) { struct pci_dev *pdev = to_pci_dev(device); struct sas_ha_struct *sha = pci_get_drvdata(pdev); struct hisi_hba *hisi_hba = sha->lldd_ha; int rc; set_bit(HISI_SAS_PM_BIT, &hisi_hba->flags); rc = _suspend_v3_hw(device); if (rc) clear_bit(HISI_SAS_PM_BIT, &hisi_hba->flags); return rc; } static int __maybe_unused resume_v3_hw(struct device *device) { struct pci_dev *pdev = to_pci_dev(device); struct sas_ha_struct *sha = pci_get_drvdata(pdev); struct hisi_hba *hisi_hba = sha->lldd_ha; int rc = _resume_v3_hw(device); clear_bit(HISI_SAS_PM_BIT, &hisi_hba->flags); return rc; } static const struct pci_device_id sas_v3_pci_table[] = { { PCI_VDEVICE(HUAWEI, 0xa230), hip08 }, {} }; MODULE_DEVICE_TABLE(pci, sas_v3_pci_table); static const struct pci_error_handlers hisi_sas_err_handler = { .reset_prepare = hisi_sas_reset_prepare_v3_hw, .reset_done = hisi_sas_reset_done_v3_hw, }; static UNIVERSAL_DEV_PM_OPS(hisi_sas_v3_pm_ops, suspend_v3_hw, resume_v3_hw, NULL); static struct pci_driver sas_v3_pci_driver = { .name = DRV_NAME, .id_table = sas_v3_pci_table, .probe = hisi_sas_v3_probe, .remove = hisi_sas_v3_remove, .err_handler = &hisi_sas_err_handler, .driver.pm = &hisi_sas_v3_pm_ops, }; module_pci_driver(sas_v3_pci_driver); module_param_named(intr_conv, hisi_sas_intr_conv, bool, 0444); MODULE_LICENSE("GPL"); MODULE_AUTHOR("John Garry <[email protected]>"); MODULE_DESCRIPTION("HISILICON SAS controller v3 hw driver based on pci device"); MODULE_ALIAS("pci:" DRV_NAME);
linux-master
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
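/*
 * Minimal self-contained sketch of the debugfs pattern used above in
 * debugfs_fifo_init_v3_hw()/debugfs_fifo_data_v3_hw_show(): a directory,
 * a read-only dump file generated with DEFINE_SHOW_ATTRIBUTE(), and a
 * writable numeric attribute (the stock debugfs_create_u32() helper here
 * stands in for the driver's custom debugfs_v3_hw_fops). This module and
 * its names ("example", "dump", "value") are hypothetical and not part of
 * either hisi_sas file.
 */
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *example_dir;
static u32 example_value;

static int example_show(struct seq_file *s, void *unused)
{
	/* seq_file callback; DEFINE_SHOW_ATTRIBUTE() below builds example_fops from it */
	seq_printf(s, "value=0x%x\n", example_value);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(example);

static int __init example_init(void)
{
	example_dir = debugfs_create_dir("example", NULL);
	/* read-only dump file, analogous to "fifo_data" above */
	debugfs_create_file("dump", 0400, example_dir, NULL, &example_fops);
	/* simple 0600 numeric attribute, analogous to the fifo config files above */
	debugfs_create_u32("value", 0600, example_dir, &example_value);
	return 0;
}

static void __exit example_exit(void)
{
	debugfs_remove_recursive(example_dir);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");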
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (c) 2015 Linaro Ltd. * Copyright (c) 2015 Hisilicon Limited. */ #include "hisi_sas.h" #define DRV_NAME "hisi_sas_v1_hw" /* global registers need init*/ #define DLVRY_QUEUE_ENABLE 0x0 #define IOST_BASE_ADDR_LO 0x8 #define IOST_BASE_ADDR_HI 0xc #define ITCT_BASE_ADDR_LO 0x10 #define ITCT_BASE_ADDR_HI 0x14 #define BROKEN_MSG_ADDR_LO 0x18 #define BROKEN_MSG_ADDR_HI 0x1c #define PHY_CONTEXT 0x20 #define PHY_STATE 0x24 #define PHY_PORT_NUM_MA 0x28 #define PORT_STATE 0x2c #define PHY_CONN_RATE 0x30 #define HGC_TRANS_TASK_CNT_LIMIT 0x38 #define AXI_AHB_CLK_CFG 0x3c #define HGC_SAS_TXFAIL_RETRY_CTRL 0x84 #define HGC_GET_ITV_TIME 0x90 #define DEVICE_MSG_WORK_MODE 0x94 #define I_T_NEXUS_LOSS_TIME 0xa0 #define BUS_INACTIVE_LIMIT_TIME 0xa8 #define REJECT_TO_OPEN_LIMIT_TIME 0xac #define CFG_AGING_TIME 0xbc #define CFG_AGING_TIME_ITCT_REL_OFF 0 #define CFG_AGING_TIME_ITCT_REL_MSK (0x1 << CFG_AGING_TIME_ITCT_REL_OFF) #define HGC_DFX_CFG2 0xc0 #define FIS_LIST_BADDR_L 0xc4 #define CFG_1US_TIMER_TRSH 0xcc #define CFG_SAS_CONFIG 0xd4 #define HGC_IOST_ECC_ADDR 0x140 #define HGC_IOST_ECC_ADDR_BAD_OFF 16 #define HGC_IOST_ECC_ADDR_BAD_MSK (0x3ff << HGC_IOST_ECC_ADDR_BAD_OFF) #define HGC_DQ_ECC_ADDR 0x144 #define HGC_DQ_ECC_ADDR_BAD_OFF 16 #define HGC_DQ_ECC_ADDR_BAD_MSK (0xfff << HGC_DQ_ECC_ADDR_BAD_OFF) #define HGC_INVLD_DQE_INFO 0x148 #define HGC_INVLD_DQE_INFO_DQ_OFF 0 #define HGC_INVLD_DQE_INFO_DQ_MSK (0xffff << HGC_INVLD_DQE_INFO_DQ_OFF) #define HGC_INVLD_DQE_INFO_TYPE_OFF 16 #define HGC_INVLD_DQE_INFO_TYPE_MSK (0x1 << HGC_INVLD_DQE_INFO_TYPE_OFF) #define HGC_INVLD_DQE_INFO_FORCE_OFF 17 #define HGC_INVLD_DQE_INFO_FORCE_MSK (0x1 << HGC_INVLD_DQE_INFO_FORCE_OFF) #define HGC_INVLD_DQE_INFO_PHY_OFF 18 #define HGC_INVLD_DQE_INFO_PHY_MSK (0x1 << HGC_INVLD_DQE_INFO_PHY_OFF) #define HGC_INVLD_DQE_INFO_ABORT_OFF 19 #define HGC_INVLD_DQE_INFO_ABORT_MSK (0x1 << HGC_INVLD_DQE_INFO_ABORT_OFF) #define HGC_INVLD_DQE_INFO_IPTT_OF_OFF 20 #define HGC_INVLD_DQE_INFO_IPTT_OF_MSK (0x1 << HGC_INVLD_DQE_INFO_IPTT_OF_OFF) #define HGC_INVLD_DQE_INFO_SSP_ERR_OFF 21 #define HGC_INVLD_DQE_INFO_SSP_ERR_MSK (0x1 << HGC_INVLD_DQE_INFO_SSP_ERR_OFF) #define HGC_INVLD_DQE_INFO_OFL_OFF 22 #define HGC_INVLD_DQE_INFO_OFL_MSK (0x1 << HGC_INVLD_DQE_INFO_OFL_OFF) #define HGC_ITCT_ECC_ADDR 0x150 #define HGC_ITCT_ECC_ADDR_BAD_OFF 16 #define HGC_ITCT_ECC_ADDR_BAD_MSK (0x3ff << HGC_ITCT_ECC_ADDR_BAD_OFF) #define HGC_AXI_FIFO_ERR_INFO 0x154 #define INT_COAL_EN 0x1bc #define OQ_INT_COAL_TIME 0x1c0 #define OQ_INT_COAL_CNT 0x1c4 #define ENT_INT_COAL_TIME 0x1c8 #define ENT_INT_COAL_CNT 0x1cc #define OQ_INT_SRC 0x1d0 #define OQ_INT_SRC_MSK 0x1d4 #define ENT_INT_SRC1 0x1d8 #define ENT_INT_SRC2 0x1dc #define ENT_INT_SRC2_DQ_CFG_ERR_OFF 25 #define ENT_INT_SRC2_DQ_CFG_ERR_MSK (0x1 << ENT_INT_SRC2_DQ_CFG_ERR_OFF) #define ENT_INT_SRC2_CQ_CFG_ERR_OFF 27 #define ENT_INT_SRC2_CQ_CFG_ERR_MSK (0x1 << ENT_INT_SRC2_CQ_CFG_ERR_OFF) #define ENT_INT_SRC2_AXI_WRONG_INT_OFF 28 #define ENT_INT_SRC2_AXI_WRONG_INT_MSK (0x1 << ENT_INT_SRC2_AXI_WRONG_INT_OFF) #define ENT_INT_SRC2_AXI_OVERLF_INT_OFF 29 #define ENT_INT_SRC2_AXI_OVERLF_INT_MSK (0x1 << ENT_INT_SRC2_AXI_OVERLF_INT_OFF) #define ENT_INT_SRC_MSK1 0x1e0 #define ENT_INT_SRC_MSK2 0x1e4 #define SAS_ECC_INTR 0x1e8 #define SAS_ECC_INTR_DQ_ECC1B_OFF 0 #define SAS_ECC_INTR_DQ_ECC1B_MSK (0x1 << SAS_ECC_INTR_DQ_ECC1B_OFF) #define SAS_ECC_INTR_DQ_ECCBAD_OFF 1 #define SAS_ECC_INTR_DQ_ECCBAD_MSK (0x1 << SAS_ECC_INTR_DQ_ECCBAD_OFF) #define SAS_ECC_INTR_IOST_ECC1B_OFF 2 
#define SAS_ECC_INTR_IOST_ECC1B_MSK (0x1 << SAS_ECC_INTR_IOST_ECC1B_OFF) #define SAS_ECC_INTR_IOST_ECCBAD_OFF 3 #define SAS_ECC_INTR_IOST_ECCBAD_MSK (0x1 << SAS_ECC_INTR_IOST_ECCBAD_OFF) #define SAS_ECC_INTR_ITCT_ECC1B_OFF 4 #define SAS_ECC_INTR_ITCT_ECC1B_MSK (0x1 << SAS_ECC_INTR_ITCT_ECC1B_OFF) #define SAS_ECC_INTR_ITCT_ECCBAD_OFF 5 #define SAS_ECC_INTR_ITCT_ECCBAD_MSK (0x1 << SAS_ECC_INTR_ITCT_ECCBAD_OFF) #define SAS_ECC_INTR_MSK 0x1ec #define HGC_ERR_STAT_EN 0x238 #define DLVRY_Q_0_BASE_ADDR_LO 0x260 #define DLVRY_Q_0_BASE_ADDR_HI 0x264 #define DLVRY_Q_0_DEPTH 0x268 #define DLVRY_Q_0_WR_PTR 0x26c #define DLVRY_Q_0_RD_PTR 0x270 #define COMPL_Q_0_BASE_ADDR_LO 0x4e0 #define COMPL_Q_0_BASE_ADDR_HI 0x4e4 #define COMPL_Q_0_DEPTH 0x4e8 #define COMPL_Q_0_WR_PTR 0x4ec #define COMPL_Q_0_RD_PTR 0x4f0 #define HGC_ECC_ERR 0x7d0 /* phy registers need init */ #define PORT_BASE (0x800) #define PHY_CFG (PORT_BASE + 0x0) #define PHY_CFG_ENA_OFF 0 #define PHY_CFG_ENA_MSK (0x1 << PHY_CFG_ENA_OFF) #define PHY_CFG_DC_OPT_OFF 2 #define PHY_CFG_DC_OPT_MSK (0x1 << PHY_CFG_DC_OPT_OFF) #define PROG_PHY_LINK_RATE (PORT_BASE + 0xc) #define PROG_PHY_LINK_RATE_MAX_OFF 0 #define PROG_PHY_LINK_RATE_MAX_MSK (0xf << PROG_PHY_LINK_RATE_MAX_OFF) #define PROG_PHY_LINK_RATE_MIN_OFF 4 #define PROG_PHY_LINK_RATE_MIN_MSK (0xf << PROG_PHY_LINK_RATE_MIN_OFF) #define PROG_PHY_LINK_RATE_OOB_OFF 8 #define PROG_PHY_LINK_RATE_OOB_MSK (0xf << PROG_PHY_LINK_RATE_OOB_OFF) #define PHY_CTRL (PORT_BASE + 0x14) #define PHY_CTRL_RESET_OFF 0 #define PHY_CTRL_RESET_MSK (0x1 << PHY_CTRL_RESET_OFF) #define PHY_RATE_NEGO (PORT_BASE + 0x30) #define PHY_PCN (PORT_BASE + 0x44) #define SL_TOUT_CFG (PORT_BASE + 0x8c) #define SL_CONTROL (PORT_BASE + 0x94) #define SL_CONTROL_NOTIFY_EN_OFF 0 #define SL_CONTROL_NOTIFY_EN_MSK (0x1 << SL_CONTROL_NOTIFY_EN_OFF) #define TX_ID_DWORD0 (PORT_BASE + 0x9c) #define TX_ID_DWORD1 (PORT_BASE + 0xa0) #define TX_ID_DWORD2 (PORT_BASE + 0xa4) #define TX_ID_DWORD3 (PORT_BASE + 0xa8) #define TX_ID_DWORD4 (PORT_BASE + 0xaC) #define TX_ID_DWORD5 (PORT_BASE + 0xb0) #define TX_ID_DWORD6 (PORT_BASE + 0xb4) #define RX_IDAF_DWORD0 (PORT_BASE + 0xc4) #define RX_IDAF_DWORD1 (PORT_BASE + 0xc8) #define RX_IDAF_DWORD2 (PORT_BASE + 0xcc) #define RX_IDAF_DWORD3 (PORT_BASE + 0xd0) #define RX_IDAF_DWORD4 (PORT_BASE + 0xd4) #define RX_IDAF_DWORD5 (PORT_BASE + 0xd8) #define RX_IDAF_DWORD6 (PORT_BASE + 0xdc) #define RXOP_CHECK_CFG_H (PORT_BASE + 0xfc) #define DONE_RECEIVED_TIME (PORT_BASE + 0x12c) #define CON_CFG_DRIVER (PORT_BASE + 0x130) #define PHY_CONFIG2 (PORT_BASE + 0x1a8) #define PHY_CONFIG2_FORCE_TXDEEMPH_OFF 3 #define PHY_CONFIG2_FORCE_TXDEEMPH_MSK (0x1 << PHY_CONFIG2_FORCE_TXDEEMPH_OFF) #define PHY_CONFIG2_TX_TRAIN_COMP_OFF 24 #define PHY_CONFIG2_TX_TRAIN_COMP_MSK (0x1 << PHY_CONFIG2_TX_TRAIN_COMP_OFF) #define CHL_INT0 (PORT_BASE + 0x1b0) #define CHL_INT0_PHYCTRL_NOTRDY_OFF 0 #define CHL_INT0_PHYCTRL_NOTRDY_MSK (0x1 << CHL_INT0_PHYCTRL_NOTRDY_OFF) #define CHL_INT0_SN_FAIL_NGR_OFF 2 #define CHL_INT0_SN_FAIL_NGR_MSK (0x1 << CHL_INT0_SN_FAIL_NGR_OFF) #define CHL_INT0_DWS_LOST_OFF 4 #define CHL_INT0_DWS_LOST_MSK (0x1 << CHL_INT0_DWS_LOST_OFF) #define CHL_INT0_SL_IDAF_FAIL_OFF 10 #define CHL_INT0_SL_IDAF_FAIL_MSK (0x1 << CHL_INT0_SL_IDAF_FAIL_OFF) #define CHL_INT0_ID_TIMEOUT_OFF 11 #define CHL_INT0_ID_TIMEOUT_MSK (0x1 << CHL_INT0_ID_TIMEOUT_OFF) #define CHL_INT0_SL_OPAF_FAIL_OFF 12 #define CHL_INT0_SL_OPAF_FAIL_MSK (0x1 << CHL_INT0_SL_OPAF_FAIL_OFF) #define CHL_INT0_SL_PS_FAIL_OFF 21 #define CHL_INT0_SL_PS_FAIL_MSK (0x1 << 
CHL_INT0_SL_PS_FAIL_OFF) #define CHL_INT1 (PORT_BASE + 0x1b4) #define CHL_INT2 (PORT_BASE + 0x1b8) #define CHL_INT2_SL_RX_BC_ACK_OFF 2 #define CHL_INT2_SL_RX_BC_ACK_MSK (0x1 << CHL_INT2_SL_RX_BC_ACK_OFF) #define CHL_INT2_SL_PHY_ENA_OFF 6 #define CHL_INT2_SL_PHY_ENA_MSK (0x1 << CHL_INT2_SL_PHY_ENA_OFF) #define CHL_INT0_MSK (PORT_BASE + 0x1bc) #define CHL_INT0_MSK_PHYCTRL_NOTRDY_OFF 0 #define CHL_INT0_MSK_PHYCTRL_NOTRDY_MSK (0x1 << CHL_INT0_MSK_PHYCTRL_NOTRDY_OFF) #define CHL_INT1_MSK (PORT_BASE + 0x1c0) #define CHL_INT2_MSK (PORT_BASE + 0x1c4) #define CHL_INT_COAL_EN (PORT_BASE + 0x1d0) #define DMA_TX_STATUS (PORT_BASE + 0x2d0) #define DMA_TX_STATUS_BUSY_OFF 0 #define DMA_TX_STATUS_BUSY_MSK (0x1 << DMA_TX_STATUS_BUSY_OFF) #define DMA_RX_STATUS (PORT_BASE + 0x2e8) #define DMA_RX_STATUS_BUSY_OFF 0 #define DMA_RX_STATUS_BUSY_MSK (0x1 << DMA_RX_STATUS_BUSY_OFF) #define AXI_CFG 0x5100 #define RESET_VALUE 0x7ffff /* HW dma structures */ /* Delivery queue header */ /* dw0 */ #define CMD_HDR_RESP_REPORT_OFF 5 #define CMD_HDR_RESP_REPORT_MSK 0x20 #define CMD_HDR_TLR_CTRL_OFF 6 #define CMD_HDR_TLR_CTRL_MSK 0xc0 #define CMD_HDR_PORT_OFF 17 #define CMD_HDR_PORT_MSK 0xe0000 #define CMD_HDR_PRIORITY_OFF 27 #define CMD_HDR_PRIORITY_MSK 0x8000000 #define CMD_HDR_MODE_OFF 28 #define CMD_HDR_MODE_MSK 0x10000000 #define CMD_HDR_CMD_OFF 29 #define CMD_HDR_CMD_MSK 0xe0000000 /* dw1 */ #define CMD_HDR_VERIFY_DTL_OFF 10 #define CMD_HDR_VERIFY_DTL_MSK 0x400 #define CMD_HDR_SSP_FRAME_TYPE_OFF 13 #define CMD_HDR_SSP_FRAME_TYPE_MSK 0xe000 #define CMD_HDR_DEVICE_ID_OFF 16 #define CMD_HDR_DEVICE_ID_MSK 0xffff0000 /* dw2 */ #define CMD_HDR_CFL_OFF 0 #define CMD_HDR_CFL_MSK 0x1ff #define CMD_HDR_MRFL_OFF 15 #define CMD_HDR_MRFL_MSK 0xff8000 #define CMD_HDR_FIRST_BURST_OFF 25 #define CMD_HDR_FIRST_BURST_MSK 0x2000000 /* dw3 */ #define CMD_HDR_IPTT_OFF 0 #define CMD_HDR_IPTT_MSK 0xffff /* dw6 */ #define CMD_HDR_DATA_SGL_LEN_OFF 16 #define CMD_HDR_DATA_SGL_LEN_MSK 0xffff0000 /* Completion header */ #define CMPLT_HDR_IPTT_OFF 0 #define CMPLT_HDR_IPTT_MSK (0xffff << CMPLT_HDR_IPTT_OFF) #define CMPLT_HDR_CMD_CMPLT_OFF 17 #define CMPLT_HDR_CMD_CMPLT_MSK (0x1 << CMPLT_HDR_CMD_CMPLT_OFF) #define CMPLT_HDR_ERR_RCRD_XFRD_OFF 18 #define CMPLT_HDR_ERR_RCRD_XFRD_MSK (0x1 << CMPLT_HDR_ERR_RCRD_XFRD_OFF) #define CMPLT_HDR_RSPNS_XFRD_OFF 19 #define CMPLT_HDR_RSPNS_XFRD_MSK (0x1 << CMPLT_HDR_RSPNS_XFRD_OFF) #define CMPLT_HDR_IO_CFG_ERR_OFF 27 #define CMPLT_HDR_IO_CFG_ERR_MSK (0x1 << CMPLT_HDR_IO_CFG_ERR_OFF) /* ITCT header */ /* qw0 */ #define ITCT_HDR_DEV_TYPE_OFF 0 #define ITCT_HDR_DEV_TYPE_MSK (0x3ULL << ITCT_HDR_DEV_TYPE_OFF) #define ITCT_HDR_VALID_OFF 2 #define ITCT_HDR_VALID_MSK (0x1ULL << ITCT_HDR_VALID_OFF) #define ITCT_HDR_AWT_CONTROL_OFF 4 #define ITCT_HDR_AWT_CONTROL_MSK (0x1ULL << ITCT_HDR_AWT_CONTROL_OFF) #define ITCT_HDR_MAX_CONN_RATE_OFF 5 #define ITCT_HDR_MAX_CONN_RATE_MSK (0xfULL << ITCT_HDR_MAX_CONN_RATE_OFF) #define ITCT_HDR_VALID_LINK_NUM_OFF 9 #define ITCT_HDR_VALID_LINK_NUM_MSK (0xfULL << ITCT_HDR_VALID_LINK_NUM_OFF) #define ITCT_HDR_PORT_ID_OFF 13 #define ITCT_HDR_PORT_ID_MSK (0x7ULL << ITCT_HDR_PORT_ID_OFF) #define ITCT_HDR_SMP_TIMEOUT_OFF 16 #define ITCT_HDR_SMP_TIMEOUT_MSK (0xffffULL << ITCT_HDR_SMP_TIMEOUT_OFF) /* qw1 */ #define ITCT_HDR_MAX_SAS_ADDR_OFF 0 #define ITCT_HDR_MAX_SAS_ADDR_MSK (0xffffffffffffffff << \ ITCT_HDR_MAX_SAS_ADDR_OFF) /* qw2 */ #define ITCT_HDR_IT_NEXUS_LOSS_TL_OFF 0 #define ITCT_HDR_IT_NEXUS_LOSS_TL_MSK (0xffffULL << \ ITCT_HDR_IT_NEXUS_LOSS_TL_OFF) #define ITCT_HDR_BUS_INACTIVE_TL_OFF 16 
#define ITCT_HDR_BUS_INACTIVE_TL_MSK (0xffffULL << \ ITCT_HDR_BUS_INACTIVE_TL_OFF) #define ITCT_HDR_MAX_CONN_TL_OFF 32 #define ITCT_HDR_MAX_CONN_TL_MSK (0xffffULL << \ ITCT_HDR_MAX_CONN_TL_OFF) #define ITCT_HDR_REJ_OPEN_TL_OFF 48 #define ITCT_HDR_REJ_OPEN_TL_MSK (0xffffULL << \ ITCT_HDR_REJ_OPEN_TL_OFF) /* Err record header */ #define ERR_HDR_DMA_TX_ERR_TYPE_OFF 0 #define ERR_HDR_DMA_TX_ERR_TYPE_MSK (0xffff << ERR_HDR_DMA_TX_ERR_TYPE_OFF) #define ERR_HDR_DMA_RX_ERR_TYPE_OFF 16 #define ERR_HDR_DMA_RX_ERR_TYPE_MSK (0xffff << ERR_HDR_DMA_RX_ERR_TYPE_OFF) struct hisi_sas_complete_v1_hdr { __le32 data; }; struct hisi_sas_err_record_v1 { /* dw0 */ __le32 dma_err_type; /* dw1 */ __le32 trans_tx_fail_type; /* dw2 */ __le32 trans_rx_fail_type; /* dw3 */ u32 rsvd; }; enum { HISI_SAS_PHY_BCAST_ACK = 0, HISI_SAS_PHY_SL_PHY_ENABLED, HISI_SAS_PHY_INT_ABNORMAL, HISI_SAS_PHY_INT_NR }; enum { DMA_TX_ERR_BASE = 0x0, DMA_RX_ERR_BASE = 0x100, TRANS_TX_FAIL_BASE = 0x200, TRANS_RX_FAIL_BASE = 0x300, /* dma tx */ DMA_TX_DIF_CRC_ERR = DMA_TX_ERR_BASE, /* 0x0 */ DMA_TX_DIF_APP_ERR, /* 0x1 */ DMA_TX_DIF_RPP_ERR, /* 0x2 */ DMA_TX_AXI_BUS_ERR, /* 0x3 */ DMA_TX_DATA_SGL_OVERFLOW_ERR, /* 0x4 */ DMA_TX_DIF_SGL_OVERFLOW_ERR, /* 0x5 */ DMA_TX_UNEXP_XFER_RDY_ERR, /* 0x6 */ DMA_TX_XFER_RDY_OFFSET_ERR, /* 0x7 */ DMA_TX_DATA_UNDERFLOW_ERR, /* 0x8 */ DMA_TX_XFER_RDY_LENGTH_OVERFLOW_ERR, /* 0x9 */ /* dma rx */ DMA_RX_BUFFER_ECC_ERR = DMA_RX_ERR_BASE, /* 0x100 */ DMA_RX_DIF_CRC_ERR, /* 0x101 */ DMA_RX_DIF_APP_ERR, /* 0x102 */ DMA_RX_DIF_RPP_ERR, /* 0x103 */ DMA_RX_RESP_BUFFER_OVERFLOW_ERR, /* 0x104 */ DMA_RX_AXI_BUS_ERR, /* 0x105 */ DMA_RX_DATA_SGL_OVERFLOW_ERR, /* 0x106 */ DMA_RX_DIF_SGL_OVERFLOW_ERR, /* 0x107 */ DMA_RX_DATA_OFFSET_ERR, /* 0x108 */ DMA_RX_UNEXP_RX_DATA_ERR, /* 0x109 */ DMA_RX_DATA_OVERFLOW_ERR, /* 0x10a */ DMA_RX_DATA_UNDERFLOW_ERR, /* 0x10b */ DMA_RX_UNEXP_RETRANS_RESP_ERR, /* 0x10c */ /* trans tx */ TRANS_TX_RSVD0_ERR = TRANS_TX_FAIL_BASE, /* 0x200 */ TRANS_TX_PHY_NOT_ENABLE_ERR, /* 0x201 */ TRANS_TX_OPEN_REJCT_WRONG_DEST_ERR, /* 0x202 */ TRANS_TX_OPEN_REJCT_ZONE_VIOLATION_ERR, /* 0x203 */ TRANS_TX_OPEN_REJCT_BY_OTHER_ERR, /* 0x204 */ TRANS_TX_RSVD1_ERR, /* 0x205 */ TRANS_TX_OPEN_REJCT_AIP_TIMEOUT_ERR, /* 0x206 */ TRANS_TX_OPEN_REJCT_STP_BUSY_ERR, /* 0x207 */ TRANS_TX_OPEN_REJCT_PROTOCOL_NOT_SUPPORT_ERR, /* 0x208 */ TRANS_TX_OPEN_REJCT_RATE_NOT_SUPPORT_ERR, /* 0x209 */ TRANS_TX_OPEN_REJCT_BAD_DEST_ERR, /* 0x20a */ TRANS_TX_OPEN_BREAK_RECEIVE_ERR, /* 0x20b */ TRANS_TX_LOW_PHY_POWER_ERR, /* 0x20c */ TRANS_TX_OPEN_REJCT_PATHWAY_BLOCKED_ERR, /* 0x20d */ TRANS_TX_OPEN_TIMEOUT_ERR, /* 0x20e */ TRANS_TX_OPEN_REJCT_NO_DEST_ERR, /* 0x20f */ TRANS_TX_OPEN_RETRY_ERR, /* 0x210 */ TRANS_TX_RSVD2_ERR, /* 0x211 */ TRANS_TX_BREAK_TIMEOUT_ERR, /* 0x212 */ TRANS_TX_BREAK_REQUEST_ERR, /* 0x213 */ TRANS_TX_BREAK_RECEIVE_ERR, /* 0x214 */ TRANS_TX_CLOSE_TIMEOUT_ERR, /* 0x215 */ TRANS_TX_CLOSE_NORMAL_ERR, /* 0x216 */ TRANS_TX_CLOSE_PHYRESET_ERR, /* 0x217 */ TRANS_TX_WITH_CLOSE_DWS_TIMEOUT_ERR, /* 0x218 */ TRANS_TX_WITH_CLOSE_COMINIT_ERR, /* 0x219 */ TRANS_TX_NAK_RECEIVE_ERR, /* 0x21a */ TRANS_TX_ACK_NAK_TIMEOUT_ERR, /* 0x21b */ TRANS_TX_CREDIT_TIMEOUT_ERR, /* 0x21c */ TRANS_TX_IPTT_CONFLICT_ERR, /* 0x21d */ TRANS_TX_TXFRM_TYPE_ERR, /* 0x21e */ TRANS_TX_TXSMP_LENGTH_ERR, /* 0x21f */ /* trans rx */ TRANS_RX_FRAME_CRC_ERR = TRANS_RX_FAIL_BASE, /* 0x300 */ TRANS_RX_FRAME_DONE_ERR, /* 0x301 */ TRANS_RX_FRAME_ERRPRM_ERR, /* 0x302 */ TRANS_RX_FRAME_NO_CREDIT_ERR, /* 0x303 */ TRANS_RX_RSVD0_ERR, /* 0x304 */ TRANS_RX_FRAME_OVERRUN_ERR, /* 
0x305 */ TRANS_RX_FRAME_NO_EOF_ERR, /* 0x306 */ TRANS_RX_LINK_BUF_OVERRUN_ERR, /* 0x307 */ TRANS_RX_BREAK_TIMEOUT_ERR, /* 0x308 */ TRANS_RX_BREAK_REQUEST_ERR, /* 0x309 */ TRANS_RX_BREAK_RECEIVE_ERR, /* 0x30a */ TRANS_RX_CLOSE_TIMEOUT_ERR, /* 0x30b */ TRANS_RX_CLOSE_NORMAL_ERR, /* 0x30c */ TRANS_RX_CLOSE_PHYRESET_ERR, /* 0x30d */ TRANS_RX_WITH_CLOSE_DWS_TIMEOUT_ERR, /* 0x30e */ TRANS_RX_WITH_CLOSE_COMINIT_ERR, /* 0x30f */ TRANS_RX_DATA_LENGTH0_ERR, /* 0x310 */ TRANS_RX_BAD_HASH_ERR, /* 0x311 */ TRANS_RX_XRDY_ZERO_ERR, /* 0x312 */ TRANS_RX_SSP_FRAME_LEN_ERR, /* 0x313 */ TRANS_RX_TRANS_RX_RSVD1_ERR, /* 0x314 */ TRANS_RX_NO_BALANCE_ERR, /* 0x315 */ TRANS_RX_TRANS_RX_RSVD2_ERR, /* 0x316 */ TRANS_RX_TRANS_RX_RSVD3_ERR, /* 0x317 */ TRANS_RX_BAD_FRAME_TYPE_ERR, /* 0x318 */ TRANS_RX_SMP_FRAME_LEN_ERR, /* 0x319 */ TRANS_RX_SMP_RESP_TIMEOUT_ERR, /* 0x31a */ }; #define HISI_SAS_PHY_MAX_INT_NR (HISI_SAS_PHY_INT_NR * HISI_SAS_MAX_PHYS) #define HISI_SAS_CQ_MAX_INT_NR (HISI_SAS_MAX_QUEUES) #define HISI_SAS_FATAL_INT_NR (2) #define HISI_SAS_MAX_INT_NR \ (HISI_SAS_PHY_MAX_INT_NR + HISI_SAS_CQ_MAX_INT_NR +\ HISI_SAS_FATAL_INT_NR) static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off) { void __iomem *regs = hisi_hba->regs + off; return readl(regs); } static void hisi_sas_write32(struct hisi_hba *hisi_hba, u32 off, u32 val) { void __iomem *regs = hisi_hba->regs + off; writel(val, regs); } static void hisi_sas_phy_write32(struct hisi_hba *hisi_hba, int phy_no, u32 off, u32 val) { void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off; writel(val, regs); } static u32 hisi_sas_phy_read32(struct hisi_hba *hisi_hba, int phy_no, u32 off) { void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off; return readl(regs); } static void config_phy_opt_mode_v1_hw(struct hisi_hba *hisi_hba, int phy_no) { u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG); cfg &= ~PHY_CFG_DC_OPT_MSK; cfg |= 1 << PHY_CFG_DC_OPT_OFF; hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg); } static void config_tx_tfe_autoneg_v1_hw(struct hisi_hba *hisi_hba, int phy_no) { u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CONFIG2); cfg &= ~PHY_CONFIG2_FORCE_TXDEEMPH_MSK; hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CONFIG2, cfg); } static void config_id_frame_v1_hw(struct hisi_hba *hisi_hba, int phy_no) { struct sas_identify_frame identify_frame; u32 *identify_buffer; memset(&identify_frame, 0, sizeof(identify_frame)); identify_frame.dev_type = SAS_END_DEVICE; identify_frame.frame_type = 0; identify_frame._un1 = 1; identify_frame.initiator_bits = SAS_PROTOCOL_ALL; identify_frame.target_bits = SAS_PROTOCOL_NONE; memcpy(&identify_frame._un4_11[0], hisi_hba->sas_addr, SAS_ADDR_SIZE); memcpy(&identify_frame.sas_addr[0], hisi_hba->sas_addr, SAS_ADDR_SIZE); identify_frame.phy_id = phy_no; identify_buffer = (u32 *)(&identify_frame); hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD0, __swab32(identify_buffer[0])); hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD1, __swab32(identify_buffer[1])); hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD2, __swab32(identify_buffer[2])); hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD3, __swab32(identify_buffer[3])); hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD4, __swab32(identify_buffer[4])); hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD5, __swab32(identify_buffer[5])); } static void setup_itct_v1_hw(struct hisi_hba *hisi_hba, struct hisi_sas_device *sas_dev) { struct domain_device *device = sas_dev->sas_device; struct device *dev = hisi_hba->dev; u64 qw0, device_id = 
sas_dev->device_id; struct hisi_sas_itct *itct = &hisi_hba->itct[device_id]; struct asd_sas_port *sas_port = device->port; struct hisi_sas_port *port = to_hisi_sas_port(sas_port); u64 sas_addr; memset(itct, 0, sizeof(*itct)); /* qw0 */ qw0 = 0; switch (sas_dev->dev_type) { case SAS_END_DEVICE: case SAS_EDGE_EXPANDER_DEVICE: case SAS_FANOUT_EXPANDER_DEVICE: qw0 = HISI_SAS_DEV_TYPE_SSP << ITCT_HDR_DEV_TYPE_OFF; break; default: dev_warn(dev, "setup itct: unsupported dev type (%d)\n", sas_dev->dev_type); } qw0 |= ((1 << ITCT_HDR_VALID_OFF) | (1 << ITCT_HDR_AWT_CONTROL_OFF) | (device->max_linkrate << ITCT_HDR_MAX_CONN_RATE_OFF) | (1 << ITCT_HDR_VALID_LINK_NUM_OFF) | (port->id << ITCT_HDR_PORT_ID_OFF)); itct->qw0 = cpu_to_le64(qw0); /* qw1 */ memcpy(&sas_addr, device->sas_addr, SAS_ADDR_SIZE); itct->sas_addr = cpu_to_le64(__swab64(sas_addr)); /* qw2 */ itct->qw2 = cpu_to_le64((500ULL << ITCT_HDR_IT_NEXUS_LOSS_TL_OFF) | (0xff00ULL << ITCT_HDR_BUS_INACTIVE_TL_OFF) | (0xff00ULL << ITCT_HDR_MAX_CONN_TL_OFF) | (0xff00ULL << ITCT_HDR_REJ_OPEN_TL_OFF)); } static int clear_itct_v1_hw(struct hisi_hba *hisi_hba, struct hisi_sas_device *sas_dev) { u64 dev_id = sas_dev->device_id; struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id]; u64 qw0; u32 reg_val = hisi_sas_read32(hisi_hba, CFG_AGING_TIME); reg_val |= CFG_AGING_TIME_ITCT_REL_MSK; hisi_sas_write32(hisi_hba, CFG_AGING_TIME, reg_val); /* free itct */ udelay(1); reg_val = hisi_sas_read32(hisi_hba, CFG_AGING_TIME); reg_val &= ~CFG_AGING_TIME_ITCT_REL_MSK; hisi_sas_write32(hisi_hba, CFG_AGING_TIME, reg_val); qw0 = le64_to_cpu(itct->qw0); qw0 &= ~ITCT_HDR_VALID_MSK; itct->qw0 = cpu_to_le64(qw0); return 0; } static int reset_hw_v1_hw(struct hisi_hba *hisi_hba) { int i; unsigned long end_time; u32 val; struct device *dev = hisi_hba->dev; for (i = 0; i < hisi_hba->n_phy; i++) { u32 phy_ctrl = hisi_sas_phy_read32(hisi_hba, i, PHY_CTRL); phy_ctrl |= PHY_CTRL_RESET_MSK; hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL, phy_ctrl); } msleep(1); /* It is safe to wait for 50us */ /* Ensure DMA tx & rx idle */ for (i = 0; i < hisi_hba->n_phy; i++) { u32 dma_tx_status, dma_rx_status; end_time = jiffies + msecs_to_jiffies(1000); while (1) { dma_tx_status = hisi_sas_phy_read32(hisi_hba, i, DMA_TX_STATUS); dma_rx_status = hisi_sas_phy_read32(hisi_hba, i, DMA_RX_STATUS); if (!(dma_tx_status & DMA_TX_STATUS_BUSY_MSK) && !(dma_rx_status & DMA_RX_STATUS_BUSY_MSK)) break; msleep(20); if (time_after(jiffies, end_time)) return -EIO; } } /* Ensure axi bus idle */ end_time = jiffies + msecs_to_jiffies(1000); while (1) { u32 axi_status = hisi_sas_read32(hisi_hba, AXI_CFG); if (axi_status == 0) break; msleep(20); if (time_after(jiffies, end_time)) return -EIO; } if (ACPI_HANDLE(dev)) { acpi_status s; s = acpi_evaluate_object(ACPI_HANDLE(dev), "_RST", NULL, NULL); if (ACPI_FAILURE(s)) { dev_err(dev, "Reset failed\n"); return -EIO; } } else if (hisi_hba->ctrl) { /* Apply reset and disable clock */ /* clk disable reg is offset by +4 bytes from clk enable reg */ regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_reset_reg, RESET_VALUE); regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_clock_ena_reg + 4, RESET_VALUE); msleep(1); regmap_read(hisi_hba->ctrl, hisi_hba->ctrl_reset_sts_reg, &val); if (RESET_VALUE != (val & RESET_VALUE)) { dev_err(dev, "Reset failed\n"); return -EIO; } /* De-reset and enable clock */ /* deassert rst reg is offset by +4 bytes from assert reg */ regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_reset_reg + 4, RESET_VALUE); regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_clock_ena_reg, 
RESET_VALUE); msleep(1); regmap_read(hisi_hba->ctrl, hisi_hba->ctrl_reset_sts_reg, &val); if (val & RESET_VALUE) { dev_err(dev, "De-reset failed\n"); return -EIO; } } else { dev_warn(dev, "no reset method\n"); return -EINVAL; } return 0; } static void init_reg_v1_hw(struct hisi_hba *hisi_hba) { int i; /* Global registers init*/ hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, (u32)((1ULL << hisi_hba->queue_count) - 1)); hisi_sas_write32(hisi_hba, HGC_TRANS_TASK_CNT_LIMIT, 0x11); hisi_sas_write32(hisi_hba, DEVICE_MSG_WORK_MODE, 0x1); hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x1ff); hisi_sas_write32(hisi_hba, HGC_ERR_STAT_EN, 0x401); hisi_sas_write32(hisi_hba, CFG_1US_TIMER_TRSH, 0x64); hisi_sas_write32(hisi_hba, HGC_GET_ITV_TIME, 0x1); hisi_sas_write32(hisi_hba, I_T_NEXUS_LOSS_TIME, 0x64); hisi_sas_write32(hisi_hba, BUS_INACTIVE_LIMIT_TIME, 0x2710); hisi_sas_write32(hisi_hba, REJECT_TO_OPEN_LIMIT_TIME, 0x1); hisi_sas_write32(hisi_hba, CFG_AGING_TIME, 0x7a12); hisi_sas_write32(hisi_hba, HGC_DFX_CFG2, 0x9c40); hisi_sas_write32(hisi_hba, FIS_LIST_BADDR_L, 0x2); hisi_sas_write32(hisi_hba, INT_COAL_EN, 0xc); hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x186a0); hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 1); hisi_sas_write32(hisi_hba, ENT_INT_COAL_TIME, 0x1); hisi_sas_write32(hisi_hba, ENT_INT_COAL_CNT, 0x1); hisi_sas_write32(hisi_hba, OQ_INT_SRC, 0xffffffff); hisi_sas_write32(hisi_hba, OQ_INT_SRC_MSK, 0); hisi_sas_write32(hisi_hba, ENT_INT_SRC1, 0xffffffff); hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0); hisi_sas_write32(hisi_hba, ENT_INT_SRC2, 0xffffffff); hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0); hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0); hisi_sas_write32(hisi_hba, AXI_AHB_CLK_CFG, 0x2); hisi_sas_write32(hisi_hba, CFG_SAS_CONFIG, 0x22000000); for (i = 0; i < hisi_hba->n_phy; i++) { hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE, 0x88a); hisi_sas_phy_write32(hisi_hba, i, PHY_CONFIG2, 0x7c080); hisi_sas_phy_write32(hisi_hba, i, PHY_RATE_NEGO, 0x415ee00); hisi_sas_phy_write32(hisi_hba, i, PHY_PCN, 0x80a80000); hisi_sas_phy_write32(hisi_hba, i, SL_TOUT_CFG, 0x7d7d7d7d); hisi_sas_phy_write32(hisi_hba, i, DONE_RECEIVED_TIME, 0x0); hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000); hisi_sas_phy_write32(hisi_hba, i, DONE_RECEIVED_TIME, 0); hisi_sas_phy_write32(hisi_hba, i, CON_CFG_DRIVER, 0x13f0a); hisi_sas_phy_write32(hisi_hba, i, CHL_INT_COAL_EN, 3); hisi_sas_phy_write32(hisi_hba, i, DONE_RECEIVED_TIME, 8); } for (i = 0; i < hisi_hba->queue_count; i++) { /* Delivery queue */ hisi_sas_write32(hisi_hba, DLVRY_Q_0_BASE_ADDR_HI + (i * 0x14), upper_32_bits(hisi_hba->cmd_hdr_dma[i])); hisi_sas_write32(hisi_hba, DLVRY_Q_0_BASE_ADDR_LO + (i * 0x14), lower_32_bits(hisi_hba->cmd_hdr_dma[i])); hisi_sas_write32(hisi_hba, DLVRY_Q_0_DEPTH + (i * 0x14), HISI_SAS_QUEUE_SLOTS); /* Completion queue */ hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_HI + (i * 0x14), upper_32_bits(hisi_hba->complete_hdr_dma[i])); hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_LO + (i * 0x14), lower_32_bits(hisi_hba->complete_hdr_dma[i])); hisi_sas_write32(hisi_hba, COMPL_Q_0_DEPTH + (i * 0x14), HISI_SAS_QUEUE_SLOTS); } /* itct */ hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_LO, lower_32_bits(hisi_hba->itct_dma)); hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_HI, upper_32_bits(hisi_hba->itct_dma)); /* iost */ hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_LO, lower_32_bits(hisi_hba->iost_dma)); hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_HI, upper_32_bits(hisi_hba->iost_dma)); /* breakpoint */ 
hisi_sas_write32(hisi_hba, BROKEN_MSG_ADDR_LO, lower_32_bits(hisi_hba->breakpoint_dma)); hisi_sas_write32(hisi_hba, BROKEN_MSG_ADDR_HI, upper_32_bits(hisi_hba->breakpoint_dma)); } static int hw_init_v1_hw(struct hisi_hba *hisi_hba) { struct device *dev = hisi_hba->dev; int rc; rc = reset_hw_v1_hw(hisi_hba); if (rc) { dev_err(dev, "hisi_sas_reset_hw failed, rc=%d\n", rc); return rc; } msleep(100); init_reg_v1_hw(hisi_hba); return 0; } static void enable_phy_v1_hw(struct hisi_hba *hisi_hba, int phy_no) { u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG); cfg |= PHY_CFG_ENA_MSK; hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg); } static void disable_phy_v1_hw(struct hisi_hba *hisi_hba, int phy_no) { u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG); cfg &= ~PHY_CFG_ENA_MSK; hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg); } static void start_phy_v1_hw(struct hisi_hba *hisi_hba, int phy_no) { config_id_frame_v1_hw(hisi_hba, phy_no); config_phy_opt_mode_v1_hw(hisi_hba, phy_no); config_tx_tfe_autoneg_v1_hw(hisi_hba, phy_no); enable_phy_v1_hw(hisi_hba, phy_no); } static void phy_hard_reset_v1_hw(struct hisi_hba *hisi_hba, int phy_no) { hisi_sas_phy_enable(hisi_hba, phy_no, 0); msleep(100); hisi_sas_phy_enable(hisi_hba, phy_no, 1); } static void start_phys_v1_hw(struct timer_list *t) { struct hisi_hba *hisi_hba = from_timer(hisi_hba, t, timer); int i; for (i = 0; i < hisi_hba->n_phy; i++) { hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x12a); hisi_sas_phy_enable(hisi_hba, i, 1); } } static void phys_init_v1_hw(struct hisi_hba *hisi_hba) { int i; struct timer_list *timer = &hisi_hba->timer; for (i = 0; i < hisi_hba->n_phy; i++) { hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x6a); hisi_sas_phy_read32(hisi_hba, i, CHL_INT2_MSK); } timer_setup(timer, start_phys_v1_hw, 0); mod_timer(timer, jiffies + HZ); } static void sl_notify_ssp_v1_hw(struct hisi_hba *hisi_hba, int phy_no) { u32 sl_control; sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL); sl_control |= SL_CONTROL_NOTIFY_EN_MSK; hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control); msleep(1); sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL); sl_control &= ~SL_CONTROL_NOTIFY_EN_MSK; hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control); } static enum sas_linkrate phy_get_max_linkrate_v1_hw(void) { return SAS_LINK_RATE_6_0_GBPS; } static void phy_set_linkrate_v1_hw(struct hisi_hba *hisi_hba, int phy_no, struct sas_phy_linkrates *r) { enum sas_linkrate max = r->maximum_linkrate; u32 prog_phy_link_rate = 0x800; prog_phy_link_rate |= hisi_sas_get_prog_phy_linkrate_mask(max); hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE, prog_phy_link_rate); } static int get_wideport_bitmap_v1_hw(struct hisi_hba *hisi_hba, int port_id) { int i, bitmap = 0; u32 phy_port_num_ma = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA); for (i = 0; i < hisi_hba->n_phy; i++) if (((phy_port_num_ma >> (i * 4)) & 0xf) == port_id) bitmap |= 1 << i; return bitmap; } /* DQ lock must be taken here */ static void start_delivery_v1_hw(struct hisi_sas_dq *dq) { struct hisi_hba *hisi_hba = dq->hisi_hba; struct hisi_sas_slot *s, *s1, *s2 = NULL; int dlvry_queue = dq->id; int wp; list_for_each_entry_safe(s, s1, &dq->list, delivery) { if (!s->ready) break; s2 = s; list_del(&s->delivery); } if (!s2) return; /* * Ensure that memories for slots built on other CPUs is observed. 
*/ smp_rmb(); wp = (s2->dlvry_queue_slot + 1) % HISI_SAS_QUEUE_SLOTS; hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14), wp); } static void prep_prd_sge_v1_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot, struct hisi_sas_cmd_hdr *hdr, struct scatterlist *scatter, int n_elem) { struct hisi_sas_sge_page *sge_page = hisi_sas_sge_addr_mem(slot); struct scatterlist *sg; int i; for_each_sg(scatter, sg, n_elem, i) { struct hisi_sas_sge *entry = &sge_page->sge[i]; entry->addr = cpu_to_le64(sg_dma_address(sg)); entry->page_ctrl_0 = entry->page_ctrl_1 = 0; entry->data_len = cpu_to_le32(sg_dma_len(sg)); entry->data_off = 0; } hdr->prd_table_addr = cpu_to_le64(hisi_sas_sge_addr_dma(slot)); hdr->sg_len = cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF); } static void prep_smp_v1_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot) { struct sas_task *task = slot->task; struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr; struct domain_device *device = task->dev; struct hisi_sas_port *port = slot->port; struct scatterlist *sg_req; struct hisi_sas_device *sas_dev = device->lldd_dev; dma_addr_t req_dma_addr; unsigned int req_len; /* req */ sg_req = &task->smp_task.smp_req; req_len = sg_dma_len(sg_req); req_dma_addr = sg_dma_address(sg_req); /* create header */ /* dw0 */ hdr->dw0 = cpu_to_le32((port->id << CMD_HDR_PORT_OFF) | (1 << CMD_HDR_PRIORITY_OFF) | /* high pri */ (1 << CMD_HDR_MODE_OFF) | /* ini mode */ (2 << CMD_HDR_CMD_OFF)); /* smp */ /* map itct entry */ hdr->dw1 = cpu_to_le32(sas_dev->device_id << CMD_HDR_DEVICE_ID_OFF); /* dw2 */ hdr->dw2 = cpu_to_le32((((req_len-4)/4) << CMD_HDR_CFL_OFF) | (HISI_SAS_MAX_SMP_RESP_SZ/4 << CMD_HDR_MRFL_OFF)); hdr->transfer_tags = cpu_to_le32(slot->idx << CMD_HDR_IPTT_OFF); hdr->cmd_table_addr = cpu_to_le64(req_dma_addr); hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot)); } static void prep_ssp_v1_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot) { struct sas_task *task = slot->task; struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr; struct domain_device *device = task->dev; struct hisi_sas_device *sas_dev = device->lldd_dev; struct hisi_sas_port *port = slot->port; struct sas_ssp_task *ssp_task = &task->ssp_task; struct scsi_cmnd *scsi_cmnd = ssp_task->cmd; struct sas_tmf_task *tmf = slot->tmf; int has_data = 0, priority = !!tmf; u8 *buf_cmd; u32 dw1, dw2; /* create header */ hdr->dw0 = cpu_to_le32((1 << CMD_HDR_RESP_REPORT_OFF) | (0x2 << CMD_HDR_TLR_CTRL_OFF) | (port->id << CMD_HDR_PORT_OFF) | (priority << CMD_HDR_PRIORITY_OFF) | (1 << CMD_HDR_MODE_OFF) | /* ini mode */ (1 << CMD_HDR_CMD_OFF)); /* ssp */ dw1 = 1 << CMD_HDR_VERIFY_DTL_OFF; if (tmf) { dw1 |= 3 << CMD_HDR_SSP_FRAME_TYPE_OFF; } else { switch (scsi_cmnd->sc_data_direction) { case DMA_TO_DEVICE: dw1 |= 2 << CMD_HDR_SSP_FRAME_TYPE_OFF; has_data = 1; break; case DMA_FROM_DEVICE: dw1 |= 1 << CMD_HDR_SSP_FRAME_TYPE_OFF; has_data = 1; break; default: dw1 |= 0 << CMD_HDR_SSP_FRAME_TYPE_OFF; } } /* map itct entry */ dw1 |= sas_dev->device_id << CMD_HDR_DEVICE_ID_OFF; hdr->dw1 = cpu_to_le32(dw1); if (tmf) { dw2 = ((sizeof(struct ssp_tmf_iu) + sizeof(struct ssp_frame_hdr)+3)/4) << CMD_HDR_CFL_OFF; } else { dw2 = ((sizeof(struct ssp_command_iu) + sizeof(struct ssp_frame_hdr)+3)/4) << CMD_HDR_CFL_OFF; } dw2 |= (HISI_SAS_MAX_SSP_RESP_SZ/4) << CMD_HDR_MRFL_OFF; hdr->transfer_tags = cpu_to_le32(slot->idx << CMD_HDR_IPTT_OFF); if (has_data) prep_prd_sge_v1_hw(hisi_hba, slot, hdr, task->scatter, slot->n_elem); hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len); 
hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot)); hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot)); buf_cmd = hisi_sas_cmd_hdr_addr_mem(slot) + sizeof(struct ssp_frame_hdr); hdr->dw2 = cpu_to_le32(dw2); memcpy(buf_cmd, &task->ssp_task.LUN, 8); if (!tmf) { buf_cmd[9] = task->ssp_task.task_attr; memcpy(buf_cmd + 12, task->ssp_task.cmd->cmnd, task->ssp_task.cmd->cmd_len); } else { buf_cmd[10] = tmf->tmf; switch (tmf->tmf) { case TMF_ABORT_TASK: case TMF_QUERY_TASK: buf_cmd[12] = (tmf->tag_of_task_to_be_managed >> 8) & 0xff; buf_cmd[13] = tmf->tag_of_task_to_be_managed & 0xff; break; default: break; } } } /* by default, task resp is complete */ static void slot_err_v1_hw(struct hisi_hba *hisi_hba, struct sas_task *task, struct hisi_sas_slot *slot) { struct task_status_struct *ts = &task->task_status; struct hisi_sas_err_record_v1 *err_record = hisi_sas_status_buf_addr_mem(slot); struct device *dev = hisi_hba->dev; switch (task->task_proto) { case SAS_PROTOCOL_SSP: { int error = -1; u32 dma_err_type = le32_to_cpu(err_record->dma_err_type); u32 dma_tx_err_type = ((dma_err_type & ERR_HDR_DMA_TX_ERR_TYPE_MSK)) >> ERR_HDR_DMA_TX_ERR_TYPE_OFF; u32 dma_rx_err_type = ((dma_err_type & ERR_HDR_DMA_RX_ERR_TYPE_MSK)) >> ERR_HDR_DMA_RX_ERR_TYPE_OFF; u32 trans_tx_fail_type = le32_to_cpu(err_record->trans_tx_fail_type); u32 trans_rx_fail_type = le32_to_cpu(err_record->trans_rx_fail_type); if (dma_tx_err_type) { /* dma tx err */ error = ffs(dma_tx_err_type) - 1 + DMA_TX_ERR_BASE; } else if (dma_rx_err_type) { /* dma rx err */ error = ffs(dma_rx_err_type) - 1 + DMA_RX_ERR_BASE; } else if (trans_tx_fail_type) { /* trans tx err */ error = ffs(trans_tx_fail_type) - 1 + TRANS_TX_FAIL_BASE; } else if (trans_rx_fail_type) { /* trans rx err */ error = ffs(trans_rx_fail_type) - 1 + TRANS_RX_FAIL_BASE; } switch (error) { case DMA_TX_DATA_UNDERFLOW_ERR: case DMA_RX_DATA_UNDERFLOW_ERR: { ts->residual = 0; ts->stat = SAS_DATA_UNDERRUN; break; } case DMA_TX_DATA_SGL_OVERFLOW_ERR: case DMA_TX_DIF_SGL_OVERFLOW_ERR: case DMA_TX_XFER_RDY_LENGTH_OVERFLOW_ERR: case DMA_RX_DATA_OVERFLOW_ERR: case TRANS_RX_FRAME_OVERRUN_ERR: case TRANS_RX_LINK_BUF_OVERRUN_ERR: { ts->stat = SAS_DATA_OVERRUN; ts->residual = 0; break; } case TRANS_TX_PHY_NOT_ENABLE_ERR: { ts->stat = SAS_PHY_DOWN; break; } case TRANS_TX_OPEN_REJCT_WRONG_DEST_ERR: case TRANS_TX_OPEN_REJCT_ZONE_VIOLATION_ERR: case TRANS_TX_OPEN_REJCT_BY_OTHER_ERR: case TRANS_TX_OPEN_REJCT_AIP_TIMEOUT_ERR: case TRANS_TX_OPEN_REJCT_STP_BUSY_ERR: case TRANS_TX_OPEN_REJCT_PROTOCOL_NOT_SUPPORT_ERR: case TRANS_TX_OPEN_REJCT_RATE_NOT_SUPPORT_ERR: case TRANS_TX_OPEN_REJCT_BAD_DEST_ERR: case TRANS_TX_OPEN_BREAK_RECEIVE_ERR: case TRANS_TX_OPEN_REJCT_PATHWAY_BLOCKED_ERR: case TRANS_TX_OPEN_REJCT_NO_DEST_ERR: case TRANS_TX_OPEN_RETRY_ERR: { ts->stat = SAS_OPEN_REJECT; ts->open_rej_reason = SAS_OREJ_UNKNOWN; break; } case TRANS_TX_OPEN_TIMEOUT_ERR: { ts->stat = SAS_OPEN_TO; break; } case TRANS_TX_NAK_RECEIVE_ERR: case TRANS_TX_ACK_NAK_TIMEOUT_ERR: { ts->stat = SAS_NAK_R_ERR; break; } case TRANS_TX_CREDIT_TIMEOUT_ERR: case TRANS_TX_CLOSE_NORMAL_ERR: { /* This will request a retry */ ts->stat = SAS_QUEUE_FULL; slot->abort = 1; break; } default: { ts->stat = SAS_SAM_STAT_CHECK_CONDITION; break; } } } break; case SAS_PROTOCOL_SMP: ts->stat = SAS_SAM_STAT_CHECK_CONDITION; break; case SAS_PROTOCOL_SATA: case SAS_PROTOCOL_STP: case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: { dev_err(dev, "slot err: SATA/STP not supported\n"); } break; default: break; } } static void 
slot_complete_v1_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot) { struct sas_task *task = slot->task; struct hisi_sas_device *sas_dev; struct device *dev = hisi_hba->dev; struct task_status_struct *ts; struct domain_device *device; struct hisi_sas_complete_v1_hdr *complete_queue = hisi_hba->complete_hdr[slot->cmplt_queue]; struct hisi_sas_complete_v1_hdr *complete_hdr; unsigned long flags; u32 cmplt_hdr_data; complete_hdr = &complete_queue[slot->cmplt_queue_slot]; cmplt_hdr_data = le32_to_cpu(complete_hdr->data); if (unlikely(!task || !task->lldd_task || !task->dev)) return; ts = &task->task_status; device = task->dev; sas_dev = device->lldd_dev; spin_lock_irqsave(&task->task_state_lock, flags); task->task_state_flags &= ~SAS_TASK_STATE_PENDING; task->task_state_flags |= SAS_TASK_STATE_DONE; spin_unlock_irqrestore(&task->task_state_lock, flags); memset(ts, 0, sizeof(*ts)); ts->resp = SAS_TASK_COMPLETE; if (unlikely(!sas_dev)) { dev_dbg(dev, "slot complete: port has no device\n"); ts->stat = SAS_PHY_DOWN; goto out; } if (cmplt_hdr_data & CMPLT_HDR_IO_CFG_ERR_MSK) { u32 info_reg = hisi_sas_read32(hisi_hba, HGC_INVLD_DQE_INFO); if (info_reg & HGC_INVLD_DQE_INFO_DQ_MSK) dev_err(dev, "slot complete: [%d:%d] has dq IPTT err\n", slot->cmplt_queue, slot->cmplt_queue_slot); if (info_reg & HGC_INVLD_DQE_INFO_TYPE_MSK) dev_err(dev, "slot complete: [%d:%d] has dq type err\n", slot->cmplt_queue, slot->cmplt_queue_slot); if (info_reg & HGC_INVLD_DQE_INFO_FORCE_MSK) dev_err(dev, "slot complete: [%d:%d] has dq force phy err\n", slot->cmplt_queue, slot->cmplt_queue_slot); if (info_reg & HGC_INVLD_DQE_INFO_PHY_MSK) dev_err(dev, "slot complete: [%d:%d] has dq phy id err\n", slot->cmplt_queue, slot->cmplt_queue_slot); if (info_reg & HGC_INVLD_DQE_INFO_ABORT_MSK) dev_err(dev, "slot complete: [%d:%d] has dq abort flag err\n", slot->cmplt_queue, slot->cmplt_queue_slot); if (info_reg & HGC_INVLD_DQE_INFO_IPTT_OF_MSK) dev_err(dev, "slot complete: [%d:%d] has dq IPTT or ICT err\n", slot->cmplt_queue, slot->cmplt_queue_slot); if (info_reg & HGC_INVLD_DQE_INFO_SSP_ERR_MSK) dev_err(dev, "slot complete: [%d:%d] has dq SSP frame type err\n", slot->cmplt_queue, slot->cmplt_queue_slot); if (info_reg & HGC_INVLD_DQE_INFO_OFL_MSK) dev_err(dev, "slot complete: [%d:%d] has dq order frame len err\n", slot->cmplt_queue, slot->cmplt_queue_slot); ts->stat = SAS_OPEN_REJECT; ts->open_rej_reason = SAS_OREJ_UNKNOWN; goto out; } if (cmplt_hdr_data & CMPLT_HDR_ERR_RCRD_XFRD_MSK && !(cmplt_hdr_data & CMPLT_HDR_RSPNS_XFRD_MSK)) { slot_err_v1_hw(hisi_hba, task, slot); if (unlikely(slot->abort)) { if (dev_is_sata(device) && task->ata_task.use_ncq) sas_ata_device_link_abort(device, true); else sas_task_abort(task); return; } goto out; } switch (task->task_proto) { case SAS_PROTOCOL_SSP: { struct hisi_sas_status_buffer *status_buffer = hisi_sas_status_buf_addr_mem(slot); struct ssp_response_iu *iu = (struct ssp_response_iu *) &status_buffer->iu[0]; sas_ssp_task_response(dev, task, iu); break; } case SAS_PROTOCOL_SMP: { struct scatterlist *sg_resp = &task->smp_task.smp_resp; void *to = page_address(sg_page(sg_resp)); ts->stat = SAS_SAM_STAT_GOOD; memcpy(to + sg_resp->offset, hisi_sas_status_buf_addr_mem(slot) + sizeof(struct hisi_sas_err_record), sg_resp->length); break; } case SAS_PROTOCOL_SATA: case SAS_PROTOCOL_STP: case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: dev_err(dev, "slot complete: SATA/STP not supported\n"); break; default: ts->stat = SAS_SAM_STAT_CHECK_CONDITION; break; } if (!slot->port->port_attached) { dev_err(dev, 
"slot complete: port %d has removed\n", slot->port->sas_port.id); ts->stat = SAS_PHY_DOWN; } out: hisi_sas_slot_task_free(hisi_hba, task, slot, true); if (task->task_done) task->task_done(task); } /* Interrupts */ static irqreturn_t int_phyup_v1_hw(int irq_no, void *p) { struct hisi_sas_phy *phy = p; struct hisi_hba *hisi_hba = phy->hisi_hba; struct device *dev = hisi_hba->dev; struct asd_sas_phy *sas_phy = &phy->sas_phy; int i, phy_no = sas_phy->id; u32 irq_value, context, port_id, link_rate; u32 *frame_rcvd = (u32 *)sas_phy->frame_rcvd; struct sas_identify_frame *id = (struct sas_identify_frame *)frame_rcvd; irqreturn_t res = IRQ_HANDLED; irq_value = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2); if (!(irq_value & CHL_INT2_SL_PHY_ENA_MSK)) { dev_dbg(dev, "phyup: irq_value = %x not set enable bit\n", irq_value); res = IRQ_NONE; goto end; } context = hisi_sas_read32(hisi_hba, PHY_CONTEXT); if (context & 1 << phy_no) { dev_err(dev, "phyup: phy%d SATA attached equipment\n", phy_no); goto end; } port_id = (hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA) >> (4 * phy_no)) & 0xf; if (port_id == 0xf) { dev_err(dev, "phyup: phy%d invalid portid\n", phy_no); res = IRQ_NONE; goto end; } for (i = 0; i < 6; i++) { u32 idaf = hisi_sas_phy_read32(hisi_hba, phy_no, RX_IDAF_DWORD0 + (i * 4)); frame_rcvd[i] = __swab32(idaf); } /* Get the linkrate */ link_rate = hisi_sas_read32(hisi_hba, PHY_CONN_RATE); link_rate = (link_rate >> (phy_no * 4)) & 0xf; sas_phy->linkrate = link_rate; sas_phy->oob_mode = SAS_OOB_MODE; memcpy(sas_phy->attached_sas_addr, &id->sas_addr, SAS_ADDR_SIZE); dev_info(dev, "phyup: phy%d link_rate=%d\n", phy_no, link_rate); phy->port_id = port_id; phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA); phy->phy_type |= PORT_TYPE_SAS; phy->phy_attached = 1; phy->identify.device_type = id->dev_type; phy->frame_rcvd_size = sizeof(struct sas_identify_frame); if (phy->identify.device_type == SAS_END_DEVICE) phy->identify.target_port_protocols = SAS_PROTOCOL_SSP; else if (phy->identify.device_type != SAS_PHY_UNUSED) phy->identify.target_port_protocols = SAS_PROTOCOL_SMP; hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP); end: if (phy->reset_completion) complete(phy->reset_completion); hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2, CHL_INT2_SL_PHY_ENA_MSK); if (irq_value & CHL_INT2_SL_PHY_ENA_MSK) { u32 chl_int0 = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT0); chl_int0 &= ~CHL_INT0_PHYCTRL_NOTRDY_MSK; hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, chl_int0); hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0_MSK, 0x3ce3ee); } return res; } static irqreturn_t int_bcast_v1_hw(int irq, void *p) { struct hisi_sas_phy *phy = p; struct hisi_hba *hisi_hba = phy->hisi_hba; struct asd_sas_phy *sas_phy = &phy->sas_phy; struct device *dev = hisi_hba->dev; int phy_no = sas_phy->id; u32 irq_value; irqreturn_t res = IRQ_HANDLED; irq_value = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2); if (!(irq_value & CHL_INT2_SL_RX_BC_ACK_MSK)) { dev_err(dev, "bcast: irq_value = %x not set enable bit\n", irq_value); res = IRQ_NONE; goto end; } hisi_sas_phy_bcast(phy); end: hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2, CHL_INT2_SL_RX_BC_ACK_MSK); return res; } static irqreturn_t int_abnormal_v1_hw(int irq, void *p) { struct hisi_sas_phy *phy = p; struct hisi_hba *hisi_hba = phy->hisi_hba; struct device *dev = hisi_hba->dev; struct asd_sas_phy *sas_phy = &phy->sas_phy; u32 irq_value, irq_mask_old; int phy_no = sas_phy->id; /* mask_int0 */ irq_mask_old = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT0_MSK); 
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0_MSK, 0x3fffff); /* read int0 */ irq_value = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT0); if (irq_value & CHL_INT0_PHYCTRL_NOTRDY_MSK) { u32 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE); hisi_sas_phy_down(hisi_hba, phy_no, (phy_state & 1 << phy_no) ? 1 : 0, GFP_ATOMIC); } if (irq_value & CHL_INT0_ID_TIMEOUT_MSK) dev_dbg(dev, "abnormal: ID_TIMEOUT phy%d identify timeout\n", phy_no); if (irq_value & CHL_INT0_DWS_LOST_MSK) dev_dbg(dev, "abnormal: DWS_LOST phy%d dws lost\n", phy_no); if (irq_value & CHL_INT0_SN_FAIL_NGR_MSK) dev_dbg(dev, "abnormal: SN_FAIL_NGR phy%d sn fail ngr\n", phy_no); if (irq_value & CHL_INT0_SL_IDAF_FAIL_MSK || irq_value & CHL_INT0_SL_OPAF_FAIL_MSK) dev_dbg(dev, "abnormal: SL_ID/OPAF_FAIL phy%d check adr frm err\n", phy_no); if (irq_value & CHL_INT0_SL_PS_FAIL_OFF) dev_dbg(dev, "abnormal: SL_PS_FAIL phy%d fail\n", phy_no); /* write to zero */ hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, irq_value); if (irq_value & CHL_INT0_PHYCTRL_NOTRDY_MSK) hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0_MSK, 0x3fffff & ~CHL_INT0_MSK_PHYCTRL_NOTRDY_MSK); else hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0_MSK, irq_mask_old); return IRQ_HANDLED; } static irqreturn_t cq_interrupt_v1_hw(int irq, void *p) { struct hisi_sas_cq *cq = p; struct hisi_hba *hisi_hba = cq->hisi_hba; struct hisi_sas_slot *slot; int queue = cq->id; struct hisi_sas_complete_v1_hdr *complete_queue = (struct hisi_sas_complete_v1_hdr *) hisi_hba->complete_hdr[queue]; u32 rd_point = cq->rd_point, wr_point; spin_lock(&hisi_hba->lock); hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue); wr_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_WR_PTR + (0x14 * queue)); while (rd_point != wr_point) { struct hisi_sas_complete_v1_hdr *complete_hdr; int idx; u32 cmplt_hdr_data; complete_hdr = &complete_queue[rd_point]; cmplt_hdr_data = le32_to_cpu(complete_hdr->data); idx = (cmplt_hdr_data & CMPLT_HDR_IPTT_MSK) >> CMPLT_HDR_IPTT_OFF; slot = &hisi_hba->slot_info[idx]; /* The completion queue and queue slot index are not * necessarily the same as the delivery queue and * queue slot index. 
*/ slot->cmplt_queue_slot = rd_point; slot->cmplt_queue = queue; slot_complete_v1_hw(hisi_hba, slot); if (++rd_point >= HISI_SAS_QUEUE_SLOTS) rd_point = 0; } /* update rd_point */ cq->rd_point = rd_point; hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point); spin_unlock(&hisi_hba->lock); return IRQ_HANDLED; } static irqreturn_t fatal_ecc_int_v1_hw(int irq, void *p) { struct hisi_hba *hisi_hba = p; struct device *dev = hisi_hba->dev; u32 ecc_int = hisi_sas_read32(hisi_hba, SAS_ECC_INTR); if (ecc_int & SAS_ECC_INTR_DQ_ECC1B_MSK) { u32 ecc_err = hisi_sas_read32(hisi_hba, HGC_ECC_ERR); panic("%s: Fatal DQ 1b ECC interrupt (0x%x)\n", dev_name(dev), ecc_err); } if (ecc_int & SAS_ECC_INTR_DQ_ECCBAD_MSK) { u32 addr = (hisi_sas_read32(hisi_hba, HGC_DQ_ECC_ADDR) & HGC_DQ_ECC_ADDR_BAD_MSK) >> HGC_DQ_ECC_ADDR_BAD_OFF; panic("%s: Fatal DQ RAM ECC interrupt @ 0x%08x\n", dev_name(dev), addr); } if (ecc_int & SAS_ECC_INTR_IOST_ECC1B_MSK) { u32 ecc_err = hisi_sas_read32(hisi_hba, HGC_ECC_ERR); panic("%s: Fatal IOST 1b ECC interrupt (0x%x)\n", dev_name(dev), ecc_err); } if (ecc_int & SAS_ECC_INTR_IOST_ECCBAD_MSK) { u32 addr = (hisi_sas_read32(hisi_hba, HGC_IOST_ECC_ADDR) & HGC_IOST_ECC_ADDR_BAD_MSK) >> HGC_IOST_ECC_ADDR_BAD_OFF; panic("%s: Fatal IOST RAM ECC interrupt @ 0x%08x\n", dev_name(dev), addr); } if (ecc_int & SAS_ECC_INTR_ITCT_ECCBAD_MSK) { u32 addr = (hisi_sas_read32(hisi_hba, HGC_ITCT_ECC_ADDR) & HGC_ITCT_ECC_ADDR_BAD_MSK) >> HGC_ITCT_ECC_ADDR_BAD_OFF; panic("%s: Fatal TCT RAM ECC interrupt @ 0x%08x\n", dev_name(dev), addr); } if (ecc_int & SAS_ECC_INTR_ITCT_ECC1B_MSK) { u32 ecc_err = hisi_sas_read32(hisi_hba, HGC_ECC_ERR); panic("%s: Fatal ITCT 1b ECC interrupt (0x%x)\n", dev_name(dev), ecc_err); } hisi_sas_write32(hisi_hba, SAS_ECC_INTR, ecc_int | 0x3f); return IRQ_HANDLED; } static irqreturn_t fatal_axi_int_v1_hw(int irq, void *p) { struct hisi_hba *hisi_hba = p; struct device *dev = hisi_hba->dev; u32 axi_int = hisi_sas_read32(hisi_hba, ENT_INT_SRC2); u32 axi_info = hisi_sas_read32(hisi_hba, HGC_AXI_FIFO_ERR_INFO); if (axi_int & ENT_INT_SRC2_DQ_CFG_ERR_MSK) panic("%s: Fatal DQ_CFG_ERR interrupt (0x%x)\n", dev_name(dev), axi_info); if (axi_int & ENT_INT_SRC2_CQ_CFG_ERR_MSK) panic("%s: Fatal CQ_CFG_ERR interrupt (0x%x)\n", dev_name(dev), axi_info); if (axi_int & ENT_INT_SRC2_AXI_WRONG_INT_MSK) panic("%s: Fatal AXI_WRONG_INT interrupt (0x%x)\n", dev_name(dev), axi_info); if (axi_int & ENT_INT_SRC2_AXI_OVERLF_INT_MSK) panic("%s: Fatal AXI_OVERLF_INT incorrect interrupt (0x%x)\n", dev_name(dev), axi_info); hisi_sas_write32(hisi_hba, ENT_INT_SRC2, axi_int | 0x30000000); return IRQ_HANDLED; } static irq_handler_t phy_interrupts[HISI_SAS_PHY_INT_NR] = { int_bcast_v1_hw, int_phyup_v1_hw, int_abnormal_v1_hw }; static irq_handler_t fatal_interrupts[HISI_SAS_MAX_QUEUES] = { fatal_ecc_int_v1_hw, fatal_axi_int_v1_hw }; static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba) { struct platform_device *pdev = hisi_hba->platform_dev; struct device *dev = &pdev->dev; int i, j, irq, rc, idx; for (i = 0; i < hisi_hba->n_phy; i++) { struct hisi_sas_phy *phy = &hisi_hba->phy[i]; idx = i * HISI_SAS_PHY_INT_NR; for (j = 0; j < HISI_SAS_PHY_INT_NR; j++, idx++) { irq = platform_get_irq(pdev, idx); if (irq < 0) return irq; rc = devm_request_irq(dev, irq, phy_interrupts[j], 0, DRV_NAME " phy", phy); if (rc) { dev_err(dev, "irq init: could not request phy interrupt %d, rc=%d\n", irq, rc); return rc; } } } idx = hisi_hba->n_phy * HISI_SAS_PHY_INT_NR; for (i = 0; i < hisi_hba->queue_count; i++, idx++) 
{ irq = platform_get_irq(pdev, idx); if (irq < 0) return irq; rc = devm_request_irq(dev, irq, cq_interrupt_v1_hw, 0, DRV_NAME " cq", &hisi_hba->cq[i]); if (rc) { dev_err(dev, "irq init: could not request cq interrupt %d, rc=%d\n", irq, rc); return rc; } } idx = (hisi_hba->n_phy * HISI_SAS_PHY_INT_NR) + hisi_hba->queue_count; for (i = 0; i < HISI_SAS_FATAL_INT_NR; i++, idx++) { irq = platform_get_irq(pdev, idx); if (irq < 0) return irq; rc = devm_request_irq(dev, irq, fatal_interrupts[i], 0, DRV_NAME " fatal", hisi_hba); if (rc) { dev_err(dev, "irq init: could not request fatal interrupt %d, rc=%d\n", irq, rc); return rc; } } hisi_hba->cq_nvecs = hisi_hba->queue_count; return 0; } static int interrupt_openall_v1_hw(struct hisi_hba *hisi_hba) { int i; u32 val; for (i = 0; i < hisi_hba->n_phy; i++) { /* Clear interrupt status */ val = hisi_sas_phy_read32(hisi_hba, i, CHL_INT0); hisi_sas_phy_write32(hisi_hba, i, CHL_INT0, val); val = hisi_sas_phy_read32(hisi_hba, i, CHL_INT1); hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, val); val = hisi_sas_phy_read32(hisi_hba, i, CHL_INT2); hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, val); /* Unmask interrupt */ hisi_sas_phy_write32(hisi_hba, i, CHL_INT0_MSK, 0x3ce3ee); hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0x17fff); hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8000012a); /* bypass chip bug mask abnormal intr */ hisi_sas_phy_write32(hisi_hba, i, CHL_INT0_MSK, 0x3fffff & ~CHL_INT0_MSK_PHYCTRL_NOTRDY_MSK); } return 0; } static int hisi_sas_v1_init(struct hisi_hba *hisi_hba) { int rc; rc = hw_init_v1_hw(hisi_hba); if (rc) return rc; rc = interrupt_init_v1_hw(hisi_hba); if (rc) return rc; rc = interrupt_openall_v1_hw(hisi_hba); if (rc) return rc; return 0; } static struct attribute *host_v1_hw_attrs[] = { &dev_attr_phy_event_threshold.attr, NULL }; ATTRIBUTE_GROUPS(host_v1_hw); static const struct scsi_host_template sht_v1_hw = { .name = DRV_NAME, .proc_name = DRV_NAME, .module = THIS_MODULE, .queuecommand = sas_queuecommand, .dma_need_drain = ata_scsi_dma_need_drain, .target_alloc = sas_target_alloc, .slave_configure = hisi_sas_slave_configure, .scan_finished = hisi_sas_scan_finished, .scan_start = hisi_sas_scan_start, .change_queue_depth = sas_change_queue_depth, .bios_param = sas_bios_param, .this_id = -1, .sg_tablesize = HISI_SAS_SGE_PAGE_CNT, .max_sectors = SCSI_DEFAULT_MAX_SECTORS, .eh_device_reset_handler = sas_eh_device_reset_handler, .eh_target_reset_handler = sas_eh_target_reset_handler, .slave_alloc = hisi_sas_slave_alloc, .target_destroy = sas_target_destroy, .ioctl = sas_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = sas_ioctl, #endif .shost_groups = host_v1_hw_groups, .host_reset = hisi_sas_host_reset, }; static const struct hisi_sas_hw hisi_sas_v1_hw = { .hw_init = hisi_sas_v1_init, .setup_itct = setup_itct_v1_hw, .sl_notify_ssp = sl_notify_ssp_v1_hw, .clear_itct = clear_itct_v1_hw, .prep_smp = prep_smp_v1_hw, .prep_ssp = prep_ssp_v1_hw, .start_delivery = start_delivery_v1_hw, .phys_init = phys_init_v1_hw, .phy_start = start_phy_v1_hw, .phy_disable = disable_phy_v1_hw, .phy_hard_reset = phy_hard_reset_v1_hw, .phy_set_linkrate = phy_set_linkrate_v1_hw, .phy_get_max_linkrate = phy_get_max_linkrate_v1_hw, .get_wideport_bitmap = get_wideport_bitmap_v1_hw, .complete_hdr_size = sizeof(struct hisi_sas_complete_v1_hdr), .sht = &sht_v1_hw, }; static int hisi_sas_v1_probe(struct platform_device *pdev) { return hisi_sas_probe(pdev, &hisi_sas_v1_hw); } static const struct of_device_id sas_v1_of_match[] = { { .compatible = 
"hisilicon,hip05-sas-v1",}, {}, }; MODULE_DEVICE_TABLE(of, sas_v1_of_match); static const struct acpi_device_id sas_v1_acpi_match[] = { { "HISI0161", 0 }, { } }; MODULE_DEVICE_TABLE(acpi, sas_v1_acpi_match); static struct platform_driver hisi_sas_v1_driver = { .probe = hisi_sas_v1_probe, .remove_new = hisi_sas_remove, .driver = { .name = DRV_NAME, .of_match_table = sas_v1_of_match, .acpi_match_table = ACPI_PTR(sas_v1_acpi_match), }, }; module_platform_driver(hisi_sas_v1_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("John Garry <[email protected]>"); MODULE_DESCRIPTION("HISILICON SAS controller v1 hw driver"); MODULE_ALIAS("platform:" DRV_NAME);
linux-master
drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
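interrupt_init_v1_hw() in the file above walks the platform IRQ table in a fixed order: HISI_SAS_PHY_INT_NR vectors for each phy, then one vector per completion queue, then the fatal-error vectors. The standalone sketch below only makes that index arithmetic explicit; the constants, helper names, and controller geometry are illustrative assumptions, not driver symbols.

#include <stdio.h>

/* Illustrative constants; the real values come from the driver headers. */
#define PHY_INT_NR	3	/* bcast, phyup, abnormal handlers per phy */
#define FATAL_INT_NR	2	/* ECC and AXI fatal handlers */

/* Hypothetical helpers mirroring the idx arithmetic in interrupt_init_v1_hw(). */
static int phy_irq_idx(int phy, int which)
{
	return phy * PHY_INT_NR + which;
}

static int cq_irq_idx(int n_phy, int queue)
{
	return n_phy * PHY_INT_NR + queue;
}

static int fatal_irq_idx(int n_phy, int n_queue, int which)
{
	return n_phy * PHY_INT_NR + n_queue + which;
}

int main(void)
{
	int n_phy = 8, n_queue = 32;	/* assumed controller geometry */

	printf("phy 0, abnormal -> platform irq index %d\n", phy_irq_idx(0, 2));
	printf("cq 5            -> platform irq index %d\n", cq_irq_idx(n_phy, 5));
	printf("fatal (axi)     -> platform irq index %d\n",
	       fatal_irq_idx(n_phy, n_queue, 1));
	return 0;
}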
/* * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * The full GNU General Public License is included in this distribution * in the file called LICENSE.GPL. * * BSD LICENSE * * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <scsi/sas.h> #include <linux/bitops.h> #include "isci.h" #include "port.h" #include "remote_device.h" #include "request.h" #include "remote_node_context.h" #include "scu_event_codes.h" #include "task.h" #undef C #define C(a) (#a) const char *dev_state_name(enum sci_remote_device_states state) { static const char * const strings[] = REMOTE_DEV_STATES; return strings[state]; } #undef C enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev, enum sci_remote_node_suspension_reasons reason) { return sci_remote_node_context_suspend(&idev->rnc, reason, SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT); } /** * isci_remote_device_ready() - This function is called by the ihost when the * remote device is ready. We mark the isci device as ready and signal the * waiting proccess. 
* @ihost: our valid isci_host * @idev: remote device * */ static void isci_remote_device_ready(struct isci_host *ihost, struct isci_remote_device *idev) { dev_dbg(&ihost->pdev->dev, "%s: idev = %p\n", __func__, idev); clear_bit(IDEV_IO_NCQERROR, &idev->flags); set_bit(IDEV_IO_READY, &idev->flags); if (test_and_clear_bit(IDEV_START_PENDING, &idev->flags)) wake_up(&ihost->eventq); } static enum sci_status sci_remote_device_terminate_req( struct isci_host *ihost, struct isci_remote_device *idev, int check_abort, struct isci_request *ireq) { if (!test_bit(IREQ_ACTIVE, &ireq->flags) || (ireq->target_device != idev) || (check_abort && !test_bit(IREQ_PENDING_ABORT, &ireq->flags))) return SCI_SUCCESS; dev_dbg(&ihost->pdev->dev, "%s: idev=%p; flags=%lx; req=%p; req target=%p\n", __func__, idev, idev->flags, ireq, ireq->target_device); set_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags); return sci_controller_terminate_request(ihost, idev, ireq); } static enum sci_status sci_remote_device_terminate_reqs_checkabort( struct isci_remote_device *idev, int chk) { struct isci_host *ihost = idev->owning_port->owning_controller; enum sci_status status = SCI_SUCCESS; u32 i; for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) { struct isci_request *ireq = ihost->reqs[i]; enum sci_status s; s = sci_remote_device_terminate_req(ihost, idev, chk, ireq); if (s != SCI_SUCCESS) status = s; } return status; } static bool isci_compare_suspendcount( struct isci_remote_device *idev, u32 localcount) { smp_rmb(); /* Check for a change in the suspend count, or the RNC * being destroyed. */ return (localcount != idev->rnc.suspend_count) || sci_remote_node_context_is_being_destroyed(&idev->rnc); } static bool isci_check_reqterm( struct isci_host *ihost, struct isci_remote_device *idev, struct isci_request *ireq, u32 localcount) { unsigned long flags; bool res; spin_lock_irqsave(&ihost->scic_lock, flags); res = isci_compare_suspendcount(idev, localcount) && !test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags); spin_unlock_irqrestore(&ihost->scic_lock, flags); return res; } static bool isci_check_devempty( struct isci_host *ihost, struct isci_remote_device *idev, u32 localcount) { unsigned long flags; bool res; spin_lock_irqsave(&ihost->scic_lock, flags); res = isci_compare_suspendcount(idev, localcount) && idev->started_request_count == 0; spin_unlock_irqrestore(&ihost->scic_lock, flags); return res; } enum sci_status isci_remote_device_terminate_requests( struct isci_host *ihost, struct isci_remote_device *idev, struct isci_request *ireq) { enum sci_status status = SCI_SUCCESS; unsigned long flags; u32 rnc_suspend_count; spin_lock_irqsave(&ihost->scic_lock, flags); if (isci_get_device(idev) == NULL) { dev_dbg(&ihost->pdev->dev, "%s: failed isci_get_device(idev=%p)\n", __func__, idev); spin_unlock_irqrestore(&ihost->scic_lock, flags); status = SCI_FAILURE; } else { /* If already suspended, don't wait for another suspension. */ smp_rmb(); rnc_suspend_count = sci_remote_node_context_is_suspended(&idev->rnc) ? 0 : idev->rnc.suspend_count; dev_dbg(&ihost->pdev->dev, "%s: idev=%p, ireq=%p; started_request_count=%d, " "rnc_suspend_count=%d, rnc.suspend_count=%d" "about to wait\n", __func__, idev, ireq, idev->started_request_count, rnc_suspend_count, idev->rnc.suspend_count); #define MAX_SUSPEND_MSECS 10000 if (ireq) { /* Terminate a specific TC. 
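 * The tag is pinned with IREQ_NO_AUTO_FREE_TAG so it is not released automatically while the wait below is in progress; it is freed explicitly afterwards.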
*/ set_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags); sci_remote_device_terminate_req(ihost, idev, 0, ireq); spin_unlock_irqrestore(&ihost->scic_lock, flags); if (!wait_event_timeout(ihost->eventq, isci_check_reqterm(ihost, idev, ireq, rnc_suspend_count), msecs_to_jiffies(MAX_SUSPEND_MSECS))) { dev_warn(&ihost->pdev->dev, "%s host%d timeout single\n", __func__, ihost->id); dev_dbg(&ihost->pdev->dev, "%s: ******* Timeout waiting for " "suspend; idev=%p, current state %s; " "started_request_count=%d, flags=%lx\n\t" "rnc_suspend_count=%d, rnc.suspend_count=%d " "RNC: current state %s, current " "suspend_type %x dest state %d;\n" "ireq=%p, ireq->flags = %lx\n", __func__, idev, dev_state_name(idev->sm.current_state_id), idev->started_request_count, idev->flags, rnc_suspend_count, idev->rnc.suspend_count, rnc_state_name(idev->rnc.sm.current_state_id), idev->rnc.suspend_type, idev->rnc.destination_state, ireq, ireq->flags); } spin_lock_irqsave(&ihost->scic_lock, flags); clear_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags); if (!test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags)) isci_free_tag(ihost, ireq->io_tag); spin_unlock_irqrestore(&ihost->scic_lock, flags); } else { /* Terminate all TCs. */ sci_remote_device_terminate_requests(idev); spin_unlock_irqrestore(&ihost->scic_lock, flags); if (!wait_event_timeout(ihost->eventq, isci_check_devempty(ihost, idev, rnc_suspend_count), msecs_to_jiffies(MAX_SUSPEND_MSECS))) { dev_warn(&ihost->pdev->dev, "%s host%d timeout all\n", __func__, ihost->id); dev_dbg(&ihost->pdev->dev, "%s: ******* Timeout waiting for " "suspend; idev=%p, current state %s; " "started_request_count=%d, flags=%lx\n\t" "rnc_suspend_count=%d, " "RNC: current state %s, " "rnc.suspend_count=%d, current " "suspend_type %x dest state %d\n", __func__, idev, dev_state_name(idev->sm.current_state_id), idev->started_request_count, idev->flags, rnc_suspend_count, rnc_state_name(idev->rnc.sm.current_state_id), idev->rnc.suspend_count, idev->rnc.suspend_type, idev->rnc.destination_state); } } dev_dbg(&ihost->pdev->dev, "%s: idev=%p, wait done\n", __func__, idev); isci_put_device(idev); } return status; } /** * isci_remote_device_not_ready() - This function is called by the ihost when * the remote device is not ready. We mark the isci device as ready (not * "ready_for_io") and signal the waiting proccess. * @ihost: This parameter specifies the isci host object. * @idev: This parameter specifies the remote device * @reason: Reason to switch on * * sci_lock is held on entrance to this function. */ static void isci_remote_device_not_ready(struct isci_host *ihost, struct isci_remote_device *idev, u32 reason) { dev_dbg(&ihost->pdev->dev, "%s: isci_device = %p; reason = %d\n", __func__, idev, reason); switch (reason) { case SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED: set_bit(IDEV_IO_NCQERROR, &idev->flags); /* Suspend the remote device so the I/O can be terminated. */ sci_remote_device_suspend(idev, SCI_SW_SUSPEND_NORMAL); /* Kill all outstanding requests for the device. */ sci_remote_device_terminate_requests(idev); fallthrough; /* into the default case */ default: clear_bit(IDEV_IO_READY, &idev->flags); break; } } /* called once the remote node context is ready to be freed. * The remote device can now report that its stop operation is complete. 
none */ static void rnc_destruct_done(void *_dev) { struct isci_remote_device *idev = _dev; BUG_ON(idev->started_request_count != 0); sci_change_state(&idev->sm, SCI_DEV_STOPPED); } enum sci_status sci_remote_device_terminate_requests( struct isci_remote_device *idev) { return sci_remote_device_terminate_reqs_checkabort(idev, 0); } enum sci_status sci_remote_device_stop(struct isci_remote_device *idev, u32 timeout) { struct sci_base_state_machine *sm = &idev->sm; enum sci_remote_device_states state = sm->current_state_id; switch (state) { case SCI_DEV_INITIAL: case SCI_DEV_FAILED: case SCI_DEV_FINAL: default: dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n", __func__, dev_state_name(state)); return SCI_FAILURE_INVALID_STATE; case SCI_DEV_STOPPED: return SCI_SUCCESS; case SCI_DEV_STARTING: /* device not started so there had better be no requests */ BUG_ON(idev->started_request_count != 0); sci_remote_node_context_destruct(&idev->rnc, rnc_destruct_done, idev); /* Transition to the stopping state and wait for the * remote node to complete being posted and invalidated. */ sci_change_state(sm, SCI_DEV_STOPPING); return SCI_SUCCESS; case SCI_DEV_READY: case SCI_STP_DEV_IDLE: case SCI_STP_DEV_CMD: case SCI_STP_DEV_NCQ: case SCI_STP_DEV_NCQ_ERROR: case SCI_STP_DEV_AWAIT_RESET: case SCI_SMP_DEV_IDLE: case SCI_SMP_DEV_CMD: sci_change_state(sm, SCI_DEV_STOPPING); if (idev->started_request_count == 0) sci_remote_node_context_destruct(&idev->rnc, rnc_destruct_done, idev); else { sci_remote_device_suspend( idev, SCI_SW_SUSPEND_LINKHANG_DETECT); sci_remote_device_terminate_requests(idev); } return SCI_SUCCESS; case SCI_DEV_STOPPING: /* All requests should have been terminated, but if there is an * attempt to stop a device already in the stopping state, then * try again to terminate. 
*/ return sci_remote_device_terminate_requests(idev); case SCI_DEV_RESETTING: sci_change_state(sm, SCI_DEV_STOPPING); return SCI_SUCCESS; } } enum sci_status sci_remote_device_reset(struct isci_remote_device *idev) { struct sci_base_state_machine *sm = &idev->sm; enum sci_remote_device_states state = sm->current_state_id; switch (state) { case SCI_DEV_INITIAL: case SCI_DEV_STOPPED: case SCI_DEV_STARTING: case SCI_SMP_DEV_IDLE: case SCI_SMP_DEV_CMD: case SCI_DEV_STOPPING: case SCI_DEV_FAILED: case SCI_DEV_RESETTING: case SCI_DEV_FINAL: default: dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n", __func__, dev_state_name(state)); return SCI_FAILURE_INVALID_STATE; case SCI_DEV_READY: case SCI_STP_DEV_IDLE: case SCI_STP_DEV_CMD: case SCI_STP_DEV_NCQ: case SCI_STP_DEV_NCQ_ERROR: case SCI_STP_DEV_AWAIT_RESET: sci_change_state(sm, SCI_DEV_RESETTING); return SCI_SUCCESS; } } enum sci_status sci_remote_device_reset_complete(struct isci_remote_device *idev) { struct sci_base_state_machine *sm = &idev->sm; enum sci_remote_device_states state = sm->current_state_id; if (state != SCI_DEV_RESETTING) { dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n", __func__, dev_state_name(state)); return SCI_FAILURE_INVALID_STATE; } sci_change_state(sm, SCI_DEV_READY); return SCI_SUCCESS; } enum sci_status sci_remote_device_frame_handler(struct isci_remote_device *idev, u32 frame_index) { struct sci_base_state_machine *sm = &idev->sm; enum sci_remote_device_states state = sm->current_state_id; struct isci_host *ihost = idev->owning_port->owning_controller; enum sci_status status; switch (state) { case SCI_DEV_INITIAL: case SCI_DEV_STOPPED: case SCI_DEV_STARTING: case SCI_STP_DEV_IDLE: case SCI_SMP_DEV_IDLE: case SCI_DEV_FINAL: default: dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n", __func__, dev_state_name(state)); /* Return the frame back to the controller */ sci_controller_release_frame(ihost, frame_index); return SCI_FAILURE_INVALID_STATE; case SCI_DEV_READY: case SCI_STP_DEV_NCQ_ERROR: case SCI_STP_DEV_AWAIT_RESET: case SCI_DEV_STOPPING: case SCI_DEV_FAILED: case SCI_DEV_RESETTING: { struct isci_request *ireq; struct ssp_frame_hdr hdr; void *frame_header; ssize_t word_cnt; status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, frame_index, &frame_header); if (status != SCI_SUCCESS) return status; word_cnt = sizeof(hdr) / sizeof(u32); sci_swab32_cpy(&hdr, frame_header, word_cnt); ireq = sci_request_by_tag(ihost, be16_to_cpu(hdr.tag)); if (ireq && ireq->target_device == idev) { /* The IO request is now in charge of releasing the frame */ status = sci_io_request_frame_handler(ireq, frame_index); } else { /* We could not map this tag to a valid IO * request Just toss the frame and continue */ sci_controller_release_frame(ihost, frame_index); } break; } case SCI_STP_DEV_NCQ: { struct dev_to_host_fis *hdr; status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, frame_index, (void **)&hdr); if (status != SCI_SUCCESS) return status; if (hdr->fis_type == FIS_SETDEVBITS && (hdr->status & ATA_ERR)) { idev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED; /* TODO Check sactive and complete associated IO if any. */ sci_change_state(sm, SCI_STP_DEV_NCQ_ERROR); } else if (hdr->fis_type == FIS_REGD2H && (hdr->status & ATA_ERR)) { /* * Some devices return D2H FIS when an NCQ error is detected. * Treat this like an SDB error FIS ready reason. 
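 * That is, record the SDB-error not-ready reason and move to the NCQ error substate.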
*/ idev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED; sci_change_state(&idev->sm, SCI_STP_DEV_NCQ_ERROR); } else status = SCI_FAILURE; sci_controller_release_frame(ihost, frame_index); break; } case SCI_STP_DEV_CMD: case SCI_SMP_DEV_CMD: /* The device does not process any UF received from the hardware while * in this state. All unsolicited frames are forwarded to the io request * object. */ status = sci_io_request_frame_handler(idev->working_request, frame_index); break; } return status; } static bool is_remote_device_ready(struct isci_remote_device *idev) { struct sci_base_state_machine *sm = &idev->sm; enum sci_remote_device_states state = sm->current_state_id; switch (state) { case SCI_DEV_READY: case SCI_STP_DEV_IDLE: case SCI_STP_DEV_CMD: case SCI_STP_DEV_NCQ: case SCI_STP_DEV_NCQ_ERROR: case SCI_STP_DEV_AWAIT_RESET: case SCI_SMP_DEV_IDLE: case SCI_SMP_DEV_CMD: return true; default: return false; } } /* * called once the remote node context has transisitioned to a ready * state (after suspending RX and/or TX due to early D2H fis) */ static void atapi_remote_device_resume_done(void *_dev) { struct isci_remote_device *idev = _dev; struct isci_request *ireq = idev->working_request; sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); } enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev, u32 event_code) { enum sci_status status; struct sci_base_state_machine *sm = &idev->sm; enum sci_remote_device_states state = sm->current_state_id; switch (scu_get_event_type(event_code)) { case SCU_EVENT_TYPE_RNC_OPS_MISC: case SCU_EVENT_TYPE_RNC_SUSPEND_TX: case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX: status = sci_remote_node_context_event_handler(&idev->rnc, event_code); break; case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT: if (scu_get_event_code(event_code) == SCU_EVENT_IT_NEXUS_TIMEOUT) { status = SCI_SUCCESS; /* Suspend the associated RNC */ sci_remote_device_suspend(idev, SCI_SW_SUSPEND_NORMAL); dev_dbg(scirdev_to_dev(idev), "%s: device: %p event code: %x: %s\n", __func__, idev, event_code, is_remote_device_ready(idev) ? "I_T_Nexus_Timeout event" : "I_T_Nexus_Timeout event in wrong state"); break; } fallthrough; /* and treat as unhandled */ default: dev_dbg(scirdev_to_dev(idev), "%s: device: %p event code: %x: %s\n", __func__, idev, event_code, is_remote_device_ready(idev) ? "unexpected event" : "unexpected event in wrong state"); status = SCI_FAILURE_INVALID_STATE; break; } if (status != SCI_SUCCESS) return status; /* Decode device-specific states that may require an RNC resume during * normal operation. When the abort path is active, these resumes are * managed when the abort path exits. */ if (state == SCI_STP_DEV_ATAPI_ERROR) { /* For ATAPI error state resume the RNC right away. */ if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX || scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX) { return sci_remote_node_context_resume(&idev->rnc, atapi_remote_device_resume_done, idev); } } if (state == SCI_STP_DEV_IDLE) { /* We pick up suspension events to handle specifically to this * state. We resume the RNC right away. 
*/ if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX || scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX) status = sci_remote_node_context_resume(&idev->rnc, NULL, NULL); } return status; } static void sci_remote_device_start_request(struct isci_remote_device *idev, struct isci_request *ireq, enum sci_status status) { struct isci_port *iport = idev->owning_port; /* cleanup requests that failed after starting on the port */ if (status != SCI_SUCCESS) sci_port_complete_io(iport, idev, ireq); else { kref_get(&idev->kref); idev->started_request_count++; } } enum sci_status sci_remote_device_start_io(struct isci_host *ihost, struct isci_remote_device *idev, struct isci_request *ireq) { struct sci_base_state_machine *sm = &idev->sm; enum sci_remote_device_states state = sm->current_state_id; struct isci_port *iport = idev->owning_port; enum sci_status status; switch (state) { case SCI_DEV_INITIAL: case SCI_DEV_STOPPED: case SCI_DEV_STARTING: case SCI_STP_DEV_NCQ_ERROR: case SCI_DEV_STOPPING: case SCI_DEV_FAILED: case SCI_DEV_RESETTING: case SCI_DEV_FINAL: default: dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n", __func__, dev_state_name(state)); return SCI_FAILURE_INVALID_STATE; case SCI_DEV_READY: /* attempt to start an io request for this device object. The remote * device object will issue the start request for the io and if * successful it will start the request for the port object then * increment its own request count. */ status = sci_port_start_io(iport, idev, ireq); if (status != SCI_SUCCESS) return status; status = sci_remote_node_context_start_io(&idev->rnc, ireq); if (status != SCI_SUCCESS) break; status = sci_request_start(ireq); break; case SCI_STP_DEV_IDLE: { /* handle the start io operation for a sata device that is in * the command idle state. - Evalute the type of IO request to * be started - If its an NCQ request change to NCQ substate - * If its any other command change to the CMD substate * * If this is a softreset we may want to have a different * substate. */ enum sci_remote_device_states new_state; struct sas_task *task = isci_request_access_task(ireq); status = sci_port_start_io(iport, idev, ireq); if (status != SCI_SUCCESS) return status; status = sci_remote_node_context_start_io(&idev->rnc, ireq); if (status != SCI_SUCCESS) break; status = sci_request_start(ireq); if (status != SCI_SUCCESS) break; if (task->ata_task.use_ncq) new_state = SCI_STP_DEV_NCQ; else { idev->working_request = ireq; new_state = SCI_STP_DEV_CMD; } sci_change_state(sm, new_state); break; } case SCI_STP_DEV_NCQ: { struct sas_task *task = isci_request_access_task(ireq); if (task->ata_task.use_ncq) { status = sci_port_start_io(iport, idev, ireq); if (status != SCI_SUCCESS) return status; status = sci_remote_node_context_start_io(&idev->rnc, ireq); if (status != SCI_SUCCESS) break; status = sci_request_start(ireq); } else return SCI_FAILURE_INVALID_STATE; break; } case SCI_STP_DEV_AWAIT_RESET: return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED; case SCI_SMP_DEV_IDLE: status = sci_port_start_io(iport, idev, ireq); if (status != SCI_SUCCESS) return status; status = sci_remote_node_context_start_io(&idev->rnc, ireq); if (status != SCI_SUCCESS) break; status = sci_request_start(ireq); if (status != SCI_SUCCESS) break; idev->working_request = ireq; sci_change_state(&idev->sm, SCI_SMP_DEV_CMD); break; case SCI_STP_DEV_CMD: case SCI_SMP_DEV_CMD: /* device is already handling a command it can not accept new commands * until this one is complete. 
*/ return SCI_FAILURE_INVALID_STATE; } sci_remote_device_start_request(idev, ireq, status); return status; } static enum sci_status common_complete_io(struct isci_port *iport, struct isci_remote_device *idev, struct isci_request *ireq) { enum sci_status status; status = sci_request_complete(ireq); if (status != SCI_SUCCESS) return status; status = sci_port_complete_io(iport, idev, ireq); if (status != SCI_SUCCESS) return status; sci_remote_device_decrement_request_count(idev); return status; } enum sci_status sci_remote_device_complete_io(struct isci_host *ihost, struct isci_remote_device *idev, struct isci_request *ireq) { struct sci_base_state_machine *sm = &idev->sm; enum sci_remote_device_states state = sm->current_state_id; struct isci_port *iport = idev->owning_port; enum sci_status status; switch (state) { case SCI_DEV_INITIAL: case SCI_DEV_STOPPED: case SCI_DEV_STARTING: case SCI_STP_DEV_IDLE: case SCI_SMP_DEV_IDLE: case SCI_DEV_FAILED: case SCI_DEV_FINAL: default: dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n", __func__, dev_state_name(state)); return SCI_FAILURE_INVALID_STATE; case SCI_DEV_READY: case SCI_STP_DEV_AWAIT_RESET: case SCI_DEV_RESETTING: status = common_complete_io(iport, idev, ireq); break; case SCI_STP_DEV_CMD: case SCI_STP_DEV_NCQ: case SCI_STP_DEV_NCQ_ERROR: case SCI_STP_DEV_ATAPI_ERROR: status = common_complete_io(iport, idev, ireq); if (status != SCI_SUCCESS) break; if (ireq->sci_status == SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) { /* This request causes hardware error, device needs to be Lun Reset. * So here we force the state machine to IDLE state so the rest IOs * can reach RNC state handler, these IOs will be completed by RNC with * status of "DEVICE_RESET_REQUIRED", instead of "INVALID STATE". */ sci_change_state(sm, SCI_STP_DEV_AWAIT_RESET); } else if (idev->started_request_count == 0) sci_change_state(sm, SCI_STP_DEV_IDLE); break; case SCI_SMP_DEV_CMD: status = common_complete_io(iport, idev, ireq); if (status != SCI_SUCCESS) break; sci_change_state(sm, SCI_SMP_DEV_IDLE); break; case SCI_DEV_STOPPING: status = common_complete_io(iport, idev, ireq); if (status != SCI_SUCCESS) break; if (idev->started_request_count == 0) sci_remote_node_context_destruct(&idev->rnc, rnc_destruct_done, idev); break; } if (status != SCI_SUCCESS) dev_err(scirdev_to_dev(idev), "%s: Port:0x%p Device:0x%p Request:0x%p Status:0x%x " "could not complete\n", __func__, iport, idev, ireq, status); else isci_put_device(idev); return status; } static void sci_remote_device_continue_request(void *dev) { struct isci_remote_device *idev = dev; /* we need to check if this request is still valid to continue. 
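 * working_request may already have been cleared if the request completed or was terminated before this resume callback ran.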
*/ if (idev->working_request) sci_controller_continue_io(idev->working_request); } enum sci_status sci_remote_device_start_task(struct isci_host *ihost, struct isci_remote_device *idev, struct isci_request *ireq) { struct sci_base_state_machine *sm = &idev->sm; enum sci_remote_device_states state = sm->current_state_id; struct isci_port *iport = idev->owning_port; enum sci_status status; switch (state) { case SCI_DEV_INITIAL: case SCI_DEV_STOPPED: case SCI_DEV_STARTING: case SCI_SMP_DEV_IDLE: case SCI_SMP_DEV_CMD: case SCI_DEV_STOPPING: case SCI_DEV_FAILED: case SCI_DEV_RESETTING: case SCI_DEV_FINAL: default: dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n", __func__, dev_state_name(state)); return SCI_FAILURE_INVALID_STATE; case SCI_STP_DEV_IDLE: case SCI_STP_DEV_CMD: case SCI_STP_DEV_NCQ: case SCI_STP_DEV_NCQ_ERROR: case SCI_STP_DEV_AWAIT_RESET: status = sci_port_start_io(iport, idev, ireq); if (status != SCI_SUCCESS) return status; status = sci_request_start(ireq); if (status != SCI_SUCCESS) goto out; /* Note: If the remote device state is not IDLE this will * replace the request that probably resulted in the task * management request. */ idev->working_request = ireq; sci_change_state(sm, SCI_STP_DEV_CMD); /* The remote node context must cleanup the TCi to NCQ mapping * table. The only way to do this correctly is to either write * to the TLCR register or to invalidate and repost the RNC. In * either case the remote node context state machine will take * the correct action when the remote node context is suspended * and later resumed. */ sci_remote_device_suspend(idev, SCI_SW_SUSPEND_LINKHANG_DETECT); status = sci_remote_node_context_start_task(&idev->rnc, ireq, sci_remote_device_continue_request, idev); out: sci_remote_device_start_request(idev, ireq, status); /* We need to let the controller start request handler know that * it can't post TC yet. We will provide a callback function to * post TC when RNC gets resumed. */ return SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS; case SCI_DEV_READY: status = sci_port_start_io(iport, idev, ireq); if (status != SCI_SUCCESS) return status; /* Resume the RNC as needed: */ status = sci_remote_node_context_start_task(&idev->rnc, ireq, NULL, NULL); if (status != SCI_SUCCESS) break; status = sci_request_start(ireq); break; } sci_remote_device_start_request(idev, ireq, status); return status; } void sci_remote_device_post_request(struct isci_remote_device *idev, u32 request) { struct isci_port *iport = idev->owning_port; u32 context; context = request | (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | (iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | idev->rnc.remote_node_index; sci_controller_post_request(iport->owning_controller, context); } /* called once the remote node context has transisitioned to a * ready state. This is the indication that the remote device object can also * transition to ready. */ static void remote_device_resume_done(void *_dev) { struct isci_remote_device *idev = _dev; if (is_remote_device_ready(idev)) return; /* go 'ready' if we are not already in a ready state */ sci_change_state(&idev->sm, SCI_DEV_READY); } static void sci_stp_remote_device_ready_idle_substate_resume_complete_handler(void *_dev) { struct isci_remote_device *idev = _dev; struct isci_host *ihost = idev->owning_port->owning_controller; /* For NCQ operation we do not issue a isci_remote_device_not_ready(). * As a result, avoid sending the ready notification. 
*/ if (idev->sm.previous_state_id != SCI_STP_DEV_NCQ) isci_remote_device_ready(ihost, idev); } static void sci_remote_device_initial_state_enter(struct sci_base_state_machine *sm) { struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); /* Initial state is a transitional state to the stopped state */ sci_change_state(&idev->sm, SCI_DEV_STOPPED); } /** * sci_remote_device_destruct() - free remote node context and destruct * @idev: This parameter specifies the remote device to be destructed. * * Remote device objects are a limited resource. As such, they must be * protected. Thus calls to construct and destruct are mutually exclusive and * non-reentrant. The return value shall indicate if the device was * successfully destructed or if some failure occurred. enum sci_status This value * is returned if the device is successfully destructed. * SCI_FAILURE_INVALID_REMOTE_DEVICE This value is returned if the supplied * device isn't valid (e.g. it's already been destoryed, the handle isn't * valid, etc.). */ static enum sci_status sci_remote_device_destruct(struct isci_remote_device *idev) { struct sci_base_state_machine *sm = &idev->sm; enum sci_remote_device_states state = sm->current_state_id; struct isci_host *ihost; if (state != SCI_DEV_STOPPED) { dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n", __func__, dev_state_name(state)); return SCI_FAILURE_INVALID_STATE; } ihost = idev->owning_port->owning_controller; sci_controller_free_remote_node_context(ihost, idev, idev->rnc.remote_node_index); idev->rnc.remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX; sci_change_state(sm, SCI_DEV_FINAL); return SCI_SUCCESS; } /** * isci_remote_device_deconstruct() - This function frees an isci_remote_device. * @ihost: This parameter specifies the isci host object. * @idev: This parameter specifies the remote device to be freed. * */ static void isci_remote_device_deconstruct(struct isci_host *ihost, struct isci_remote_device *idev) { dev_dbg(&ihost->pdev->dev, "%s: isci_device = %p\n", __func__, idev); /* There should not be any outstanding io's. All paths to * here should go through isci_remote_device_nuke_requests. * If we hit this condition, we will need a way to complete * io requests in process */ BUG_ON(idev->started_request_count > 0); sci_remote_device_destruct(idev); list_del_init(&idev->node); isci_put_device(idev); } static void sci_remote_device_stopped_state_enter(struct sci_base_state_machine *sm) { struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); struct isci_host *ihost = idev->owning_port->owning_controller; u32 prev_state; /* If we are entering from the stopping state let the SCI User know that * the stop operation has completed. 
*/ prev_state = idev->sm.previous_state_id; if (prev_state == SCI_DEV_STOPPING) isci_remote_device_deconstruct(ihost, idev); sci_controller_remote_device_stopped(ihost, idev); } static void sci_remote_device_starting_state_enter(struct sci_base_state_machine *sm) { struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); struct isci_host *ihost = idev->owning_port->owning_controller; isci_remote_device_not_ready(ihost, idev, SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED); } static void sci_remote_device_ready_state_enter(struct sci_base_state_machine *sm) { struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); struct isci_host *ihost = idev->owning_port->owning_controller; struct domain_device *dev = idev->domain_dev; if (dev->dev_type == SAS_SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) { sci_change_state(&idev->sm, SCI_STP_DEV_IDLE); } else if (dev_is_expander(dev->dev_type)) { sci_change_state(&idev->sm, SCI_SMP_DEV_IDLE); } else isci_remote_device_ready(ihost, idev); } static void sci_remote_device_ready_state_exit(struct sci_base_state_machine *sm) { struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); struct domain_device *dev = idev->domain_dev; if (dev->dev_type == SAS_END_DEVICE) { struct isci_host *ihost = idev->owning_port->owning_controller; isci_remote_device_not_ready(ihost, idev, SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED); } } static void sci_remote_device_resetting_state_enter(struct sci_base_state_machine *sm) { struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); struct isci_host *ihost = idev->owning_port->owning_controller; dev_dbg(&ihost->pdev->dev, "%s: isci_device = %p\n", __func__, idev); sci_remote_device_suspend(idev, SCI_SW_SUSPEND_LINKHANG_DETECT); } static void sci_remote_device_resetting_state_exit(struct sci_base_state_machine *sm) { struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); struct isci_host *ihost = idev->owning_port->owning_controller; dev_dbg(&ihost->pdev->dev, "%s: isci_device = %p\n", __func__, idev); sci_remote_node_context_resume(&idev->rnc, NULL, NULL); } static void sci_stp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm) { struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); idev->working_request = NULL; if (sci_remote_node_context_is_ready(&idev->rnc)) { /* * Since the RNC is ready, it's alright to finish completion * processing (e.g. signal the remote device is ready). 
*/ sci_stp_remote_device_ready_idle_substate_resume_complete_handler(idev); } else { sci_remote_node_context_resume(&idev->rnc, sci_stp_remote_device_ready_idle_substate_resume_complete_handler, idev); } } static void sci_stp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm) { struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); struct isci_host *ihost = idev->owning_port->owning_controller; BUG_ON(idev->working_request == NULL); isci_remote_device_not_ready(ihost, idev, SCIC_REMOTE_DEVICE_NOT_READY_SATA_REQUEST_STARTED); } static void sci_stp_remote_device_ready_ncq_error_substate_enter(struct sci_base_state_machine *sm) { struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); struct isci_host *ihost = idev->owning_port->owning_controller; if (idev->not_ready_reason == SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED) isci_remote_device_not_ready(ihost, idev, idev->not_ready_reason); } static void sci_smp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm) { struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); struct isci_host *ihost = idev->owning_port->owning_controller; isci_remote_device_ready(ihost, idev); } static void sci_smp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm) { struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); struct isci_host *ihost = idev->owning_port->owning_controller; BUG_ON(idev->working_request == NULL); isci_remote_device_not_ready(ihost, idev, SCIC_REMOTE_DEVICE_NOT_READY_SMP_REQUEST_STARTED); } static void sci_smp_remote_device_ready_cmd_substate_exit(struct sci_base_state_machine *sm) { struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); idev->working_request = NULL; } static const struct sci_base_state sci_remote_device_state_table[] = { [SCI_DEV_INITIAL] = { .enter_state = sci_remote_device_initial_state_enter, }, [SCI_DEV_STOPPED] = { .enter_state = sci_remote_device_stopped_state_enter, }, [SCI_DEV_STARTING] = { .enter_state = sci_remote_device_starting_state_enter, }, [SCI_DEV_READY] = { .enter_state = sci_remote_device_ready_state_enter, .exit_state = sci_remote_device_ready_state_exit }, [SCI_STP_DEV_IDLE] = { .enter_state = sci_stp_remote_device_ready_idle_substate_enter, }, [SCI_STP_DEV_CMD] = { .enter_state = sci_stp_remote_device_ready_cmd_substate_enter, }, [SCI_STP_DEV_NCQ] = { }, [SCI_STP_DEV_NCQ_ERROR] = { .enter_state = sci_stp_remote_device_ready_ncq_error_substate_enter, }, [SCI_STP_DEV_ATAPI_ERROR] = { }, [SCI_STP_DEV_AWAIT_RESET] = { }, [SCI_SMP_DEV_IDLE] = { .enter_state = sci_smp_remote_device_ready_idle_substate_enter, }, [SCI_SMP_DEV_CMD] = { .enter_state = sci_smp_remote_device_ready_cmd_substate_enter, .exit_state = sci_smp_remote_device_ready_cmd_substate_exit, }, [SCI_DEV_STOPPING] = { }, [SCI_DEV_FAILED] = { }, [SCI_DEV_RESETTING] = { .enter_state = sci_remote_device_resetting_state_enter, .exit_state = sci_remote_device_resetting_state_exit }, [SCI_DEV_FINAL] = { }, }; /** * sci_remote_device_construct() - common construction * @iport: SAS/SATA port through which this device is accessed. * @idev: remote device to construct * * This routine just performs benign initialization and does not * allocate the remote_node_context which is left to * sci_remote_device_[de]a_construct(). sci_remote_device_destruct() * frees the remote_node_context(s) for the device. 
*/ static void sci_remote_device_construct(struct isci_port *iport, struct isci_remote_device *idev) { idev->owning_port = iport; idev->started_request_count = 0; sci_init_sm(&idev->sm, sci_remote_device_state_table, SCI_DEV_INITIAL); sci_remote_node_context_construct(&idev->rnc, SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX); } /* * sci_remote_device_da_construct() - construct direct attached device. * * The information (e.g. IAF, Signature FIS, etc.) necessary to build * the device is known to the SCI Core since it is contained in the * sci_phy object. Remote node context(s) is/are a global resource * allocated by this routine, freed by sci_remote_device_destruct(). * * Returns: * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed. * SCI_FAILURE_UNSUPPORTED_PROTOCOL - e.g. sas device attached to * sata-only controller instance. * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted. */ static enum sci_status sci_remote_device_da_construct(struct isci_port *iport, struct isci_remote_device *idev) { enum sci_status status; struct sci_port_properties properties; sci_remote_device_construct(iport, idev); sci_port_get_properties(iport, &properties); /* Get accurate port width from port's phy mask for a DA device. */ idev->device_port_width = hweight32(properties.phy_mask); status = sci_controller_allocate_remote_node_context(iport->owning_controller, idev, &idev->rnc.remote_node_index); if (status != SCI_SUCCESS) return status; idev->connection_rate = sci_port_get_max_allowed_speed(iport); return SCI_SUCCESS; } /* * sci_remote_device_ea_construct() - construct expander attached device * * Remote node context(s) is/are a global resource allocated by this * routine, freed by sci_remote_device_destruct(). * * Returns: * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed. * SCI_FAILURE_UNSUPPORTED_PROTOCOL - e.g. sas device attached to * sata-only controller instance. * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted. */ static enum sci_status sci_remote_device_ea_construct(struct isci_port *iport, struct isci_remote_device *idev) { struct domain_device *dev = idev->domain_dev; enum sci_status status; sci_remote_device_construct(iport, idev); status = sci_controller_allocate_remote_node_context(iport->owning_controller, idev, &idev->rnc.remote_node_index); if (status != SCI_SUCCESS) return status; /* For SAS-2 the physical link rate is actually a logical link * rate that incorporates multiplexing. The SCU doesn't * incorporate multiplexing and for the purposes of the * connection the logical link rate is that same as the * physical. Furthermore, the SAS-2 and SAS-1.1 fields overlay * one another, so this code works for both situations. */ idev->connection_rate = min_t(u16, sci_port_get_max_allowed_speed(iport), dev->linkrate); /* / @todo Should I assign the port width by reading all of the phys on the port? 
*/ idev->device_port_width = 1; return SCI_SUCCESS; } enum sci_status sci_remote_device_resume( struct isci_remote_device *idev, scics_sds_remote_node_context_callback cb_fn, void *cb_p) { enum sci_status status; status = sci_remote_node_context_resume(&idev->rnc, cb_fn, cb_p); if (status != SCI_SUCCESS) dev_dbg(scirdev_to_dev(idev), "%s: failed to resume: %d\n", __func__, status); return status; } static void isci_remote_device_resume_from_abort_complete(void *cbparam) { struct isci_remote_device *idev = cbparam; struct isci_host *ihost = idev->owning_port->owning_controller; scics_sds_remote_node_context_callback abort_resume_cb = idev->abort_resume_cb; dev_dbg(scirdev_to_dev(idev), "%s: passing-along resume: %p\n", __func__, abort_resume_cb); if (abort_resume_cb != NULL) { idev->abort_resume_cb = NULL; abort_resume_cb(idev->abort_resume_cbparam); } clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags); wake_up(&ihost->eventq); } static bool isci_remote_device_test_resume_done( struct isci_host *ihost, struct isci_remote_device *idev) { unsigned long flags; bool done; spin_lock_irqsave(&ihost->scic_lock, flags); done = !test_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags) || test_bit(IDEV_STOP_PENDING, &idev->flags) || sci_remote_node_context_is_being_destroyed(&idev->rnc); spin_unlock_irqrestore(&ihost->scic_lock, flags); return done; } static void isci_remote_device_wait_for_resume_from_abort( struct isci_host *ihost, struct isci_remote_device *idev) { dev_dbg(&ihost->pdev->dev, "%s: starting resume wait: %p\n", __func__, idev); #define MAX_RESUME_MSECS 10000 if (!wait_event_timeout(ihost->eventq, isci_remote_device_test_resume_done(ihost, idev), msecs_to_jiffies(MAX_RESUME_MSECS))) { dev_warn(&ihost->pdev->dev, "%s: #### Timeout waiting for " "resume: %p\n", __func__, idev); } clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags); dev_dbg(&ihost->pdev->dev, "%s: resume wait done: %p\n", __func__, idev); } enum sci_status isci_remote_device_resume_from_abort( struct isci_host *ihost, struct isci_remote_device *idev) { unsigned long flags; enum sci_status status = SCI_SUCCESS; int destroyed; spin_lock_irqsave(&ihost->scic_lock, flags); /* Preserve any current resume callbacks, for instance from other * resumptions. */ idev->abort_resume_cb = idev->rnc.user_callback; idev->abort_resume_cbparam = idev->rnc.user_cookie; set_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags); clear_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags); destroyed = sci_remote_node_context_is_being_destroyed(&idev->rnc); if (!destroyed) status = sci_remote_device_resume( idev, isci_remote_device_resume_from_abort_complete, idev); spin_unlock_irqrestore(&ihost->scic_lock, flags); if (!destroyed && (status == SCI_SUCCESS)) isci_remote_device_wait_for_resume_from_abort(ihost, idev); else clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags); return status; } /** * sci_remote_device_start() - This method will start the supplied remote * device. This method enables normal IO requests to flow through to the * remote device. * @idev: This parameter specifies the device to be started. * @timeout: This parameter specifies the number of milliseconds in which the * start operation should complete. * * An indication of whether the device was successfully started. SCI_SUCCESS * This value is returned if the device was successfully started. * SCI_FAILURE_INVALID_PHY This value is returned if the user attempts to start * the device when there have been no phys added to it. 
*/ static enum sci_status sci_remote_device_start(struct isci_remote_device *idev, u32 timeout) { struct sci_base_state_machine *sm = &idev->sm; enum sci_remote_device_states state = sm->current_state_id; enum sci_status status; if (state != SCI_DEV_STOPPED) { dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n", __func__, dev_state_name(state)); return SCI_FAILURE_INVALID_STATE; } status = sci_remote_device_resume(idev, remote_device_resume_done, idev); if (status != SCI_SUCCESS) return status; sci_change_state(sm, SCI_DEV_STARTING); return SCI_SUCCESS; } static enum sci_status isci_remote_device_construct(struct isci_port *iport, struct isci_remote_device *idev) { struct isci_host *ihost = iport->isci_host; struct domain_device *dev = idev->domain_dev; enum sci_status status; if (dev->parent && dev_is_expander(dev->parent->dev_type)) status = sci_remote_device_ea_construct(iport, idev); else status = sci_remote_device_da_construct(iport, idev); if (status != SCI_SUCCESS) { dev_dbg(&ihost->pdev->dev, "%s: construct failed: %d\n", __func__, status); return status; } /* start the device. */ status = sci_remote_device_start(idev, ISCI_REMOTE_DEVICE_START_TIMEOUT); if (status != SCI_SUCCESS) dev_warn(&ihost->pdev->dev, "remote device start failed: %d\n", status); return status; } /** * isci_remote_device_alloc() * This function builds the isci_remote_device when a libsas dev_found message * is received. * @ihost: This parameter specifies the isci host object. * @iport: This parameter specifies the isci_port connected to this device. * * pointer to new isci_remote_device. */ static struct isci_remote_device * isci_remote_device_alloc(struct isci_host *ihost, struct isci_port *iport) { struct isci_remote_device *idev; int i; for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) { idev = &ihost->devices[i]; if (!test_and_set_bit(IDEV_ALLOCATED, &idev->flags)) break; } if (i >= SCI_MAX_REMOTE_DEVICES) { dev_warn(&ihost->pdev->dev, "%s: failed\n", __func__); return NULL; } if (WARN_ONCE(!list_empty(&idev->node), "found non-idle remote device\n")) return NULL; return idev; } void isci_remote_device_release(struct kref *kref) { struct isci_remote_device *idev = container_of(kref, typeof(*idev), kref); struct isci_host *ihost = idev->isci_port->isci_host; idev->domain_dev = NULL; idev->isci_port = NULL; clear_bit(IDEV_START_PENDING, &idev->flags); clear_bit(IDEV_STOP_PENDING, &idev->flags); clear_bit(IDEV_IO_READY, &idev->flags); clear_bit(IDEV_GONE, &idev->flags); smp_mb__before_atomic(); clear_bit(IDEV_ALLOCATED, &idev->flags); wake_up(&ihost->eventq); } /** * isci_remote_device_stop() - This function is called internally to stop the * remote device. * @ihost: This parameter specifies the isci host object. * @idev: This parameter specifies the remote device. * * The status of the ihost request to stop. */ enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_remote_device *idev) { enum sci_status status; unsigned long flags; dev_dbg(&ihost->pdev->dev, "%s: isci_device = %p\n", __func__, idev); spin_lock_irqsave(&ihost->scic_lock, flags); idev->domain_dev->lldd_dev = NULL; /* disable new lookups */ set_bit(IDEV_GONE, &idev->flags); set_bit(IDEV_STOP_PENDING, &idev->flags); status = sci_remote_device_stop(idev, 50); spin_unlock_irqrestore(&ihost->scic_lock, flags); /* Wait for the stop complete callback. 
*/ if (WARN_ONCE(status != SCI_SUCCESS, "failed to stop device\n")) /* nothing to wait for */; else wait_for_device_stop(ihost, idev); dev_dbg(&ihost->pdev->dev, "%s: isci_device = %p, waiting done.\n", __func__, idev); return status; } /** * isci_remote_device_gone() - This function is called by libsas when a domain * device is removed. * @dev: This parameter specifies the libsas domain device. */ void isci_remote_device_gone(struct domain_device *dev) { struct isci_host *ihost = dev_to_ihost(dev); struct isci_remote_device *idev = dev->lldd_dev; dev_dbg(&ihost->pdev->dev, "%s: domain_device = %p, isci_device = %p, isci_port = %p\n", __func__, dev, idev, idev->isci_port); isci_remote_device_stop(ihost, idev); } /** * isci_remote_device_found() - This function is called by libsas when a remote * device is discovered. A remote device object is created and started. the * function then sleeps until the sci core device started message is * received. * @dev: This parameter specifies the libsas domain device. * * status, zero indicates success. */ int isci_remote_device_found(struct domain_device *dev) { struct isci_host *isci_host = dev_to_ihost(dev); struct isci_port *isci_port = dev->port->lldd_port; struct isci_remote_device *isci_device; enum sci_status status; dev_dbg(&isci_host->pdev->dev, "%s: domain_device = %p\n", __func__, dev); if (!isci_port) return -ENODEV; isci_device = isci_remote_device_alloc(isci_host, isci_port); if (!isci_device) return -ENODEV; kref_init(&isci_device->kref); INIT_LIST_HEAD(&isci_device->node); spin_lock_irq(&isci_host->scic_lock); isci_device->domain_dev = dev; isci_device->isci_port = isci_port; list_add_tail(&isci_device->node, &isci_port->remote_dev_list); set_bit(IDEV_START_PENDING, &isci_device->flags); status = isci_remote_device_construct(isci_port, isci_device); dev_dbg(&isci_host->pdev->dev, "%s: isci_device = %p\n", __func__, isci_device); if (status == SCI_SUCCESS) { /* device came up, advertise it to the world */ dev->lldd_dev = isci_device; } else isci_put_device(isci_device); spin_unlock_irq(&isci_host->scic_lock); /* wait for the device ready callback. */ wait_for_device_start(isci_host, isci_device); return status == SCI_SUCCESS ? 0 : -ENODEV; } enum sci_status isci_remote_device_suspend_terminate( struct isci_host *ihost, struct isci_remote_device *idev, struct isci_request *ireq) { unsigned long flags; enum sci_status status; /* Put the device into suspension. */ spin_lock_irqsave(&ihost->scic_lock, flags); set_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags); sci_remote_device_suspend(idev, SCI_SW_SUSPEND_LINKHANG_DETECT); spin_unlock_irqrestore(&ihost->scic_lock, flags); /* Terminate and wait for the completions. */ status = isci_remote_device_terminate_requests(ihost, idev, ireq); if (status != SCI_SUCCESS) dev_dbg(&ihost->pdev->dev, "%s: isci_remote_device_terminate_requests(%p) " "returned %d!\n", __func__, idev, status); /* NOTE: RNC resumption is left to the caller! 
*/ return status; } int isci_remote_device_is_safe_to_abort( struct isci_remote_device *idev) { return sci_remote_node_context_is_safe_to_abort(&idev->rnc); } enum sci_status sci_remote_device_abort_requests_pending_abort( struct isci_remote_device *idev) { return sci_remote_device_terminate_reqs_checkabort(idev, 1); } enum sci_status isci_remote_device_reset_complete( struct isci_host *ihost, struct isci_remote_device *idev) { unsigned long flags; enum sci_status status; spin_lock_irqsave(&ihost->scic_lock, flags); status = sci_remote_device_reset_complete(idev); spin_unlock_irqrestore(&ihost->scic_lock, flags); return status; } void isci_dev_set_hang_detection_timeout( struct isci_remote_device *idev, u32 timeout) { if (dev_is_sata(idev->domain_dev)) { if (timeout) { if (test_and_set_bit(IDEV_RNC_LLHANG_ENABLED, &idev->flags)) return; /* Already enabled. */ } else if (!test_and_clear_bit(IDEV_RNC_LLHANG_ENABLED, &idev->flags)) return; /* Not enabled. */ sci_port_set_hang_detection_timeout(idev->owning_port, timeout); } }
linux-master
drivers/scsi/isci/remote_device.c
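remote_device.c above pairs a per-device reference count with an outstanding-request counter: each I/O that starts successfully takes a kref and bumps started_request_count, each completion decrements the counter and drops the reference, and the stop path only destructs the remote node context once the counter drains to zero. Below is a minimal userspace sketch of that pairing, assuming hypothetical names (device_get/device_put, start_io/complete_io) in place of the driver's kref and SCI calls.

#include <assert.h>
#include <stdio.h>

/* Hypothetical stand-in for the remote device: a reference count plus an
 * outstanding-request counter, mirroring kref + started_request_count. */
struct device {
	int refcount;
	int started_request_count;
	int stop_pending;
};

static void device_get(struct device *d)
{
	d->refcount++;
}

static void device_put(struct device *d)
{
	if (--d->refcount == 0)
		printf("last reference dropped, device can be freed\n");
}

/* Each successfully started I/O holds a reference and a request slot. */
static void start_io(struct device *d)
{
	device_get(d);
	d->started_request_count++;
}

/* Completion releases the slot and the reference; a pending stop may only
 * finish once the count drains to zero (the driver destructs the RNC then). */
static void complete_io(struct device *d)
{
	assert(d->started_request_count > 0);
	if (--d->started_request_count == 0 && d->stop_pending)
		printf("all requests drained, stop can complete\n");
	device_put(d);
}

int main(void)
{
	struct device dev = { .refcount = 1, .started_request_count = 0, .stop_pending = 0 };

	start_io(&dev);
	start_io(&dev);
	dev.stop_pending = 1;	/* stop requested while I/O is outstanding */
	complete_io(&dev);
	complete_io(&dev);	/* prints "all requests drained..." */
	device_put(&dev);	/* drop the initial reference */
	return 0;
}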
/* * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * The full GNU General Public License is included in this distribution * in the file called LICENSE.GPL. * * BSD LICENSE * * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <scsi/scsi_cmnd.h> #include "isci.h" #include "task.h" #include "request.h" #include "scu_completion_codes.h" #include "scu_event_codes.h" #include "sas.h" #undef C #define C(a) (#a) const char *req_state_name(enum sci_base_request_states state) { static const char * const strings[] = REQUEST_STATES; return strings[state]; } #undef C static struct scu_sgl_element_pair *to_sgl_element_pair(struct isci_request *ireq, int idx) { if (idx == 0) return &ireq->tc->sgl_pair_ab; else if (idx == 1) return &ireq->tc->sgl_pair_cd; else if (idx < 0) return NULL; else return &ireq->sg_table[idx - 2]; } static dma_addr_t to_sgl_element_pair_dma(struct isci_host *ihost, struct isci_request *ireq, u32 idx) { u32 offset; if (idx == 0) { offset = (void *) &ireq->tc->sgl_pair_ab - (void *) &ihost->task_context_table[0]; return ihost->tc_dma + offset; } else if (idx == 1) { offset = (void *) &ireq->tc->sgl_pair_cd - (void *) &ihost->task_context_table[0]; return ihost->tc_dma + offset; } return sci_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]); } static void init_sgl_element(struct scu_sgl_element *e, struct scatterlist *sg) { e->length = sg_dma_len(sg); e->address_upper = upper_32_bits(sg_dma_address(sg)); e->address_lower = lower_32_bits(sg_dma_address(sg)); e->address_modifier = 0; } static void sci_request_build_sgl(struct isci_request *ireq) { struct isci_host *ihost = ireq->isci_host; struct sas_task *task = isci_request_access_task(ireq); struct scatterlist *sg = NULL; dma_addr_t dma_addr; u32 sg_idx = 0; struct scu_sgl_element_pair *scu_sg = NULL; struct scu_sgl_element_pair *prev_sg = NULL; if (task->num_scatter > 0) { sg = task->scatter; while (sg) { scu_sg = to_sgl_element_pair(ireq, sg_idx); init_sgl_element(&scu_sg->A, sg); sg = sg_next(sg); if (sg) { init_sgl_element(&scu_sg->B, sg); sg = sg_next(sg); } else memset(&scu_sg->B, 0, sizeof(scu_sg->B)); if (prev_sg) { dma_addr = to_sgl_element_pair_dma(ihost, ireq, sg_idx); prev_sg->next_pair_upper = upper_32_bits(dma_addr); prev_sg->next_pair_lower = lower_32_bits(dma_addr); } prev_sg = scu_sg; sg_idx++; } } else { /* handle when no sg */ scu_sg = to_sgl_element_pair(ireq, sg_idx); dma_addr = dma_map_single(&ihost->pdev->dev, task->scatter, task->total_xfer_len, task->data_dir); ireq->zero_scatter_daddr = dma_addr; scu_sg->A.length = task->total_xfer_len; scu_sg->A.address_upper = upper_32_bits(dma_addr); scu_sg->A.address_lower = lower_32_bits(dma_addr); } if (scu_sg) { scu_sg->next_pair_upper = 0; scu_sg->next_pair_lower = 0; } } static void sci_io_request_build_ssp_command_iu(struct isci_request *ireq) { struct ssp_cmd_iu *cmd_iu; struct sas_task *task = isci_request_access_task(ireq); cmd_iu = &ireq->ssp.cmd; memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8); cmd_iu->add_cdb_len = 0; cmd_iu->_r_a = 0; cmd_iu->_r_b = 0; cmd_iu->en_fburst = 0; /* unsupported */ cmd_iu->task_prio = 0; cmd_iu->task_attr = task->ssp_task.task_attr; cmd_iu->_r_c = 0; sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cmd->cmnd, (task->ssp_task.cmd->cmd_len+3) / sizeof(u32)); } static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq) { struct ssp_task_iu *task_iu; struct sas_task *task = isci_request_access_task(ireq); struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq); task_iu = &ireq->ssp.tmf; memset(task_iu, 0, sizeof(struct ssp_task_iu)); memcpy(task_iu->LUN, task->ssp_task.LUN, 8); task_iu->task_func = isci_tmf->tmf_code; task_iu->task_tag = (test_bit(IREQ_TMF, &ireq->flags)) ? 
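/* Descriptive note (added): for task-management requests the IU reuses the io_tag recorded in the isci_tmf; otherwise the tag is marked invalid. */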
isci_tmf->io_tag : SCI_CONTROLLER_INVALID_IO_TAG; } /* * This method is will fill in the SCU Task Context for any type of SSP request. */ static void scu_ssp_request_construct_task_context( struct isci_request *ireq, struct scu_task_context *task_context) { dma_addr_t dma_addr; struct isci_remote_device *idev; struct isci_port *iport; idev = ireq->target_device; iport = idev->owning_port; /* Fill in the TC with its required data */ task_context->abort = 0; task_context->priority = 0; task_context->initiator_request = 1; task_context->connection_rate = idev->connection_rate; task_context->protocol_engine_index = ISCI_PEG; task_context->logical_port_index = iport->physical_port_index; task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP; task_context->valid = SCU_TASK_CONTEXT_VALID; task_context->context_type = SCU_TASK_CONTEXT_TYPE; task_context->remote_node_index = idev->rnc.remote_node_index; task_context->command_code = 0; task_context->link_layer_control = 0; task_context->do_not_dma_ssp_good_response = 1; task_context->strict_ordering = 0; task_context->control_frame = 0; task_context->timeout_enable = 0; task_context->block_guard_enable = 0; task_context->address_modifier = 0; /* task_context->type.ssp.tag = ireq->io_tag; */ task_context->task_phase = 0x01; ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | (iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | ISCI_TAG_TCI(ireq->io_tag)); /* * Copy the physical address for the command buffer to the * SCU Task Context */ dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.cmd); task_context->command_iu_upper = upper_32_bits(dma_addr); task_context->command_iu_lower = lower_32_bits(dma_addr); /* * Copy the physical address for the response buffer to the * SCU Task Context */ dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.rsp); task_context->response_iu_upper = upper_32_bits(dma_addr); task_context->response_iu_lower = lower_32_bits(dma_addr); } static u8 scu_bg_blk_size(struct scsi_device *sdp) { switch (sdp->sector_size) { case 512: return 0; case 1024: return 1; case 4096: return 3; default: return 0xff; } } static u32 scu_dif_bytes(u32 len, u32 sector_size) { return (len >> ilog2(sector_size)) * 8; } static void scu_ssp_ireq_dif_insert(struct isci_request *ireq, u8 type, u8 op) { struct scu_task_context *tc = ireq->tc; struct scsi_cmnd *scmd = ireq->ttype_ptr.io_task_ptr->uldd_task; u8 blk_sz = scu_bg_blk_size(scmd->device); tc->block_guard_enable = 1; tc->blk_prot_en = 1; tc->blk_sz = blk_sz; /* DIF write insert */ tc->blk_prot_func = 0x2; tc->transfer_length_bytes += scu_dif_bytes(tc->transfer_length_bytes, scmd->device->sector_size); /* always init to 0, used by hw */ tc->interm_crc_val = 0; tc->init_crc_seed = 0; tc->app_tag_verify = 0; tc->app_tag_gen = 0; tc->ref_tag_seed_verify = 0; /* always init to same as bg_blk_sz */ tc->UD_bytes_immed_val = scmd->device->sector_size; tc->reserved_DC_0 = 0; /* always init to 8 */ tc->DIF_bytes_immed_val = 8; tc->reserved_DC_1 = 0; tc->bgc_blk_sz = scmd->device->sector_size; tc->reserved_E0_0 = 0; tc->app_tag_gen_mask = 0; /** setup block guard control **/ tc->bgctl = 0; /* DIF write insert */ tc->bgctl_f.op = 0x2; tc->app_tag_verify_mask = 0; /* must init to 0 for hw */ tc->blk_guard_err = 0; tc->reserved_E8_0 = 0; if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2)) tc->ref_tag_seed_gen = scsi_prot_ref_tag(scmd); else if (type & SCSI_PROT_DIF_TYPE3) 
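/* DIF Type 3 does not use an LBA-based reference tag, so seed generation with zero. */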
tc->ref_tag_seed_gen = 0; } static void scu_ssp_ireq_dif_strip(struct isci_request *ireq, u8 type, u8 op) { struct scu_task_context *tc = ireq->tc; struct scsi_cmnd *scmd = ireq->ttype_ptr.io_task_ptr->uldd_task; u8 blk_sz = scu_bg_blk_size(scmd->device); tc->block_guard_enable = 1; tc->blk_prot_en = 1; tc->blk_sz = blk_sz; /* DIF read strip */ tc->blk_prot_func = 0x1; tc->transfer_length_bytes += scu_dif_bytes(tc->transfer_length_bytes, scmd->device->sector_size); /* always init to 0, used by hw */ tc->interm_crc_val = 0; tc->init_crc_seed = 0; tc->app_tag_verify = 0; tc->app_tag_gen = 0; if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2)) tc->ref_tag_seed_verify = scsi_prot_ref_tag(scmd); else if (type & SCSI_PROT_DIF_TYPE3) tc->ref_tag_seed_verify = 0; /* always init to same as bg_blk_sz */ tc->UD_bytes_immed_val = scmd->device->sector_size; tc->reserved_DC_0 = 0; /* always init to 8 */ tc->DIF_bytes_immed_val = 8; tc->reserved_DC_1 = 0; tc->bgc_blk_sz = scmd->device->sector_size; tc->reserved_E0_0 = 0; tc->app_tag_gen_mask = 0; /** setup block guard control **/ tc->bgctl = 0; /* DIF read strip */ tc->bgctl_f.crc_verify = 1; tc->bgctl_f.op = 0x1; if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2)) { tc->bgctl_f.ref_tag_chk = 1; tc->bgctl_f.app_f_detect = 1; } else if (type & SCSI_PROT_DIF_TYPE3) tc->bgctl_f.app_ref_f_detect = 1; tc->app_tag_verify_mask = 0; /* must init to 0 for hw */ tc->blk_guard_err = 0; tc->reserved_E8_0 = 0; tc->ref_tag_seed_gen = 0; } /* * This method is will fill in the SCU Task Context for a SSP IO request. */ static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq, enum dma_data_direction dir, u32 len) { struct scu_task_context *task_context = ireq->tc; struct sas_task *sas_task = ireq->ttype_ptr.io_task_ptr; struct scsi_cmnd *scmd = sas_task->uldd_task; u8 prot_type = scsi_get_prot_type(scmd); u8 prot_op = scsi_get_prot_op(scmd); scu_ssp_request_construct_task_context(ireq, task_context); task_context->ssp_command_iu_length = sizeof(struct ssp_cmd_iu) / sizeof(u32); task_context->type.ssp.frame_type = SSP_COMMAND; switch (dir) { case DMA_FROM_DEVICE: case DMA_NONE: default: task_context->task_type = SCU_TASK_TYPE_IOREAD; break; case DMA_TO_DEVICE: task_context->task_type = SCU_TASK_TYPE_IOWRITE; break; } task_context->transfer_length_bytes = len; if (task_context->transfer_length_bytes > 0) sci_request_build_sgl(ireq); if (prot_type != SCSI_PROT_DIF_TYPE0) { if (prot_op == SCSI_PROT_READ_STRIP) scu_ssp_ireq_dif_strip(ireq, prot_type, prot_op); else if (prot_op == SCSI_PROT_WRITE_INSERT) scu_ssp_ireq_dif_insert(ireq, prot_type, prot_op); } } /** * scu_ssp_task_request_construct_task_context() - This method will fill in * the SCU Task Context for a SSP Task request. The following important * settings are utilized: -# priority == SCU_TASK_PRIORITY_HIGH. This * ensures that the task request is issued ahead of other task destined * for the same Remote Node. -# task_type == SCU_TASK_TYPE_IOREAD. This * simply indicates that a normal request type (i.e. non-raw frame) is * being utilized to perform task management. -#control_frame == 1. This * ensures that the proper endianness is set so that the bytes are * transmitted in the right order for a task frame. * @ireq: This parameter specifies the task request object being constructed. 
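 * Note: the code below actually sets task_type to SCU_TASK_TYPE_RAW_FRAME, not SCU_TASK_TYPE_IOREAD as described above.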
*/ static void scu_ssp_task_request_construct_task_context(struct isci_request *ireq) { struct scu_task_context *task_context = ireq->tc; scu_ssp_request_construct_task_context(ireq, task_context); task_context->control_frame = 1; task_context->priority = SCU_TASK_PRIORITY_HIGH; task_context->task_type = SCU_TASK_TYPE_RAW_FRAME; task_context->transfer_length_bytes = 0; task_context->type.ssp.frame_type = SSP_TASK; task_context->ssp_command_iu_length = sizeof(struct ssp_task_iu) / sizeof(u32); } /** * scu_sata_request_construct_task_context() * This method is will fill in the SCU Task Context for any type of SATA * request. This is called from the various SATA constructors. * @ireq: The general IO request object which is to be used in * constructing the SCU task context. * @task_context: The buffer pointer for the SCU task context which is being * constructed. * * The general io request construction is complete. The buffer assignment for * the command buffer is complete. none Revisit task context construction to * determine what is common for SSP/SMP/STP task context structures. */ static void scu_sata_request_construct_task_context( struct isci_request *ireq, struct scu_task_context *task_context) { dma_addr_t dma_addr; struct isci_remote_device *idev; struct isci_port *iport; idev = ireq->target_device; iport = idev->owning_port; /* Fill in the TC with its required data */ task_context->abort = 0; task_context->priority = SCU_TASK_PRIORITY_NORMAL; task_context->initiator_request = 1; task_context->connection_rate = idev->connection_rate; task_context->protocol_engine_index = ISCI_PEG; task_context->logical_port_index = iport->physical_port_index; task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP; task_context->valid = SCU_TASK_CONTEXT_VALID; task_context->context_type = SCU_TASK_CONTEXT_TYPE; task_context->remote_node_index = idev->rnc.remote_node_index; task_context->command_code = 0; task_context->link_layer_control = 0; task_context->do_not_dma_ssp_good_response = 1; task_context->strict_ordering = 0; task_context->control_frame = 0; task_context->timeout_enable = 0; task_context->block_guard_enable = 0; task_context->address_modifier = 0; task_context->task_phase = 0x01; task_context->ssp_command_iu_length = (sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32); /* Set the first word of the H2D REG FIS */ task_context->type.words[0] = *(u32 *)&ireq->stp.cmd; ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | (iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | ISCI_TAG_TCI(ireq->io_tag)); /* * Copy the physical address for the command buffer to the SCU Task * Context. We must offset the command buffer by 4 bytes because the * first 4 bytes are transfered in the body of the TC. 
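 * (The first dword of the FIS was already placed in task_context->type.words[0] above.)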
*/ dma_addr = sci_io_request_get_dma_addr(ireq, ((char *) &ireq->stp.cmd) + sizeof(u32)); task_context->command_iu_upper = upper_32_bits(dma_addr); task_context->command_iu_lower = lower_32_bits(dma_addr); /* SATA Requests do not have a response buffer */ task_context->response_iu_upper = 0; task_context->response_iu_lower = 0; } static void scu_stp_raw_request_construct_task_context(struct isci_request *ireq) { struct scu_task_context *task_context = ireq->tc; scu_sata_request_construct_task_context(ireq, task_context); task_context->control_frame = 0; task_context->priority = SCU_TASK_PRIORITY_NORMAL; task_context->task_type = SCU_TASK_TYPE_SATA_RAW_FRAME; task_context->type.stp.fis_type = FIS_REGH2D; task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32); } static enum sci_status sci_stp_pio_request_construct(struct isci_request *ireq, bool copy_rx_frame) { struct isci_stp_request *stp_req = &ireq->stp.req; scu_stp_raw_request_construct_task_context(ireq); stp_req->status = 0; stp_req->sgl.offset = 0; stp_req->sgl.set = SCU_SGL_ELEMENT_PAIR_A; if (copy_rx_frame) { sci_request_build_sgl(ireq); stp_req->sgl.index = 0; } else { /* The user does not want the data copied to the SGL buffer location */ stp_req->sgl.index = -1; } return SCI_SUCCESS; } /* * sci_stp_optimized_request_construct() * @ireq: This parameter specifies the request to be constructed as an * optimized request. * @optimized_task_type: This parameter specifies whether the request is to be * an UDMA request or a NCQ request. - A value of 0 indicates UDMA. - A * value of 1 indicates NCQ. * * This method will perform request construction common to all types of STP * requests that are optimized by the silicon (i.e. UDMA, NCQ). This method * returns an indication as to whether the construction was successful. */ static void sci_stp_optimized_request_construct(struct isci_request *ireq, u8 optimized_task_type, u32 len, enum dma_data_direction dir) { struct scu_task_context *task_context = ireq->tc; /* Build the STP task context structure */ scu_sata_request_construct_task_context(ireq, task_context); /* Copy over the SGL elements */ sci_request_build_sgl(ireq); /* Copy over the number of bytes to be transfered */ task_context->transfer_length_bytes = len; if (dir == DMA_TO_DEVICE) { /* * The difference between the DMA IN and DMA OUT request task type * values are consistent with the difference between FPDMA READ * and FPDMA WRITE values. Add the supplied task type parameter * to this difference to set the task type properly for this * DATA OUT (WRITE) case. */ task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT - SCU_TASK_TYPE_DMA_IN); } else { /* * For the DATA IN (READ) case, simply save the supplied * optimized task type. */ task_context->task_type = optimized_task_type; } } static void sci_atapi_construct(struct isci_request *ireq) { struct host_to_dev_fis *h2d_fis = &ireq->stp.cmd; struct sas_task *task; /* To simplify the implementation we take advantage of the * silicon's partial acceleration of atapi protocol (dma data * transfers), so we promote all commands to dma protocol. This * breaks compatibility with ATA_HORKAGE_ATAPI_MOD16_DMA drives. 
*/ h2d_fis->features |= ATAPI_PKT_DMA; scu_stp_raw_request_construct_task_context(ireq); task = isci_request_access_task(ireq); if (task->data_dir == DMA_NONE) task->total_xfer_len = 0; /* clear the response so we can detect arrivial of an * unsolicited h2d fis */ ireq->stp.rsp.fis_type = 0; } static enum sci_status sci_io_request_construct_sata(struct isci_request *ireq, u32 len, enum dma_data_direction dir, bool copy) { enum sci_status status = SCI_SUCCESS; struct sas_task *task = isci_request_access_task(ireq); struct domain_device *dev = ireq->target_device->domain_dev; /* check for management protocols */ if (test_bit(IREQ_TMF, &ireq->flags)) { struct isci_tmf *tmf = isci_request_access_tmf(ireq); dev_err(&ireq->owning_controller->pdev->dev, "%s: Request 0x%p received un-handled SAT " "management protocol 0x%x.\n", __func__, ireq, tmf->tmf_code); return SCI_FAILURE; } if (!sas_protocol_ata(task->task_proto)) { dev_err(&ireq->owning_controller->pdev->dev, "%s: Non-ATA protocol in SATA path: 0x%x\n", __func__, task->task_proto); return SCI_FAILURE; } /* ATAPI */ if (dev->sata_dev.class == ATA_DEV_ATAPI && task->ata_task.fis.command == ATA_CMD_PACKET) { sci_atapi_construct(ireq); return SCI_SUCCESS; } /* non data */ if (task->data_dir == DMA_NONE) { scu_stp_raw_request_construct_task_context(ireq); return SCI_SUCCESS; } /* NCQ */ if (task->ata_task.use_ncq) { sci_stp_optimized_request_construct(ireq, SCU_TASK_TYPE_FPDMAQ_READ, len, dir); return SCI_SUCCESS; } /* DMA */ if (task->ata_task.dma_xfer) { sci_stp_optimized_request_construct(ireq, SCU_TASK_TYPE_DMA_IN, len, dir); return SCI_SUCCESS; } else /* PIO */ return sci_stp_pio_request_construct(ireq, copy); return status; } static enum sci_status sci_io_request_construct_basic_ssp(struct isci_request *ireq) { struct sas_task *task = isci_request_access_task(ireq); ireq->protocol = SAS_PROTOCOL_SSP; scu_ssp_io_request_construct_task_context(ireq, task->data_dir, task->total_xfer_len); sci_io_request_build_ssp_command_iu(ireq); sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED); return SCI_SUCCESS; } enum sci_status sci_task_request_construct_ssp( struct isci_request *ireq) { /* Construct the SSP Task SCU Task Context */ scu_ssp_task_request_construct_task_context(ireq); /* Fill in the SSP Task IU */ sci_task_request_build_ssp_task_iu(ireq); sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED); return SCI_SUCCESS; } static enum sci_status sci_io_request_construct_basic_sata(struct isci_request *ireq) { enum sci_status status; bool copy = false; struct sas_task *task = isci_request_access_task(ireq); ireq->protocol = SAS_PROTOCOL_STP; copy = (task->data_dir == DMA_NONE) ? 
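/* Copy received frame data into the request SGL only when the command actually transfers data. */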
false : true; status = sci_io_request_construct_sata(ireq, task->total_xfer_len, task->data_dir, copy); if (status == SCI_SUCCESS) sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED); return status; } #define SCU_TASK_CONTEXT_SRAM 0x200000 /** * sci_req_tx_bytes - bytes transferred when reply underruns request * @ireq: request that was terminated early */ static u32 sci_req_tx_bytes(struct isci_request *ireq) { struct isci_host *ihost = ireq->owning_controller; u32 ret_val = 0; if (readl(&ihost->smu_registers->address_modifier) == 0) { void __iomem *scu_reg_base = ihost->scu_registers; /* get the bytes of data from the Address == BAR1 + 20002Ch + (256*TCi) where * BAR1 is the scu_registers * 0x20002C = 0x200000 + 0x2c * = start of task context SRAM + offset of (type.ssp.data_offset) * TCi is the io_tag of struct sci_request */ ret_val = readl(scu_reg_base + (SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) + ((sizeof(struct scu_task_context)) * ISCI_TAG_TCI(ireq->io_tag))); } return ret_val; } enum sci_status sci_request_start(struct isci_request *ireq) { enum sci_base_request_states state; struct scu_task_context *tc = ireq->tc; struct isci_host *ihost = ireq->owning_controller; state = ireq->sm.current_state_id; if (state != SCI_REQ_CONSTRUCTED) { dev_warn(&ihost->pdev->dev, "%s: SCIC IO Request requested to start while in wrong " "state %d\n", __func__, state); return SCI_FAILURE_INVALID_STATE; } tc->task_index = ISCI_TAG_TCI(ireq->io_tag); switch (tc->protocol_type) { case SCU_TASK_CONTEXT_PROTOCOL_SMP: case SCU_TASK_CONTEXT_PROTOCOL_SSP: /* SSP/SMP Frame */ tc->type.ssp.tag = ireq->io_tag; tc->type.ssp.target_port_transfer_tag = 0xFFFF; break; case SCU_TASK_CONTEXT_PROTOCOL_STP: /* STP/SATA Frame * tc->type.stp.ncq_tag = ireq->ncq_tag; */ break; case SCU_TASK_CONTEXT_PROTOCOL_NONE: /* / @todo When do we set no protocol type? */ break; default: /* This should never happen since we build the IO * requests */ break; } /* Add to the post_context the io tag value */ ireq->post_context |= ISCI_TAG_TCI(ireq->io_tag); /* Everything is good go ahead and change state */ sci_change_state(&ireq->sm, SCI_REQ_STARTED); return SCI_SUCCESS; } enum sci_status sci_io_request_terminate(struct isci_request *ireq) { enum sci_base_request_states state; state = ireq->sm.current_state_id; switch (state) { case SCI_REQ_CONSTRUCTED: /* Set to make sure no HW terminate posting is done: */ set_bit(IREQ_TC_ABORT_POSTED, &ireq->flags); ireq->scu_status = SCU_TASK_DONE_TASK_ABORT; ireq->sci_status = SCI_FAILURE_IO_TERMINATED; sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); return SCI_SUCCESS; case SCI_REQ_STARTED: case SCI_REQ_TASK_WAIT_TC_COMP: case SCI_REQ_SMP_WAIT_RESP: case SCI_REQ_SMP_WAIT_TC_COMP: case SCI_REQ_STP_UDMA_WAIT_TC_COMP: case SCI_REQ_STP_UDMA_WAIT_D2H: case SCI_REQ_STP_NON_DATA_WAIT_H2D: case SCI_REQ_STP_NON_DATA_WAIT_D2H: case SCI_REQ_STP_PIO_WAIT_H2D: case SCI_REQ_STP_PIO_WAIT_FRAME: case SCI_REQ_STP_PIO_DATA_IN: case SCI_REQ_STP_PIO_DATA_OUT: case SCI_REQ_ATAPI_WAIT_H2D: case SCI_REQ_ATAPI_WAIT_PIO_SETUP: case SCI_REQ_ATAPI_WAIT_D2H: case SCI_REQ_ATAPI_WAIT_TC_COMP: /* Fall through and change state to ABORTING... */ case SCI_REQ_TASK_WAIT_TC_RESP: /* The task frame was already confirmed to have been * sent by the SCU HW. Since the state machine is * now only waiting for the task response itself, * abort the request and complete it immediately * and don't wait for the task response. 
*/ sci_change_state(&ireq->sm, SCI_REQ_ABORTING); fallthrough; /* and handle like ABORTING */ case SCI_REQ_ABORTING: if (!isci_remote_device_is_safe_to_abort(ireq->target_device)) set_bit(IREQ_PENDING_ABORT, &ireq->flags); else clear_bit(IREQ_PENDING_ABORT, &ireq->flags); /* If the request is only waiting on the remote device * suspension, return SUCCESS so the caller will wait too. */ return SCI_SUCCESS; case SCI_REQ_COMPLETED: default: dev_warn(&ireq->owning_controller->pdev->dev, "%s: SCIC IO Request requested to abort while in wrong " "state %d\n", __func__, ireq->sm.current_state_id); break; } return SCI_FAILURE_INVALID_STATE; } enum sci_status sci_request_complete(struct isci_request *ireq) { enum sci_base_request_states state; struct isci_host *ihost = ireq->owning_controller; state = ireq->sm.current_state_id; if (WARN_ONCE(state != SCI_REQ_COMPLETED, "isci: request completion from wrong state (%s)\n", req_state_name(state))) return SCI_FAILURE_INVALID_STATE; if (ireq->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX) sci_controller_release_frame(ihost, ireq->saved_rx_frame_index); /* XXX can we just stop the machine and remove the 'final' state? */ sci_change_state(&ireq->sm, SCI_REQ_FINAL); return SCI_SUCCESS; } enum sci_status sci_io_request_event_handler(struct isci_request *ireq, u32 event_code) { enum sci_base_request_states state; struct isci_host *ihost = ireq->owning_controller; state = ireq->sm.current_state_id; if (state != SCI_REQ_STP_PIO_DATA_IN) { dev_warn(&ihost->pdev->dev, "%s: (%x) in wrong state %s\n", __func__, event_code, req_state_name(state)); return SCI_FAILURE_INVALID_STATE; } switch (scu_get_event_specifier(event_code)) { case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT: /* We are waiting for data and the SCU has R_ERR the data frame. * Go back to waiting for the D2H Register FIS */ sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME); return SCI_SUCCESS; default: dev_err(&ihost->pdev->dev, "%s: pio request unexpected event %#x\n", __func__, event_code); /* TODO Should we fail the PIO request when we get an * unexpected event? */ return SCI_FAILURE; } } /* * This function copies response data for requests returning response data * instead of sense data. * @sci_req: This parameter specifies the request object for which to copy * the response data. */ static void sci_io_request_copy_response(struct isci_request *ireq) { void *resp_buf; u32 len; struct ssp_response_iu *ssp_response; struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq); ssp_response = &ireq->ssp.rsp; resp_buf = &isci_tmf->resp.resp_iu; len = min_t(u32, SSP_RESP_IU_MAX_SIZE, be32_to_cpu(ssp_response->response_data_len)); memcpy(resp_buf, ssp_response->resp_data, len); } static enum sci_status request_started_state_tc_event(struct isci_request *ireq, u32 completion_code) { struct ssp_response_iu *resp_iu; u8 datapres; /* TODO: Any SDMA return code of other than 0 is bad decode 0x003C0000 * to determine SDMA status */ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): ireq->scu_status = SCU_TASK_DONE_GOOD; ireq->sci_status = SCI_SUCCESS; break; case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP): { /* There are times when the SCU hardware will return an early * response because the io request specified more data than is * returned by the target device (mode pages, inquiry data, * etc.). We must check the response stats to see if this is * truly a failed request or a good request that just got * completed early. 
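 * The response IU is byte-swapped in place below and its status field decides whether the early completion is treated as success.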
*/ struct ssp_response_iu *resp = &ireq->ssp.rsp; ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32); sci_swab32_cpy(&ireq->ssp.rsp, &ireq->ssp.rsp, word_cnt); if (resp->status == 0) { ireq->scu_status = SCU_TASK_DONE_GOOD; ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY; } else { ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; } break; } case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE): { ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32); sci_swab32_cpy(&ireq->ssp.rsp, &ireq->ssp.rsp, word_cnt); ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; break; } case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RESP_LEN_ERR): /* TODO With TASK_DONE_RESP_LEN_ERR is the response frame * guaranteed to be received before this completion status is * posted? */ resp_iu = &ireq->ssp.rsp; datapres = resp_iu->datapres; if (datapres == SAS_DATAPRES_RESPONSE_DATA || datapres == SAS_DATAPRES_SENSE_DATA) { ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; } else { ireq->scu_status = SCU_TASK_DONE_GOOD; ireq->sci_status = SCI_SUCCESS; } break; /* only stp device gets suspended. */ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_PERR): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_ERR): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_DATA_LEN_ERR): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_ABORT_ERR): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_WD_LEN): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_RESP): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR): if (ireq->protocol == SAS_PROTOCOL_STP) { ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >> SCU_COMPLETION_TL_STATUS_SHIFT; ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED; } else { ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >> SCU_COMPLETION_TL_STATUS_SHIFT; ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; } break; /* both stp/ssp device gets suspended */ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LF_ERR): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_WRONG_DESTINATION): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_BAD_DESTINATION): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_ZONE_VIOLATION): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED): ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >> SCU_COMPLETION_TL_STATUS_SHIFT; ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED; break; /* neither ssp nor stp gets suspended. 
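 * These completion codes simply finish the I/O with a controller-specific error status.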
*/ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_CMD_ERR): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_XR): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_IU_LEN_ERR): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDMA_ERR): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OFFSET_ERR): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EXCESS_DATA): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_DATA): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OPEN_FAIL): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_VIIT_ENTRY_NV): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND): default: ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >> SCU_COMPLETION_TL_STATUS_SHIFT; ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; break; } /* * TODO: This is probably wrong for ACK/NAK timeout conditions */ /* In all cases we will treat this as the completion of the IO req. */ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); return SCI_SUCCESS; } static enum sci_status request_aborting_state_tc_event(struct isci_request *ireq, u32 completion_code) { switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT): case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT): ireq->scu_status = SCU_TASK_DONE_TASK_ABORT; ireq->sci_status = SCI_FAILURE_IO_TERMINATED; sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); break; default: /* Unless we get some strange error wait for the task abort to complete * TODO: Should there be a state change for this completion? */ break; } return SCI_SUCCESS; } static enum sci_status ssp_task_request_await_tc_event(struct isci_request *ireq, u32 completion_code) { switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): ireq->scu_status = SCU_TASK_DONE_GOOD; ireq->sci_status = SCI_SUCCESS; sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP); break; case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO): /* Currently, the decision is to simply allow the task request * to timeout if the task IU wasn't received successfully. * There is a potential for receiving multiple task responses if * we decide to send the task IU again. */ dev_warn(&ireq->owning_controller->pdev->dev, "%s: TaskRequest:0x%p CompletionCode:%x - " "ACK/NAK timeout\n", __func__, ireq, completion_code); sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP); break; default: /* * All other completion status cause the IO to be complete. * If a NAK was received, then it is up to the user to retry * the request. */ ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); break; } return SCI_SUCCESS; } static enum sci_status smp_request_await_response_tc_event(struct isci_request *ireq, u32 completion_code) { switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): /* In the AWAIT RESPONSE state, any TC completion is * unexpected. but if the TC has success status, we * complete the IO anyway. 
*/ ireq->scu_status = SCU_TASK_DONE_GOOD; ireq->sci_status = SCI_SUCCESS; sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); break; case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR): /* These status has been seen in a specific LSI * expander, which sometimes is not able to send smp * response within 2 ms. This causes our hardware break * the connection and set TC completion with one of * these SMP_XXX_XX_ERR status. For these type of error, * we ask ihost user to retry the request. */ ireq->scu_status = SCU_TASK_DONE_SMP_RESP_TO_ERR; ireq->sci_status = SCI_FAILURE_RETRY_REQUIRED; sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); break; default: /* All other completion status cause the IO to be complete. If a NAK * was received, then it is up to the user to retry the request */ ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); break; } return SCI_SUCCESS; } static enum sci_status smp_request_await_tc_event(struct isci_request *ireq, u32 completion_code) { switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): ireq->scu_status = SCU_TASK_DONE_GOOD; ireq->sci_status = SCI_SUCCESS; sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); break; default: /* All other completion status cause the IO to be * complete. If a NAK was received, then it is up to * the user to retry the request. */ ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); break; } return SCI_SUCCESS; } static struct scu_sgl_element *pio_sgl_next(struct isci_stp_request *stp_req) { struct scu_sgl_element *sgl; struct scu_sgl_element_pair *sgl_pair; struct isci_request *ireq = to_ireq(stp_req); struct isci_stp_pio_sgl *pio_sgl = &stp_req->sgl; sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index); if (!sgl_pair) sgl = NULL; else if (pio_sgl->set == SCU_SGL_ELEMENT_PAIR_A) { if (sgl_pair->B.address_lower == 0 && sgl_pair->B.address_upper == 0) { sgl = NULL; } else { pio_sgl->set = SCU_SGL_ELEMENT_PAIR_B; sgl = &sgl_pair->B; } } else { if (sgl_pair->next_pair_lower == 0 && sgl_pair->next_pair_upper == 0) { sgl = NULL; } else { pio_sgl->index++; pio_sgl->set = SCU_SGL_ELEMENT_PAIR_A; sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index); sgl = &sgl_pair->A; } } return sgl; } static enum sci_status stp_request_non_data_await_h2d_tc_event(struct isci_request *ireq, u32 completion_code) { switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): ireq->scu_status = SCU_TASK_DONE_GOOD; ireq->sci_status = SCI_SUCCESS; sci_change_state(&ireq->sm, SCI_REQ_STP_NON_DATA_WAIT_D2H); break; default: /* All other completion status cause the IO to be * complete. If a NAK was received, then it is up to * the user to retry the request. */ ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); break; } return SCI_SUCCESS; } #define SCU_MAX_FRAME_BUFFER_SIZE 0x400 /* 1K is the maximum SCU frame data payload */ /* transmit DATA_FIS from (current sgl + offset) for input * parameter length. 
current sgl and offset is alreay stored in the IO request */ static enum sci_status sci_stp_request_pio_data_out_trasmit_data_frame( struct isci_request *ireq, u32 length) { struct isci_stp_request *stp_req = &ireq->stp.req; struct scu_task_context *task_context = ireq->tc; struct scu_sgl_element_pair *sgl_pair; struct scu_sgl_element *current_sgl; /* Recycle the TC and reconstruct it for sending out DATA FIS containing * for the data from current_sgl+offset for the input length */ sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index); if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A) current_sgl = &sgl_pair->A; else current_sgl = &sgl_pair->B; /* update the TC */ task_context->command_iu_upper = current_sgl->address_upper; task_context->command_iu_lower = current_sgl->address_lower; task_context->transfer_length_bytes = length; task_context->type.stp.fis_type = FIS_DATA; /* send the new TC out. */ return sci_controller_continue_io(ireq); } static enum sci_status sci_stp_request_pio_data_out_transmit_data(struct isci_request *ireq) { struct isci_stp_request *stp_req = &ireq->stp.req; struct scu_sgl_element_pair *sgl_pair; enum sci_status status = SCI_SUCCESS; struct scu_sgl_element *sgl; u32 offset; u32 len = 0; offset = stp_req->sgl.offset; sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index); if (WARN_ONCE(!sgl_pair, "%s: null sgl element", __func__)) return SCI_FAILURE; if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A) { sgl = &sgl_pair->A; len = sgl_pair->A.length - offset; } else { sgl = &sgl_pair->B; len = sgl_pair->B.length - offset; } if (stp_req->pio_len == 0) return SCI_SUCCESS; if (stp_req->pio_len >= len) { status = sci_stp_request_pio_data_out_trasmit_data_frame(ireq, len); if (status != SCI_SUCCESS) return status; stp_req->pio_len -= len; /* update the current sgl, offset and save for future */ sgl = pio_sgl_next(stp_req); offset = 0; } else if (stp_req->pio_len < len) { sci_stp_request_pio_data_out_trasmit_data_frame(ireq, stp_req->pio_len); /* Sgl offset will be adjusted and saved for future */ offset += stp_req->pio_len; sgl->address_lower += stp_req->pio_len; stp_req->pio_len = 0; } stp_req->sgl.offset = offset; return status; } /** * sci_stp_request_pio_data_in_copy_data_buffer() * @stp_req: The request that is used for the SGL processing. * @data_buf: The buffer of data to be copied. * @len: The length of the data transfer. * * Copy the data from the buffer for the length specified to the IO request SGL * specified data region. enum sci_status */ static enum sci_status sci_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req, u8 *data_buf, u32 len) { struct isci_request *ireq; u8 *src_addr; int copy_len; struct sas_task *task; struct scatterlist *sg; void *kaddr; int total_len = len; ireq = to_ireq(stp_req); task = isci_request_access_task(ireq); src_addr = data_buf; if (task->num_scatter > 0) { sg = task->scatter; while (total_len > 0) { struct page *page = sg_page(sg); copy_len = min_t(int, total_len, sg_dma_len(sg)); kaddr = kmap_atomic(page); memcpy(kaddr + sg->offset, src_addr, copy_len); kunmap_atomic(kaddr); total_len -= copy_len; src_addr += copy_len; sg = sg_next(sg); } } else { BUG_ON(task->total_xfer_len < total_len); memcpy(task->scatter, src_addr, total_len); } return SCI_SUCCESS; } /** * sci_stp_request_pio_data_in_copy_data() * @stp_req: The PIO DATA IN request that is to receive the data. * @data_buffer: The buffer to copy from. * * Copy the data buffer to the io request data region. 
enum sci_status */ static enum sci_status sci_stp_request_pio_data_in_copy_data( struct isci_stp_request *stp_req, u8 *data_buffer) { enum sci_status status; /* * If there is less than 1K remaining in the transfer request * copy just the data for the transfer */ if (stp_req->pio_len < SCU_MAX_FRAME_BUFFER_SIZE) { status = sci_stp_request_pio_data_in_copy_data_buffer( stp_req, data_buffer, stp_req->pio_len); if (status == SCI_SUCCESS) stp_req->pio_len = 0; } else { /* We are transfering the whole frame so copy */ status = sci_stp_request_pio_data_in_copy_data_buffer( stp_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE); if (status == SCI_SUCCESS) stp_req->pio_len -= SCU_MAX_FRAME_BUFFER_SIZE; } return status; } static enum sci_status stp_request_pio_await_h2d_completion_tc_event(struct isci_request *ireq, u32 completion_code) { switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): ireq->scu_status = SCU_TASK_DONE_GOOD; ireq->sci_status = SCI_SUCCESS; sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME); break; default: /* All other completion status cause the IO to be * complete. If a NAK was received, then it is up to * the user to retry the request. */ ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); break; } return SCI_SUCCESS; } static enum sci_status pio_data_out_tx_done_tc_event(struct isci_request *ireq, u32 completion_code) { enum sci_status status = SCI_SUCCESS; bool all_frames_transferred = false; struct isci_stp_request *stp_req = &ireq->stp.req; switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): /* Transmit data */ if (stp_req->pio_len != 0) { status = sci_stp_request_pio_data_out_transmit_data(ireq); if (status == SCI_SUCCESS) { if (stp_req->pio_len == 0) all_frames_transferred = true; } } else if (stp_req->pio_len == 0) { /* * this will happen if the all data is written at the * first time after the pio setup fis is received */ all_frames_transferred = true; } /* all data transferred. */ if (all_frames_transferred) { /* * Change the state to SCI_REQ_STP_PIO_DATA_IN * and wait for PIO_SETUP fis / or D2H REg fis. */ sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME); } break; default: /* * All other completion status cause the IO to be complete. * If a NAK was received, then it is up to the user to retry * the request. 
*/ ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); break; } return status; } static enum sci_status sci_stp_request_udma_general_frame_handler(struct isci_request *ireq, u32 frame_index) { struct isci_host *ihost = ireq->owning_controller; struct dev_to_host_fis *frame_header; enum sci_status status; u32 *frame_buffer; status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, frame_index, (void **)&frame_header); if ((status == SCI_SUCCESS) && (frame_header->fis_type == FIS_REGD2H)) { sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, frame_index, (void **)&frame_buffer); sci_controller_copy_sata_response(&ireq->stp.rsp, frame_header, frame_buffer); } sci_controller_release_frame(ihost, frame_index); return status; } static enum sci_status process_unsolicited_fis(struct isci_request *ireq, u32 frame_index) { struct isci_host *ihost = ireq->owning_controller; enum sci_status status; struct dev_to_host_fis *frame_header; u32 *frame_buffer; status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, frame_index, (void **)&frame_header); if (status != SCI_SUCCESS) return status; if (frame_header->fis_type != FIS_REGD2H) { dev_err(&ireq->isci_host->pdev->dev, "%s ERROR: invalid fis type 0x%X\n", __func__, frame_header->fis_type); return SCI_FAILURE; } sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, frame_index, (void **)&frame_buffer); sci_controller_copy_sata_response(&ireq->stp.rsp, (u32 *)frame_header, frame_buffer); /* Frame has been decoded return it to the controller */ sci_controller_release_frame(ihost, frame_index); return status; } static enum sci_status atapi_d2h_reg_frame_handler(struct isci_request *ireq, u32 frame_index) { struct sas_task *task = isci_request_access_task(ireq); enum sci_status status; status = process_unsolicited_fis(ireq, frame_index); if (status == SCI_SUCCESS) { if (ireq->stp.rsp.status & ATA_ERR) status = SCI_FAILURE_IO_RESPONSE_VALID; } else { status = SCI_FAILURE_IO_RESPONSE_VALID; } if (status != SCI_SUCCESS) { ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; ireq->sci_status = status; } else { ireq->scu_status = SCU_TASK_DONE_GOOD; ireq->sci_status = SCI_SUCCESS; } /* the d2h ufi is the end of non-data commands */ if (task->data_dir == DMA_NONE) sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); return status; } static void scu_atapi_reconstruct_raw_frame_task_context(struct isci_request *ireq) { struct ata_device *dev = sas_to_ata_dev(ireq->target_device->domain_dev); void *atapi_cdb = ireq->ttype_ptr.io_task_ptr->ata_task.atapi_packet; struct scu_task_context *task_context = ireq->tc; /* fill in the SCU Task Context for a DATA fis containing CDB in Raw Frame * type. The TC for previous Packet fis was already there, we only need to * change the H2D fis content. 
*/ memset(&ireq->stp.cmd, 0, sizeof(struct host_to_dev_fis)); memcpy(((u8 *)&ireq->stp.cmd + sizeof(u32)), atapi_cdb, ATAPI_CDB_LEN); memset(&(task_context->type.stp), 0, sizeof(struct stp_task_context)); task_context->type.stp.fis_type = FIS_DATA; task_context->transfer_length_bytes = dev->cdb_len; } static void scu_atapi_construct_task_context(struct isci_request *ireq) { struct ata_device *dev = sas_to_ata_dev(ireq->target_device->domain_dev); struct sas_task *task = isci_request_access_task(ireq); struct scu_task_context *task_context = ireq->tc; int cdb_len = dev->cdb_len; /* reference: SSTL 1.13.4.2 * task_type, sata_direction */ if (task->data_dir == DMA_TO_DEVICE) { task_context->task_type = SCU_TASK_TYPE_PACKET_DMA_OUT; task_context->sata_direction = 0; } else { /* todo: for NO_DATA command, we need to send out raw frame. */ task_context->task_type = SCU_TASK_TYPE_PACKET_DMA_IN; task_context->sata_direction = 1; } memset(&task_context->type.stp, 0, sizeof(task_context->type.stp)); task_context->type.stp.fis_type = FIS_DATA; memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd)); memcpy(&ireq->stp.cmd.lbal, task->ata_task.atapi_packet, cdb_len); task_context->ssp_command_iu_length = cdb_len / sizeof(u32); /* task phase is set to TX_CMD */ task_context->task_phase = 0x1; /* retry counter */ task_context->stp_retry_count = 0; /* data transfer size. */ task_context->transfer_length_bytes = task->total_xfer_len; /* setup sgl */ sci_request_build_sgl(ireq); } enum sci_status sci_io_request_frame_handler(struct isci_request *ireq, u32 frame_index) { struct isci_host *ihost = ireq->owning_controller; struct isci_stp_request *stp_req = &ireq->stp.req; enum sci_base_request_states state; enum sci_status status; ssize_t word_cnt; state = ireq->sm.current_state_id; switch (state) { case SCI_REQ_STARTED: { struct ssp_frame_hdr ssp_hdr; void *frame_header; sci_unsolicited_frame_control_get_header(&ihost->uf_control, frame_index, &frame_header); word_cnt = sizeof(struct ssp_frame_hdr) / sizeof(u32); sci_swab32_cpy(&ssp_hdr, frame_header, word_cnt); if (ssp_hdr.frame_type == SSP_RESPONSE) { struct ssp_response_iu *resp_iu; ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32); sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, frame_index, (void **)&resp_iu); sci_swab32_cpy(&ireq->ssp.rsp, resp_iu, word_cnt); resp_iu = &ireq->ssp.rsp; if (resp_iu->datapres == SAS_DATAPRES_RESPONSE_DATA || resp_iu->datapres == SAS_DATAPRES_SENSE_DATA) { ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; } else { ireq->scu_status = SCU_TASK_DONE_GOOD; ireq->sci_status = SCI_SUCCESS; } } else { /* not a response frame, why did it get forwarded? 
*/ dev_err(&ihost->pdev->dev, "%s: SCIC IO Request 0x%p received unexpected " "frame %d type 0x%02x\n", __func__, ireq, frame_index, ssp_hdr.frame_type); } /* * In any case we are done with this frame buffer return it to * the controller */ sci_controller_release_frame(ihost, frame_index); return SCI_SUCCESS; } case SCI_REQ_TASK_WAIT_TC_RESP: sci_io_request_copy_response(ireq); sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); sci_controller_release_frame(ihost, frame_index); return SCI_SUCCESS; case SCI_REQ_SMP_WAIT_RESP: { struct sas_task *task = isci_request_access_task(ireq); struct scatterlist *sg = &task->smp_task.smp_resp; void *frame_header, *kaddr; u8 *rsp; sci_unsolicited_frame_control_get_header(&ihost->uf_control, frame_index, &frame_header); kaddr = kmap_atomic(sg_page(sg)); rsp = kaddr + sg->offset; sci_swab32_cpy(rsp, frame_header, 1); if (rsp[0] == SMP_RESPONSE) { void *smp_resp; sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, frame_index, &smp_resp); word_cnt = (sg->length/4)-1; if (word_cnt > 0) word_cnt = min_t(unsigned int, word_cnt, SCU_UNSOLICITED_FRAME_BUFFER_SIZE/4); sci_swab32_cpy(rsp + 4, smp_resp, word_cnt); ireq->scu_status = SCU_TASK_DONE_GOOD; ireq->sci_status = SCI_SUCCESS; sci_change_state(&ireq->sm, SCI_REQ_SMP_WAIT_TC_COMP); } else { /* * This was not a response frame why did it get * forwarded? */ dev_err(&ihost->pdev->dev, "%s: SCIC SMP Request 0x%p received unexpected " "frame %d type 0x%02x\n", __func__, ireq, frame_index, rsp[0]); ireq->scu_status = SCU_TASK_DONE_SMP_FRM_TYPE_ERR; ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); } kunmap_atomic(kaddr); sci_controller_release_frame(ihost, frame_index); return SCI_SUCCESS; } case SCI_REQ_STP_UDMA_WAIT_TC_COMP: return sci_stp_request_udma_general_frame_handler(ireq, frame_index); case SCI_REQ_STP_UDMA_WAIT_D2H: /* Use the general frame handler to copy the resposne data */ status = sci_stp_request_udma_general_frame_handler(ireq, frame_index); if (status != SCI_SUCCESS) return status; ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); return SCI_SUCCESS; case SCI_REQ_STP_NON_DATA_WAIT_D2H: { struct dev_to_host_fis *frame_header; u32 *frame_buffer; status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, frame_index, (void **)&frame_header); if (status != SCI_SUCCESS) { dev_err(&ihost->pdev->dev, "%s: SCIC IO Request 0x%p could not get frame " "header for frame index %d, status %x\n", __func__, stp_req, frame_index, status); return status; } switch (frame_header->fis_type) { case FIS_REGD2H: sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, frame_index, (void **)&frame_buffer); sci_controller_copy_sata_response(&ireq->stp.rsp, frame_header, frame_buffer); /* The command has completed with error */ ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; break; default: dev_warn(&ihost->pdev->dev, "%s: IO Request:0x%p Frame Id:%d protocol " "violation occurred\n", __func__, stp_req, frame_index); ireq->scu_status = SCU_TASK_DONE_UNEXP_FIS; ireq->sci_status = SCI_FAILURE_PROTOCOL_VIOLATION; break; } sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); /* Frame has been decoded return it to the controller */ sci_controller_release_frame(ihost, frame_index); return status; } case SCI_REQ_STP_PIO_WAIT_FRAME: { struct sas_task *task = isci_request_access_task(ireq); struct dev_to_host_fis 
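/* header of the expected PIO Setup, Set Device Bits, or D2H Register FIS */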
*frame_header; u32 *frame_buffer; status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, frame_index, (void **)&frame_header); if (status != SCI_SUCCESS) { dev_err(&ihost->pdev->dev, "%s: SCIC IO Request 0x%p could not get frame " "header for frame index %d, status %x\n", __func__, stp_req, frame_index, status); return status; } switch (frame_header->fis_type) { case FIS_PIO_SETUP: /* Get from the frame buffer the PIO Setup Data */ sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, frame_index, (void **)&frame_buffer); /* Get the data from the PIO Setup The SCU Hardware * returns first word in the frame_header and the rest * of the data is in the frame buffer so we need to * back up one dword */ /* transfer_count: first 16bits in the 4th dword */ stp_req->pio_len = frame_buffer[3] & 0xffff; /* status: 4th byte in the 3rd dword */ stp_req->status = (frame_buffer[2] >> 24) & 0xff; sci_controller_copy_sata_response(&ireq->stp.rsp, frame_header, frame_buffer); ireq->stp.rsp.status = stp_req->status; /* The next state is dependent on whether the * request was PIO Data-in or Data out */ if (task->data_dir == DMA_FROM_DEVICE) { sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_IN); } else if (task->data_dir == DMA_TO_DEVICE) { /* Transmit data */ status = sci_stp_request_pio_data_out_transmit_data(ireq); if (status != SCI_SUCCESS) break; sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_OUT); } break; case FIS_SETDEVBITS: sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME); break; case FIS_REGD2H: if (frame_header->status & ATA_BUSY) { /* * Now why is the drive sending a D2H Register * FIS when it is still busy? Do nothing since * we are still in the right state. */ dev_dbg(&ihost->pdev->dev, "%s: SCIC PIO Request 0x%p received " "D2H Register FIS with BSY status " "0x%x\n", __func__, stp_req, frame_header->status); break; } sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, frame_index, (void **)&frame_buffer); sci_controller_copy_sata_response(&ireq->stp.rsp, frame_header, frame_buffer); ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); break; default: /* FIXME: what do we do here? 
*/ break; } /* Frame is decoded return it to the controller */ sci_controller_release_frame(ihost, frame_index); return status; } case SCI_REQ_STP_PIO_DATA_IN: { struct dev_to_host_fis *frame_header; struct sata_fis_data *frame_buffer; status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, frame_index, (void **)&frame_header); if (status != SCI_SUCCESS) { dev_err(&ihost->pdev->dev, "%s: SCIC IO Request 0x%p could not get frame " "header for frame index %d, status %x\n", __func__, stp_req, frame_index, status); return status; } if (frame_header->fis_type != FIS_DATA) { dev_err(&ihost->pdev->dev, "%s: SCIC PIO Request 0x%p received frame %d " "with fis type 0x%02x when expecting a data " "fis.\n", __func__, stp_req, frame_index, frame_header->fis_type); ireq->scu_status = SCU_TASK_DONE_GOOD; ireq->sci_status = SCI_FAILURE_IO_REQUIRES_SCSI_ABORT; sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); /* Frame is decoded return it to the controller */ sci_controller_release_frame(ihost, frame_index); return status; } if (stp_req->sgl.index < 0) { ireq->saved_rx_frame_index = frame_index; stp_req->pio_len = 0; } else { sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, frame_index, (void **)&frame_buffer); status = sci_stp_request_pio_data_in_copy_data(stp_req, (u8 *)frame_buffer); /* Frame is decoded return it to the controller */ sci_controller_release_frame(ihost, frame_index); } /* Check for the end of the transfer, are there more * bytes remaining for this data transfer */ if (status != SCI_SUCCESS || stp_req->pio_len != 0) return status; if ((stp_req->status & ATA_BUSY) == 0) { ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); } else { sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME); } return status; } case SCI_REQ_ATAPI_WAIT_PIO_SETUP: { struct sas_task *task = isci_request_access_task(ireq); sci_controller_release_frame(ihost, frame_index); ireq->target_device->working_request = ireq; if (task->data_dir == DMA_NONE) { sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_TC_COMP); scu_atapi_reconstruct_raw_frame_task_context(ireq); } else { sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_D2H); scu_atapi_construct_task_context(ireq); } sci_controller_continue_io(ireq); return SCI_SUCCESS; } case SCI_REQ_ATAPI_WAIT_D2H: return atapi_d2h_reg_frame_handler(ireq, frame_index); case SCI_REQ_ABORTING: /* * TODO: Is it even possible to get an unsolicited frame in the * aborting state? */ sci_controller_release_frame(ihost, frame_index); return SCI_SUCCESS; default: dev_warn(&ihost->pdev->dev, "%s: SCIC IO Request given unexpected frame %x while " "in state %d\n", __func__, frame_index, state); sci_controller_release_frame(ihost, frame_index); return SCI_FAILURE_INVALID_STATE; } } static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq, u32 completion_code) { switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): ireq->scu_status = SCU_TASK_DONE_GOOD; ireq->sci_status = SCI_SUCCESS; sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); break; case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR): /* We must check ther response buffer to see if the D2H * Register FIS was received before we got the TC * completion. 
*/ if (ireq->stp.rsp.fis_type == FIS_REGD2H) { sci_remote_device_suspend(ireq->target_device, SCI_SW_SUSPEND_NORMAL); ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); } else { /* If we have an error completion status for the * TC then we can expect a D2H register FIS from * the device so we must change state to wait * for it */ sci_change_state(&ireq->sm, SCI_REQ_STP_UDMA_WAIT_D2H); } break; /* TODO Check to see if any of these completion status need to * wait for the device to host register fis. */ /* TODO We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR * - this comes only for B0 */ default: /* All other completion status cause the IO to be complete. */ ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); break; } return SCI_SUCCESS; } static enum sci_status atapi_raw_completion(struct isci_request *ireq, u32 completion_code, enum sci_base_request_states next) { switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): ireq->scu_status = SCU_TASK_DONE_GOOD; ireq->sci_status = SCI_SUCCESS; sci_change_state(&ireq->sm, next); break; default: /* All other completion status cause the IO to be complete. * If a NAK was received, then it is up to the user to retry * the request. */ ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); break; } return SCI_SUCCESS; } static enum sci_status atapi_data_tc_completion_handler(struct isci_request *ireq, u32 completion_code) { struct isci_remote_device *idev = ireq->target_device; struct dev_to_host_fis *d2h = &ireq->stp.rsp; enum sci_status status = SCI_SUCCESS; switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT): sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); break; case (SCU_TASK_DONE_UNEXP_FIS << SCU_COMPLETION_TL_STATUS_SHIFT): { u16 len = sci_req_tx_bytes(ireq); /* likely non-error data underrun, workaround missing * d2h frame from the controller */ if (d2h->fis_type != FIS_REGD2H) { d2h->fis_type = FIS_REGD2H; d2h->flags = (1 << 6); d2h->status = 0x50; d2h->error = 0; d2h->lbal = 0; d2h->byte_count_low = len & 0xff; d2h->byte_count_high = len >> 8; d2h->device = 0xa0; d2h->lbal_exp = 0; d2h->lbam_exp = 0; d2h->lbah_exp = 0; d2h->_r_a = 0; d2h->sector_count = 0x3; d2h->sector_count_exp = 0; d2h->_r_b = 0; d2h->_r_c = 0; d2h->_r_d = 0; } ireq->scu_status = SCU_TASK_DONE_GOOD; ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY; status = ireq->sci_status; /* the hw will have suspended the rnc, so complete the * request upon pending resume */ sci_change_state(&idev->sm, SCI_STP_DEV_ATAPI_ERROR); break; } case (SCU_TASK_DONE_EXCESS_DATA << SCU_COMPLETION_TL_STATUS_SHIFT): /* In this case, there is no UF coming after. * compelte the IO now. 
*/ ireq->scu_status = SCU_TASK_DONE_GOOD; ireq->sci_status = SCI_SUCCESS; sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); break; default: if (d2h->fis_type == FIS_REGD2H) { /* UF received change the device state to ATAPI_ERROR */ status = ireq->sci_status; sci_change_state(&idev->sm, SCI_STP_DEV_ATAPI_ERROR); } else { /* If receiving any non-success TC status, no UF * received yet, then an UF for the status fis * is coming after (XXX: suspect this is * actually a protocol error or a bug like the * DONE_UNEXP_FIS case) */ ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_D2H); } break; } return status; } static int sci_request_smp_completion_status_is_tx_suspend( unsigned int completion_status) { switch (completion_status) { case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION: case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1: case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2: case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3: case SCU_TASK_OPEN_REJECT_BAD_DESTINATION: case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION: return 1; } return 0; } static int sci_request_smp_completion_status_is_tx_rx_suspend( unsigned int completion_status) { return 0; /* There are no Tx/Rx SMP suspend conditions. */ } static int sci_request_ssp_completion_status_is_tx_suspend( unsigned int completion_status) { switch (completion_status) { case SCU_TASK_DONE_TX_RAW_CMD_ERR: case SCU_TASK_DONE_LF_ERR: case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION: case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1: case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2: case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3: case SCU_TASK_OPEN_REJECT_BAD_DESTINATION: case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION: case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY: case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED: case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED: return 1; } return 0; } static int sci_request_ssp_completion_status_is_tx_rx_suspend( unsigned int completion_status) { return 0; /* There are no Tx/Rx SSP suspend conditions. 
*/ } static int sci_request_stpsata_completion_status_is_tx_suspend( unsigned int completion_status) { switch (completion_status) { case SCU_TASK_DONE_TX_RAW_CMD_ERR: case SCU_TASK_DONE_LL_R_ERR: case SCU_TASK_DONE_LL_PERR: case SCU_TASK_DONE_REG_ERR: case SCU_TASK_DONE_SDB_ERR: case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION: case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1: case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2: case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3: case SCU_TASK_OPEN_REJECT_BAD_DESTINATION: case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION: case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY: case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED: case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED: return 1; } return 0; } static int sci_request_stpsata_completion_status_is_tx_rx_suspend( unsigned int completion_status) { switch (completion_status) { case SCU_TASK_DONE_LF_ERR: case SCU_TASK_DONE_LL_SY_TERM: case SCU_TASK_DONE_LL_LF_TERM: case SCU_TASK_DONE_BREAK_RCVD: case SCU_TASK_DONE_INV_FIS_LEN: case SCU_TASK_DONE_UNEXP_FIS: case SCU_TASK_DONE_UNEXP_SDBFIS: case SCU_TASK_DONE_MAX_PLD_ERR: return 1; } return 0; } static void sci_request_handle_suspending_completions( struct isci_request *ireq, u32 completion_code) { int is_tx = 0; int is_tx_rx = 0; switch (ireq->protocol) { case SAS_PROTOCOL_SMP: is_tx = sci_request_smp_completion_status_is_tx_suspend( completion_code); is_tx_rx = sci_request_smp_completion_status_is_tx_rx_suspend( completion_code); break; case SAS_PROTOCOL_SSP: is_tx = sci_request_ssp_completion_status_is_tx_suspend( completion_code); is_tx_rx = sci_request_ssp_completion_status_is_tx_rx_suspend( completion_code); break; case SAS_PROTOCOL_STP: is_tx = sci_request_stpsata_completion_status_is_tx_suspend( completion_code); is_tx_rx = sci_request_stpsata_completion_status_is_tx_rx_suspend( completion_code); break; default: dev_warn(&ireq->isci_host->pdev->dev, "%s: request %p has no valid protocol\n", __func__, ireq); break; } if (is_tx || is_tx_rx) { BUG_ON(is_tx && is_tx_rx); sci_remote_node_context_suspend( &ireq->target_device->rnc, SCI_HW_SUSPEND, (is_tx_rx) ? SCU_EVENT_TL_RNC_SUSPEND_TX_RX : SCU_EVENT_TL_RNC_SUSPEND_TX); } } enum sci_status sci_io_request_tc_completion(struct isci_request *ireq, u32 completion_code) { enum sci_base_request_states state; struct isci_host *ihost = ireq->owning_controller; state = ireq->sm.current_state_id; /* Decode those completions that signal upcoming suspension events. 
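 * This runs for every TC completion before the per-state dispatch below.
 * For completion codes that imply a suspension it requests a hardware
 * suspend (SCI_HW_SUSPEND) of the remote node context, using either
 * SCU_EVENT_TL_RNC_SUSPEND_TX or SCU_EVENT_TL_RNC_SUSPEND_TX_RX depending
 * on the completion code and request protocol.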
*/ sci_request_handle_suspending_completions( ireq, SCU_GET_COMPLETION_TL_STATUS(completion_code)); switch (state) { case SCI_REQ_STARTED: return request_started_state_tc_event(ireq, completion_code); case SCI_REQ_TASK_WAIT_TC_COMP: return ssp_task_request_await_tc_event(ireq, completion_code); case SCI_REQ_SMP_WAIT_RESP: return smp_request_await_response_tc_event(ireq, completion_code); case SCI_REQ_SMP_WAIT_TC_COMP: return smp_request_await_tc_event(ireq, completion_code); case SCI_REQ_STP_UDMA_WAIT_TC_COMP: return stp_request_udma_await_tc_event(ireq, completion_code); case SCI_REQ_STP_NON_DATA_WAIT_H2D: return stp_request_non_data_await_h2d_tc_event(ireq, completion_code); case SCI_REQ_STP_PIO_WAIT_H2D: return stp_request_pio_await_h2d_completion_tc_event(ireq, completion_code); case SCI_REQ_STP_PIO_DATA_OUT: return pio_data_out_tx_done_tc_event(ireq, completion_code); case SCI_REQ_ABORTING: return request_aborting_state_tc_event(ireq, completion_code); case SCI_REQ_ATAPI_WAIT_H2D: return atapi_raw_completion(ireq, completion_code, SCI_REQ_ATAPI_WAIT_PIO_SETUP); case SCI_REQ_ATAPI_WAIT_TC_COMP: return atapi_raw_completion(ireq, completion_code, SCI_REQ_ATAPI_WAIT_D2H); case SCI_REQ_ATAPI_WAIT_D2H: return atapi_data_tc_completion_handler(ireq, completion_code); default: dev_warn(&ihost->pdev->dev, "%s: %x in wrong state %s\n", __func__, completion_code, req_state_name(state)); return SCI_FAILURE_INVALID_STATE; } } /** * isci_request_process_response_iu() - This function sets the status and * response iu, in the task struct, from the request object for the upper * layer driver. * @task: This parameter is the task struct from the upper layer driver. * @resp_iu: This parameter points to the response iu of the completed request. * @dev: This parameter specifies the linux device struct. * * none. */ static void isci_request_process_response_iu( struct sas_task *task, struct ssp_response_iu *resp_iu, struct device *dev) { dev_dbg(dev, "%s: resp_iu = %p " "resp_iu->status = 0x%x,\nresp_iu->datapres = %d " "resp_iu->response_data_len = %x, " "resp_iu->sense_data_len = %x\nresponse data: ", __func__, resp_iu, resp_iu->status, resp_iu->datapres, resp_iu->response_data_len, resp_iu->sense_data_len); task->task_status.stat = resp_iu->status; /* libsas updates the task status fields based on the response iu. */ sas_ssp_task_response(dev, task, resp_iu); } /** * isci_request_set_open_reject_status() - This function prepares the I/O * completion for OPEN_REJECT conditions. * @request: This parameter is the completed isci_request object. * @task: This parameter is the task struct from the upper layer driver. * @response_ptr: This parameter specifies the service response for the I/O. * @status_ptr: This parameter specifies the exec status for the I/O. * @open_rej_reason: This parameter specifies the encoded reason for the * abandon-class reject. * * none. */ static void isci_request_set_open_reject_status( struct isci_request *request, struct sas_task *task, enum service_response *response_ptr, enum exec_status *status_ptr, enum sas_open_rej_reason open_rej_reason) { /* Task in the target is done. */ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); *response_ptr = SAS_TASK_UNDELIVERED; *status_ptr = SAS_OPEN_REJECT; task->task_status.open_rej_reason = open_rej_reason; } /** * isci_request_handle_controller_specific_errors() - This function decodes * controller-specific I/O completion error conditions. * @idev: Remote device * @request: This parameter is the completed isci_request object. 
* @task: This parameter is the task struct from the upper layer driver. * @response_ptr: This parameter specifies the service response for the I/O. * @status_ptr: This parameter specifies the exec status for the I/O. * * none. */ static void isci_request_handle_controller_specific_errors( struct isci_remote_device *idev, struct isci_request *request, struct sas_task *task, enum service_response *response_ptr, enum exec_status *status_ptr) { unsigned int cstatus; cstatus = request->scu_status; dev_dbg(&request->isci_host->pdev->dev, "%s: %p SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR " "- controller status = 0x%x\n", __func__, request, cstatus); /* Decode the controller-specific errors; most * important is to recognize those conditions in which * the target may still have a task outstanding that * must be aborted. * * Note that there are SCU completion codes being * named in the decode below for which SCIC has already * done work to handle them in a way other than as * a controller-specific completion code; these are left * in the decode below for completeness sake. */ switch (cstatus) { case SCU_TASK_DONE_DMASETUP_DIRERR: /* Also SCU_TASK_DONE_SMP_FRM_TYPE_ERR: */ case SCU_TASK_DONE_XFERCNT_ERR: /* Also SCU_TASK_DONE_SMP_UFI_ERR: */ if (task->task_proto == SAS_PROTOCOL_SMP) { /* SCU_TASK_DONE_SMP_UFI_ERR == Task Done. */ *response_ptr = SAS_TASK_COMPLETE; /* See if the device has been/is being stopped. Note * that we ignore the quiesce state, since we are * concerned about the actual device state. */ if (!idev) *status_ptr = SAS_DEVICE_UNKNOWN; else *status_ptr = SAS_ABORTED_TASK; set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); } else { /* Task in the target is not done. */ *response_ptr = SAS_TASK_UNDELIVERED; if (!idev) *status_ptr = SAS_DEVICE_UNKNOWN; else *status_ptr = SAS_SAM_STAT_TASK_ABORTED; clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); } break; case SCU_TASK_DONE_CRC_ERR: case SCU_TASK_DONE_NAK_CMD_ERR: case SCU_TASK_DONE_EXCESS_DATA: case SCU_TASK_DONE_UNEXP_FIS: /* Also SCU_TASK_DONE_UNEXP_RESP: */ case SCU_TASK_DONE_VIIT_ENTRY_NV: /* TODO - conditions? */ case SCU_TASK_DONE_IIT_ENTRY_NV: /* TODO - conditions? */ case SCU_TASK_DONE_RNCNV_OUTBOUND: /* TODO - conditions? */ /* These are conditions in which the target * has completed the task, so that no cleanup * is necessary. */ *response_ptr = SAS_TASK_COMPLETE; /* See if the device has been/is being stopped. Note * that we ignore the quiesce state, since we are * concerned about the actual device state. */ if (!idev) *status_ptr = SAS_DEVICE_UNKNOWN; else *status_ptr = SAS_ABORTED_TASK; set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); break; /* Note that the only open reject completion codes seen here will be * abandon-class codes; all others are automatically retried in the SCU. */ case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION: isci_request_set_open_reject_status( request, task, response_ptr, status_ptr, SAS_OREJ_WRONG_DEST); break; case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION: /* Note - the return of AB0 will change when * libsas implements detection of zone violations. 
*/ isci_request_set_open_reject_status( request, task, response_ptr, status_ptr, SAS_OREJ_RESV_AB0); break; case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1: isci_request_set_open_reject_status( request, task, response_ptr, status_ptr, SAS_OREJ_RESV_AB1); break; case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2: isci_request_set_open_reject_status( request, task, response_ptr, status_ptr, SAS_OREJ_RESV_AB2); break; case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3: isci_request_set_open_reject_status( request, task, response_ptr, status_ptr, SAS_OREJ_RESV_AB3); break; case SCU_TASK_OPEN_REJECT_BAD_DESTINATION: isci_request_set_open_reject_status( request, task, response_ptr, status_ptr, SAS_OREJ_BAD_DEST); break; case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY: isci_request_set_open_reject_status( request, task, response_ptr, status_ptr, SAS_OREJ_STP_NORES); break; case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED: isci_request_set_open_reject_status( request, task, response_ptr, status_ptr, SAS_OREJ_EPROTO); break; case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED: isci_request_set_open_reject_status( request, task, response_ptr, status_ptr, SAS_OREJ_CONN_RATE); break; case SCU_TASK_DONE_LL_R_ERR: /* Also SCU_TASK_DONE_ACK_NAK_TO: */ case SCU_TASK_DONE_LL_PERR: case SCU_TASK_DONE_LL_SY_TERM: /* Also SCU_TASK_DONE_NAK_ERR:*/ case SCU_TASK_DONE_LL_LF_TERM: /* Also SCU_TASK_DONE_DATA_LEN_ERR: */ case SCU_TASK_DONE_LL_ABORT_ERR: case SCU_TASK_DONE_SEQ_INV_TYPE: /* Also SCU_TASK_DONE_UNEXP_XR: */ case SCU_TASK_DONE_XR_IU_LEN_ERR: case SCU_TASK_DONE_INV_FIS_LEN: /* Also SCU_TASK_DONE_XR_WD_LEN: */ case SCU_TASK_DONE_SDMA_ERR: case SCU_TASK_DONE_OFFSET_ERR: case SCU_TASK_DONE_MAX_PLD_ERR: case SCU_TASK_DONE_LF_ERR: case SCU_TASK_DONE_SMP_RESP_TO_ERR: /* Escalate to dev reset? */ case SCU_TASK_DONE_SMP_LL_RX_ERR: case SCU_TASK_DONE_UNEXP_DATA: case SCU_TASK_DONE_UNEXP_SDBFIS: case SCU_TASK_DONE_REG_ERR: case SCU_TASK_DONE_SDB_ERR: case SCU_TASK_DONE_TASK_ABORT: default: /* Task in the target is not done. */ *response_ptr = SAS_TASK_UNDELIVERED; *status_ptr = SAS_SAM_STAT_TASK_ABORTED; if (task->task_proto == SAS_PROTOCOL_SMP) set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); else clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); break; } } static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_fis *fis) { struct task_status_struct *ts = &task->task_status; struct ata_task_resp *resp = (void *)&ts->buf[0]; resp->frame_len = sizeof(*fis); memcpy(resp->ending_fis, fis, sizeof(*fis)); ts->buf_valid_size = sizeof(*resp); /* If an error is flagged let libata decode the fis */ if (ac_err_mask(fis->status)) ts->stat = SAS_PROTO_RESPONSE; else ts->stat = SAS_SAM_STAT_GOOD; ts->resp = SAS_TASK_COMPLETE; } static void isci_request_io_request_complete(struct isci_host *ihost, struct isci_request *request, enum sci_io_status completion_status) { struct sas_task *task = isci_request_access_task(request); struct ssp_response_iu *resp_iu; unsigned long task_flags; struct isci_remote_device *idev = request->target_device; enum service_response response = SAS_TASK_UNDELIVERED; enum exec_status status = SAS_ABORTED_TASK; dev_dbg(&ihost->pdev->dev, "%s: request = %p, task = %p, " "task->data_dir = %d completion_status = 0x%x\n", __func__, request, task, task->data_dir, completion_status); /* The request is done from an SCU HW perspective. */ /* This is an active request being completed from the core. 
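 * The switch below translates the SCI completion status into the libsas
 * response/exec status pair (task->task_status.resp/.stat) and sets or
 * clears IREQ_COMPLETE_IN_TARGET to record whether the target may still
 * hold the task.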
*/ switch (completion_status) { case SCI_IO_FAILURE_RESPONSE_VALID: dev_dbg(&ihost->pdev->dev, "%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n", __func__, request, task); if (sas_protocol_ata(task->task_proto)) { isci_process_stp_response(task, &request->stp.rsp); } else if (SAS_PROTOCOL_SSP == task->task_proto) { /* crack the iu response buffer. */ resp_iu = &request->ssp.rsp; isci_request_process_response_iu(task, resp_iu, &ihost->pdev->dev); } else if (SAS_PROTOCOL_SMP == task->task_proto) { dev_err(&ihost->pdev->dev, "%s: SCI_IO_FAILURE_RESPONSE_VALID: " "SAS_PROTOCOL_SMP protocol\n", __func__); } else dev_err(&ihost->pdev->dev, "%s: unknown protocol\n", __func__); /* use the task status set in the task struct by the * isci_request_process_response_iu call. */ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); response = task->task_status.resp; status = task->task_status.stat; break; case SCI_IO_SUCCESS: case SCI_IO_SUCCESS_IO_DONE_EARLY: response = SAS_TASK_COMPLETE; status = SAS_SAM_STAT_GOOD; set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); if (completion_status == SCI_IO_SUCCESS_IO_DONE_EARLY) { /* This was an SSP / STP / SATA transfer. * There is a possibility that less data than * the maximum was transferred. */ u32 transferred_length = sci_req_tx_bytes(request); task->task_status.residual = task->total_xfer_len - transferred_length; /* If there were residual bytes, call this an * underrun. */ if (task->task_status.residual != 0) status = SAS_DATA_UNDERRUN; dev_dbg(&ihost->pdev->dev, "%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n", __func__, status); } else dev_dbg(&ihost->pdev->dev, "%s: SCI_IO_SUCCESS\n", __func__); break; case SCI_IO_FAILURE_TERMINATED: dev_dbg(&ihost->pdev->dev, "%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n", __func__, request, task); /* The request was terminated explicitly. */ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); response = SAS_TASK_UNDELIVERED; /* See if the device has been/is being stopped. Note * that we ignore the quiesce state, since we are * concerned about the actual device state. */ if (!idev) status = SAS_DEVICE_UNKNOWN; else status = SAS_ABORTED_TASK; break; case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR: isci_request_handle_controller_specific_errors(idev, request, task, &response, &status); break; case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED: /* This is a special case, in that the I/O completion * is telling us that the device needs a reset. * In order for the device reset condition to be * noticed, the I/O has to be handled in the error * handler. Set the reset flag and cause the * SCSI error thread to be scheduled. */ spin_lock_irqsave(&task->task_state_lock, task_flags); task->task_state_flags |= SAS_TASK_NEED_DEV_RESET; spin_unlock_irqrestore(&task->task_state_lock, task_flags); /* Fail the I/O. */ response = SAS_TASK_UNDELIVERED; status = SAS_SAM_STAT_TASK_ABORTED; clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); break; case SCI_FAILURE_RETRY_REQUIRED: /* Fail the I/O so it can be retried. */ response = SAS_TASK_UNDELIVERED; if (!idev) status = SAS_DEVICE_UNKNOWN; else status = SAS_ABORTED_TASK; set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); break; default: /* Catch any otherwise unhandled error codes here. */ dev_dbg(&ihost->pdev->dev, "%s: invalid completion code: 0x%x - " "isci_request = %p\n", __func__, completion_status, request); response = SAS_TASK_UNDELIVERED; /* See if the device has been/is being stopped. Note * that we ignore the quiesce state, since we are * concerned about the actual device state. 
*/ if (!idev) status = SAS_DEVICE_UNKNOWN; else status = SAS_ABORTED_TASK; if (SAS_PROTOCOL_SMP == task->task_proto) set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); else clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); break; } switch (task->task_proto) { case SAS_PROTOCOL_SSP: if (task->data_dir == DMA_NONE) break; if (task->num_scatter == 0) /* 0 indicates a single dma address */ dma_unmap_single(&ihost->pdev->dev, request->zero_scatter_daddr, task->total_xfer_len, task->data_dir); else /* unmap the sgl dma addresses */ dma_unmap_sg(&ihost->pdev->dev, task->scatter, request->num_sg_entries, task->data_dir); break; case SAS_PROTOCOL_SMP: { struct scatterlist *sg = &task->smp_task.smp_req; struct smp_req *smp_req; void *kaddr; dma_unmap_sg(&ihost->pdev->dev, sg, 1, DMA_TO_DEVICE); /* need to swab it back in case the command buffer is re-used */ kaddr = kmap_atomic(sg_page(sg)); smp_req = kaddr + sg->offset; sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32)); kunmap_atomic(kaddr); break; } default: break; } spin_lock_irqsave(&task->task_state_lock, task_flags); task->task_status.resp = response; task->task_status.stat = status; if (test_bit(IREQ_COMPLETE_IN_TARGET, &request->flags)) { /* Normal notification (task_done) */ task->task_state_flags |= SAS_TASK_STATE_DONE; task->task_state_flags &= ~SAS_TASK_STATE_PENDING; } spin_unlock_irqrestore(&task->task_state_lock, task_flags); /* complete the io request to the core. */ sci_controller_complete_io(ihost, request->target_device, request); /* set terminated handle so it cannot be completed or * terminated again, and to cause any calls into abort * task to recognize the already completed case. */ set_bit(IREQ_TERMINATED, &request->flags); ireq_done(ihost, request, task); } static void sci_request_started_state_enter(struct sci_base_state_machine *sm) { struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); struct domain_device *dev = ireq->target_device->domain_dev; enum sci_base_request_states state; struct sas_task *task; /* XXX as hch said always creating an internal sas_task for tmf * requests would simplify the driver */ task = (test_bit(IREQ_TMF, &ireq->flags)) ? 
NULL : isci_request_access_task(ireq); /* all unaccelerated request types (non ssp or ncq) handled with * substates */ if (!task && dev->dev_type == SAS_END_DEVICE) { state = SCI_REQ_TASK_WAIT_TC_COMP; } else if (task && task->task_proto == SAS_PROTOCOL_SMP) { state = SCI_REQ_SMP_WAIT_RESP; } else if (task && sas_protocol_ata(task->task_proto) && !task->ata_task.use_ncq) { if (dev->sata_dev.class == ATA_DEV_ATAPI && task->ata_task.fis.command == ATA_CMD_PACKET) { state = SCI_REQ_ATAPI_WAIT_H2D; } else if (task->data_dir == DMA_NONE) { state = SCI_REQ_STP_NON_DATA_WAIT_H2D; } else if (task->ata_task.dma_xfer) { state = SCI_REQ_STP_UDMA_WAIT_TC_COMP; } else /* PIO */ { state = SCI_REQ_STP_PIO_WAIT_H2D; } } else { /* SSP or NCQ are fully accelerated, no substates */ return; } sci_change_state(sm, state); } static void sci_request_completed_state_enter(struct sci_base_state_machine *sm) { struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); struct isci_host *ihost = ireq->owning_controller; /* Tell the SCI_USER that the IO request is complete */ if (!test_bit(IREQ_TMF, &ireq->flags)) isci_request_io_request_complete(ihost, ireq, ireq->sci_status); else isci_task_request_complete(ihost, ireq, ireq->sci_status); } static void sci_request_aborting_state_enter(struct sci_base_state_machine *sm) { struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); /* Setting the abort bit in the Task Context is required by the silicon. */ ireq->tc->abort = 1; } static void sci_stp_request_started_non_data_await_h2d_completion_enter(struct sci_base_state_machine *sm) { struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); ireq->target_device->working_request = ireq; } static void sci_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm) { struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); ireq->target_device->working_request = ireq; } static const struct sci_base_state sci_request_state_table[] = { [SCI_REQ_INIT] = { }, [SCI_REQ_CONSTRUCTED] = { }, [SCI_REQ_STARTED] = { .enter_state = sci_request_started_state_enter, }, [SCI_REQ_STP_NON_DATA_WAIT_H2D] = { .enter_state = sci_stp_request_started_non_data_await_h2d_completion_enter, }, [SCI_REQ_STP_NON_DATA_WAIT_D2H] = { }, [SCI_REQ_STP_PIO_WAIT_H2D] = { .enter_state = sci_stp_request_started_pio_await_h2d_completion_enter, }, [SCI_REQ_STP_PIO_WAIT_FRAME] = { }, [SCI_REQ_STP_PIO_DATA_IN] = { }, [SCI_REQ_STP_PIO_DATA_OUT] = { }, [SCI_REQ_STP_UDMA_WAIT_TC_COMP] = { }, [SCI_REQ_STP_UDMA_WAIT_D2H] = { }, [SCI_REQ_TASK_WAIT_TC_COMP] = { }, [SCI_REQ_TASK_WAIT_TC_RESP] = { }, [SCI_REQ_SMP_WAIT_RESP] = { }, [SCI_REQ_SMP_WAIT_TC_COMP] = { }, [SCI_REQ_ATAPI_WAIT_H2D] = { }, [SCI_REQ_ATAPI_WAIT_PIO_SETUP] = { }, [SCI_REQ_ATAPI_WAIT_D2H] = { }, [SCI_REQ_ATAPI_WAIT_TC_COMP] = { }, [SCI_REQ_COMPLETED] = { .enter_state = sci_request_completed_state_enter, }, [SCI_REQ_ABORTING] = { .enter_state = sci_request_aborting_state_enter, }, [SCI_REQ_FINAL] = { }, }; static void sci_general_request_construct(struct isci_host *ihost, struct isci_remote_device *idev, struct isci_request *ireq) { sci_init_sm(&ireq->sm, sci_request_state_table, SCI_REQ_INIT); ireq->target_device = idev; ireq->protocol = SAS_PROTOCOL_NONE; ireq->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX; ireq->sci_status = SCI_SUCCESS; ireq->scu_status = 0; ireq->post_context = 0xFFFFFFFF; } static enum sci_status sci_io_request_construct(struct isci_host *ihost, struct isci_remote_device *idev, struct isci_request *ireq) { struct domain_device 
*dev = idev->domain_dev; enum sci_status status = SCI_SUCCESS; /* Build the common part of the request */ sci_general_request_construct(ihost, idev, ireq); if (idev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) return SCI_FAILURE_INVALID_REMOTE_DEVICE; if (dev->dev_type == SAS_END_DEVICE) /* pass */; else if (dev_is_sata(dev)) memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd)); else if (dev_is_expander(dev->dev_type)) /* pass */; else return SCI_FAILURE_UNSUPPORTED_PROTOCOL; memset(ireq->tc, 0, offsetof(struct scu_task_context, sgl_pair_ab)); return status; } enum sci_status sci_task_request_construct(struct isci_host *ihost, struct isci_remote_device *idev, u16 io_tag, struct isci_request *ireq) { struct domain_device *dev = idev->domain_dev; enum sci_status status = SCI_SUCCESS; /* Build the common part of the request */ sci_general_request_construct(ihost, idev, ireq); if (dev->dev_type == SAS_END_DEVICE || dev_is_sata(dev)) { set_bit(IREQ_TMF, &ireq->flags); memset(ireq->tc, 0, sizeof(struct scu_task_context)); /* Set the protocol indicator. */ if (dev_is_sata(dev)) ireq->protocol = SAS_PROTOCOL_STP; else ireq->protocol = SAS_PROTOCOL_SSP; } else status = SCI_FAILURE_UNSUPPORTED_PROTOCOL; return status; } static enum sci_status isci_request_ssp_request_construct( struct isci_request *request) { enum sci_status status; dev_dbg(&request->isci_host->pdev->dev, "%s: request = %p\n", __func__, request); status = sci_io_request_construct_basic_ssp(request); return status; } static enum sci_status isci_request_stp_request_construct(struct isci_request *ireq) { struct sas_task *task = isci_request_access_task(ireq); struct host_to_dev_fis *fis = &ireq->stp.cmd; struct ata_queued_cmd *qc = task->uldd_task; enum sci_status status; dev_dbg(&ireq->isci_host->pdev->dev, "%s: ireq = %p\n", __func__, ireq); memcpy(fis, &task->ata_task.fis, sizeof(struct host_to_dev_fis)); if (!task->ata_task.device_control_reg_update) fis->flags |= 0x80; fis->flags &= 0xF0; status = sci_io_request_construct_basic_sata(ireq); if (qc && (qc->tf.command == ATA_CMD_FPDMA_WRITE || qc->tf.command == ATA_CMD_FPDMA_READ || qc->tf.command == ATA_CMD_FPDMA_RECV || qc->tf.command == ATA_CMD_FPDMA_SEND || qc->tf.command == ATA_CMD_NCQ_NON_DATA)) { fis->sector_count = qc->tag << 3; ireq->tc->type.stp.ncq_tag = qc->tag; } return status; } static enum sci_status sci_io_request_construct_smp(struct device *dev, struct isci_request *ireq, struct sas_task *task) { struct scatterlist *sg = &task->smp_task.smp_req; struct isci_remote_device *idev; struct scu_task_context *task_context; struct isci_port *iport; struct smp_req *smp_req; void *kaddr; u8 req_len; u32 cmd; kaddr = kmap_atomic(sg_page(sg)); smp_req = kaddr + sg->offset; /* * Look at the SMP requests' header fields; for certain SAS 1.x SMP * functions under SAS 2.0, a zero request length really indicates * a non-zero default length. */ if (smp_req->req_len == 0) { switch (smp_req->func) { case SMP_DISCOVER: case SMP_REPORT_PHY_ERR_LOG: case SMP_REPORT_PHY_SATA: case SMP_REPORT_ROUTE_INFO: smp_req->req_len = 2; break; case SMP_CONF_ROUTE_INFO: case SMP_PHY_CONTROL: case SMP_PHY_TEST_FUNCTION: smp_req->req_len = 9; break; /* Default - zero is a valid default for 2.0. */ } } req_len = smp_req->req_len; sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32)); cmd = *(u32 *) smp_req; kunmap_atomic(kaddr); if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE)) return SCI_FAILURE; ireq->protocol = SAS_PROTOCOL_SMP; /* byte swap the smp request. 
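 * (The request payload was already byte swapped above via
 * sci_swab32_cpy(), before the dma_map_sg() call.)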
*/ task_context = ireq->tc; idev = ireq->target_device; iport = idev->owning_port; /* * Fill in the TC with its required data * 00h */ task_context->priority = 0; task_context->initiator_request = 1; task_context->connection_rate = idev->connection_rate; task_context->protocol_engine_index = ISCI_PEG; task_context->logical_port_index = iport->physical_port_index; task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP; task_context->abort = 0; task_context->valid = SCU_TASK_CONTEXT_VALID; task_context->context_type = SCU_TASK_CONTEXT_TYPE; /* 04h */ task_context->remote_node_index = idev->rnc.remote_node_index; task_context->command_code = 0; task_context->task_type = SCU_TASK_TYPE_SMP_REQUEST; /* 08h */ task_context->link_layer_control = 0; task_context->do_not_dma_ssp_good_response = 1; task_context->strict_ordering = 0; task_context->control_frame = 1; task_context->timeout_enable = 0; task_context->block_guard_enable = 0; /* 0ch */ task_context->address_modifier = 0; /* 10h */ task_context->ssp_command_iu_length = req_len; /* 14h */ task_context->transfer_length_bytes = 0; /* * 18h ~ 30h, protocol specific * since commandIU has been build by framework at this point, we just * copy the frist DWord from command IU to this location. */ memcpy(&task_context->type.smp, &cmd, sizeof(u32)); /* * 40h * "For SMP you could program it to zero. We would prefer that way * so that done code will be consistent." - Venki */ task_context->task_phase = 0; ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | (iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | ISCI_TAG_TCI(ireq->io_tag)); /* * Copy the physical address for the command buffer to the SCU Task * Context command buffer should not contain command header. */ task_context->command_iu_upper = upper_32_bits(sg_dma_address(sg)); task_context->command_iu_lower = lower_32_bits(sg_dma_address(sg) + sizeof(u32)); /* SMP response comes as UF, so no need to set response IU address. */ task_context->response_iu_upper = 0; task_context->response_iu_lower = 0; sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED); return SCI_SUCCESS; } /* * isci_smp_request_build() - This function builds the smp request. * @ireq: This parameter points to the isci_request allocated in the * request construct function. * * SCI_SUCCESS on successfull completion, or specific failure code. */ static enum sci_status isci_smp_request_build(struct isci_request *ireq) { struct sas_task *task = isci_request_access_task(ireq); struct device *dev = &ireq->isci_host->pdev->dev; enum sci_status status = SCI_FAILURE; status = sci_io_request_construct_smp(dev, ireq, task); if (status != SCI_SUCCESS) dev_dbg(&ireq->isci_host->pdev->dev, "%s: failed with status = %d\n", __func__, status); return status; } /** * isci_io_request_build() - This function builds the io request object. * @ihost: This parameter specifies the ISCI host object * @request: This parameter points to the isci_request object allocated in the * request construct function. * @idev: This parameter is the handle for the sci core's remote device * object that is the destination for this request. * * SCI_SUCCESS on successfull completion, or specific failure code. 
*/ static enum sci_status isci_io_request_build(struct isci_host *ihost, struct isci_request *request, struct isci_remote_device *idev) { enum sci_status status = SCI_SUCCESS; struct sas_task *task = isci_request_access_task(request); dev_dbg(&ihost->pdev->dev, "%s: idev = 0x%p; request = %p, " "num_scatter = %d\n", __func__, idev, request, task->num_scatter); /* map the sgl addresses, if present. * libata does the mapping for sata devices * before we get the request. */ if (task->num_scatter && !sas_protocol_ata(task->task_proto) && !(SAS_PROTOCOL_SMP & task->task_proto)) { request->num_sg_entries = dma_map_sg( &ihost->pdev->dev, task->scatter, task->num_scatter, task->data_dir ); if (request->num_sg_entries == 0) return SCI_FAILURE_INSUFFICIENT_RESOURCES; } status = sci_io_request_construct(ihost, idev, request); if (status != SCI_SUCCESS) { dev_dbg(&ihost->pdev->dev, "%s: failed request construct\n", __func__); return SCI_FAILURE; } switch (task->task_proto) { case SAS_PROTOCOL_SMP: status = isci_smp_request_build(request); break; case SAS_PROTOCOL_SSP: status = isci_request_ssp_request_construct(request); break; case SAS_PROTOCOL_SATA: case SAS_PROTOCOL_STP: case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: status = isci_request_stp_request_construct(request); break; default: dev_dbg(&ihost->pdev->dev, "%s: unknown protocol\n", __func__); return SCI_FAILURE; } return SCI_SUCCESS; } static struct isci_request *isci_request_from_tag(struct isci_host *ihost, u16 tag) { struct isci_request *ireq; ireq = ihost->reqs[ISCI_TAG_TCI(tag)]; ireq->io_tag = tag; ireq->io_request_completion = NULL; ireq->flags = 0; ireq->num_sg_entries = 0; return ireq; } struct isci_request *isci_io_request_from_tag(struct isci_host *ihost, struct sas_task *task, u16 tag) { struct isci_request *ireq; ireq = isci_request_from_tag(ihost, tag); ireq->ttype_ptr.io_task_ptr = task; clear_bit(IREQ_TMF, &ireq->flags); task->lldd_task = ireq; return ireq; } struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost, struct isci_tmf *isci_tmf, u16 tag) { struct isci_request *ireq; ireq = isci_request_from_tag(ihost, tag); ireq->ttype_ptr.tmf_task_ptr = isci_tmf; set_bit(IREQ_TMF, &ireq->flags); return ireq; } int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev, struct sas_task *task, struct isci_request *ireq) { enum sci_status status; unsigned long flags; int ret = 0; status = isci_io_request_build(ihost, ireq, idev); if (status != SCI_SUCCESS) { dev_dbg(&ihost->pdev->dev, "%s: request_construct failed - status = 0x%x\n", __func__, status); return status; } spin_lock_irqsave(&ihost->scic_lock, flags); if (test_bit(IDEV_IO_NCQERROR, &idev->flags)) { if (isci_task_is_ncq_recovery(task)) { /* The device is in an NCQ recovery state. Issue the * request on the task side. Note that it will * complete on the I/O request side because the * request was built that way (ie. * ireq->is_task_management_request is false). */ status = sci_controller_start_task(ihost, idev, ireq); } else { status = SCI_FAILURE; } } else { /* send the request, let the core assign the IO TAG. */ status = sci_controller_start_io(ihost, idev, ireq); } if (status != SCI_SUCCESS && status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) { dev_dbg(&ihost->pdev->dev, "%s: failed request start (0x%x)\n", __func__, status); spin_unlock_irqrestore(&ihost->scic_lock, flags); return status; } /* Either I/O started OK, or the core has signaled that * the device needs a target reset. 
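 * In the reset-required case the code below marks the request
 * IREQ_TERMINATED (it never truly started in hardware), flags the task
 * with SAS_TASK_NEED_DEV_RESET, and hands it to sas_task_abort() so the
 * SCSI error handler can issue the device reset.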
*/ if (status != SCI_SUCCESS) { /* The request did not really start in the * hardware, so clear the request handle * here so no terminations will be done. */ set_bit(IREQ_TERMINATED, &ireq->flags); } spin_unlock_irqrestore(&ihost->scic_lock, flags); if (status == SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) { /* Signal libsas that we need the SCSI error * handler thread to work on this I/O and that * we want a device reset. */ spin_lock_irqsave(&task->task_state_lock, flags); task->task_state_flags |= SAS_TASK_NEED_DEV_RESET; spin_unlock_irqrestore(&task->task_state_lock, flags); /* Cause this task to be scheduled in the SCSI error * handler thread. */ sas_task_abort(task); /* Change the status, since we are holding * the I/O until it is managed by the SCSI * error handler. */ status = SCI_SUCCESS; } return ret; }
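/*
 * Request submission, in brief: isci_io_request_from_tag() (or
 * isci_tmf_request_from_tag() for task management) binds the caller's
 * context to a tag-allocated isci_request, isci_io_request_build()
 * constructs the protocol-specific task context, and
 * isci_request_execute() starts the I/O through sci_controller_start_io()
 * or, for NCQ error recovery, sci_controller_start_task().
 */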
linux-master
drivers/scsi/isci/request.c
/* * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * The full GNU General Public License is included in this distribution * in the file called LICENSE.GPL. * * BSD LICENSE * * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <scsi/sas_ata.h> #include "host.h" #include "isci.h" #include "remote_device.h" #include "remote_node_context.h" #include "scu_event_codes.h" #include "scu_task_context.h" #undef C #define C(a) (#a) const char *rnc_state_name(enum scis_sds_remote_node_context_states state) { static const char * const strings[] = RNC_STATES; if (state >= ARRAY_SIZE(strings)) return "UNKNOWN"; return strings[state]; } #undef C /** * sci_remote_node_context_is_ready() * @sci_rnc: The state of the remote node context object to check. * * This method will return true if the remote node context is in a READY state * otherwise it will return false bool true if the remote node context is in * the ready state. false if the remote node context is not in the ready state. 
*/ bool sci_remote_node_context_is_ready( struct sci_remote_node_context *sci_rnc) { u32 current_state = sci_rnc->sm.current_state_id; if (current_state == SCI_RNC_READY) { return true; } return false; } bool sci_remote_node_context_is_suspended(struct sci_remote_node_context *sci_rnc) { u32 current_state = sci_rnc->sm.current_state_id; if (current_state == SCI_RNC_TX_RX_SUSPENDED) return true; return false; } static union scu_remote_node_context *sci_rnc_by_id(struct isci_host *ihost, u16 id) { if (id < ihost->remote_node_entries && ihost->device_table[id]) return &ihost->remote_node_context_table[id]; return NULL; } static void sci_remote_node_context_construct_buffer(struct sci_remote_node_context *sci_rnc) { struct isci_remote_device *idev = rnc_to_dev(sci_rnc); struct domain_device *dev = idev->domain_dev; int rni = sci_rnc->remote_node_index; union scu_remote_node_context *rnc; struct isci_host *ihost; __le64 sas_addr; ihost = idev->owning_port->owning_controller; rnc = sci_rnc_by_id(ihost, rni); memset(rnc, 0, sizeof(union scu_remote_node_context) * sci_remote_device_node_count(idev)); rnc->ssp.remote_node_index = rni; rnc->ssp.remote_node_port_width = idev->device_port_width; rnc->ssp.logical_port_index = idev->owning_port->physical_port_index; /* sas address is __be64, context ram format is __le64 */ sas_addr = cpu_to_le64(SAS_ADDR(dev->sas_addr)); rnc->ssp.remote_sas_address_hi = upper_32_bits(sas_addr); rnc->ssp.remote_sas_address_lo = lower_32_bits(sas_addr); rnc->ssp.nexus_loss_timer_enable = true; rnc->ssp.check_bit = false; rnc->ssp.is_valid = false; rnc->ssp.is_remote_node_context = true; rnc->ssp.function_number = 0; rnc->ssp.arbitration_wait_time = 0; if (dev_is_sata(dev)) { rnc->ssp.connection_occupancy_timeout = ihost->user_parameters.stp_max_occupancy_timeout; rnc->ssp.connection_inactivity_timeout = ihost->user_parameters.stp_inactivity_timeout; } else { rnc->ssp.connection_occupancy_timeout = ihost->user_parameters.ssp_max_occupancy_timeout; rnc->ssp.connection_inactivity_timeout = ihost->user_parameters.ssp_inactivity_timeout; } rnc->ssp.initial_arbitration_wait_time = 0; /* Open Address Frame Parameters */ rnc->ssp.oaf_connection_rate = idev->connection_rate; rnc->ssp.oaf_features = 0; rnc->ssp.oaf_source_zone_group = 0; rnc->ssp.oaf_more_compatibility_features = 0; } /* * This method will setup the remote node context object so it will transition * to its ready state. If the remote node context is already setup to * transition to its final state then this function does nothing. none */ static void sci_remote_node_context_setup_to_resume( struct sci_remote_node_context *sci_rnc, scics_sds_remote_node_context_callback callback, void *callback_parameter, enum sci_remote_node_context_destination_state dest_param) { if (sci_rnc->destination_state != RNC_DEST_FINAL) { sci_rnc->destination_state = dest_param; if (callback != NULL) { sci_rnc->user_callback = callback; sci_rnc->user_cookie = callback_parameter; } } } static void sci_remote_node_context_setup_to_destroy( struct sci_remote_node_context *sci_rnc, scics_sds_remote_node_context_callback callback, void *callback_parameter) { struct isci_host *ihost = idev_to_ihost(rnc_to_dev(sci_rnc)); sci_rnc->destination_state = RNC_DEST_FINAL; sci_rnc->user_callback = callback; sci_rnc->user_cookie = callback_parameter; wake_up(&ihost->eventq); } /* * This method just calls the user callback function and then resets the * callback. 
*/ static void sci_remote_node_context_notify_user( struct sci_remote_node_context *rnc) { if (rnc->user_callback != NULL) { (*rnc->user_callback)(rnc->user_cookie); rnc->user_callback = NULL; rnc->user_cookie = NULL; } } static void sci_remote_node_context_continue_state_transitions(struct sci_remote_node_context *rnc) { switch (rnc->destination_state) { case RNC_DEST_READY: case RNC_DEST_SUSPENDED_RESUME: rnc->destination_state = RNC_DEST_READY; fallthrough; case RNC_DEST_FINAL: sci_remote_node_context_resume(rnc, rnc->user_callback, rnc->user_cookie); break; default: rnc->destination_state = RNC_DEST_UNSPECIFIED; break; } } static void sci_remote_node_context_validate_context_buffer(struct sci_remote_node_context *sci_rnc) { union scu_remote_node_context *rnc_buffer; struct isci_remote_device *idev = rnc_to_dev(sci_rnc); struct domain_device *dev = idev->domain_dev; struct isci_host *ihost = idev->owning_port->owning_controller; rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index); rnc_buffer->ssp.is_valid = true; if (dev_is_sata(dev) && dev->parent) { sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_96); } else { sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_32); if (!dev->parent) sci_port_setup_transports(idev->owning_port, sci_rnc->remote_node_index); } } static void sci_remote_node_context_invalidate_context_buffer(struct sci_remote_node_context *sci_rnc) { union scu_remote_node_context *rnc_buffer; struct isci_remote_device *idev = rnc_to_dev(sci_rnc); struct isci_host *ihost = idev->owning_port->owning_controller; rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index); rnc_buffer->ssp.is_valid = false; sci_remote_device_post_request(rnc_to_dev(sci_rnc), SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE); } static void sci_remote_node_context_initial_state_enter(struct sci_base_state_machine *sm) { struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); struct isci_remote_device *idev = rnc_to_dev(rnc); struct isci_host *ihost = idev->owning_port->owning_controller; /* Check to see if we have gotten back to the initial state because * someone requested to destroy the remote node context object. */ if (sm->previous_state_id == SCI_RNC_INVALIDATING) { rnc->destination_state = RNC_DEST_UNSPECIFIED; sci_remote_node_context_notify_user(rnc); smp_wmb(); wake_up(&ihost->eventq); } } static void sci_remote_node_context_posting_state_enter(struct sci_base_state_machine *sm) { struct sci_remote_node_context *sci_rnc = container_of(sm, typeof(*sci_rnc), sm); sci_remote_node_context_validate_context_buffer(sci_rnc); } static void sci_remote_node_context_invalidating_state_enter(struct sci_base_state_machine *sm) { struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); /* Terminate all outstanding requests. 
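 * Invalidation both terminates the device's outstanding requests and
 * clears ssp.is_valid in the remote node context RAM before posting
 * SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE to the hardware.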
*/ sci_remote_device_terminate_requests(rnc_to_dev(rnc)); sci_remote_node_context_invalidate_context_buffer(rnc); } static void sci_remote_node_context_resuming_state_enter(struct sci_base_state_machine *sm) { struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); struct isci_remote_device *idev; struct domain_device *dev; idev = rnc_to_dev(rnc); dev = idev->domain_dev; /* * For direct attached SATA devices we need to clear the TLCR * NCQ to TCi tag mapping on the phy and in cases where we * resume because of a target reset we also need to update * the STPTLDARNI register with the RNi of the device */ if (dev_is_sata(dev) && !dev->parent) sci_port_setup_transports(idev->owning_port, rnc->remote_node_index); sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_RESUME); } static void sci_remote_node_context_ready_state_enter(struct sci_base_state_machine *sm) { struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); enum sci_remote_node_context_destination_state dest_select; int tell_user = 1; dest_select = rnc->destination_state; rnc->destination_state = RNC_DEST_UNSPECIFIED; if ((dest_select == RNC_DEST_SUSPENDED) || (dest_select == RNC_DEST_SUSPENDED_RESUME)) { sci_remote_node_context_suspend( rnc, rnc->suspend_reason, SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT); if (dest_select == RNC_DEST_SUSPENDED_RESUME) tell_user = 0; /* Wait until ready again. */ } if (tell_user) sci_remote_node_context_notify_user(rnc); } static void sci_remote_node_context_tx_suspended_state_enter(struct sci_base_state_machine *sm) { struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); sci_remote_node_context_continue_state_transitions(rnc); } static void sci_remote_node_context_tx_rx_suspended_state_enter(struct sci_base_state_machine *sm) { struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); struct isci_remote_device *idev = rnc_to_dev(rnc); struct isci_host *ihost = idev->owning_port->owning_controller; u32 new_count = rnc->suspend_count + 1; if (new_count == 0) rnc->suspend_count = 1; else rnc->suspend_count = new_count; smp_wmb(); /* Terminate outstanding requests pending abort. 
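 * The suspend_count increment above skips zero when it wraps; waiters on
 * ihost->eventq are only woken once the pending aborts have been flushed
 * by sci_remote_device_abort_requests_pending_abort().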
*/ sci_remote_device_abort_requests_pending_abort(idev); wake_up(&ihost->eventq); sci_remote_node_context_continue_state_transitions(rnc); } static void sci_remote_node_context_await_suspend_state_exit( struct sci_base_state_machine *sm) { struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); struct isci_remote_device *idev = rnc_to_dev(rnc); if (dev_is_sata(idev->domain_dev)) isci_dev_set_hang_detection_timeout(idev, 0); } static const struct sci_base_state sci_remote_node_context_state_table[] = { [SCI_RNC_INITIAL] = { .enter_state = sci_remote_node_context_initial_state_enter, }, [SCI_RNC_POSTING] = { .enter_state = sci_remote_node_context_posting_state_enter, }, [SCI_RNC_INVALIDATING] = { .enter_state = sci_remote_node_context_invalidating_state_enter, }, [SCI_RNC_RESUMING] = { .enter_state = sci_remote_node_context_resuming_state_enter, }, [SCI_RNC_READY] = { .enter_state = sci_remote_node_context_ready_state_enter, }, [SCI_RNC_TX_SUSPENDED] = { .enter_state = sci_remote_node_context_tx_suspended_state_enter, }, [SCI_RNC_TX_RX_SUSPENDED] = { .enter_state = sci_remote_node_context_tx_rx_suspended_state_enter, }, [SCI_RNC_AWAIT_SUSPENSION] = { .exit_state = sci_remote_node_context_await_suspend_state_exit, }, }; void sci_remote_node_context_construct(struct sci_remote_node_context *rnc, u16 remote_node_index) { memset(rnc, 0, sizeof(struct sci_remote_node_context)); rnc->remote_node_index = remote_node_index; rnc->destination_state = RNC_DEST_UNSPECIFIED; sci_init_sm(&rnc->sm, sci_remote_node_context_state_table, SCI_RNC_INITIAL); } enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc, u32 event_code) { enum scis_sds_remote_node_context_states state; u32 next_state; state = sci_rnc->sm.current_state_id; switch (state) { case SCI_RNC_POSTING: switch (scu_get_event_code(event_code)) { case SCU_EVENT_POST_RNC_COMPLETE: sci_change_state(&sci_rnc->sm, SCI_RNC_READY); break; default: goto out; } break; case SCI_RNC_INVALIDATING: if (scu_get_event_code(event_code) == SCU_EVENT_POST_RNC_INVALIDATE_COMPLETE) { if (sci_rnc->destination_state == RNC_DEST_FINAL) next_state = SCI_RNC_INITIAL; else next_state = SCI_RNC_POSTING; sci_change_state(&sci_rnc->sm, next_state); } else { switch (scu_get_event_type(event_code)) { case SCU_EVENT_TYPE_RNC_SUSPEND_TX: case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX: /* We really dont care if the hardware is going to suspend * the device since it's being invalidated anyway */ dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), "%s: SCIC Remote Node Context 0x%p was " "suspended by hardware while being " "invalidated.\n", __func__, sci_rnc); break; default: goto out; } } break; case SCI_RNC_RESUMING: if (scu_get_event_code(event_code) == SCU_EVENT_POST_RCN_RELEASE) { sci_change_state(&sci_rnc->sm, SCI_RNC_READY); } else { switch (scu_get_event_type(event_code)) { case SCU_EVENT_TYPE_RNC_SUSPEND_TX: case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX: /* We really dont care if the hardware is going to suspend * the device since it's being resumed anyway */ dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), "%s: SCIC Remote Node Context 0x%p was " "suspended by hardware while being resumed.\n", __func__, sci_rnc); break; default: goto out; } } break; case SCI_RNC_READY: switch (scu_get_event_type(event_code)) { case SCU_EVENT_TL_RNC_SUSPEND_TX: sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED); sci_rnc->suspend_type = scu_get_event_type(event_code); break; case SCU_EVENT_TL_RNC_SUSPEND_TX_RX: sci_change_state(&sci_rnc->sm, 
SCI_RNC_TX_RX_SUSPENDED); sci_rnc->suspend_type = scu_get_event_type(event_code); break; default: goto out; } break; case SCI_RNC_AWAIT_SUSPENSION: switch (scu_get_event_type(event_code)) { case SCU_EVENT_TL_RNC_SUSPEND_TX: next_state = SCI_RNC_TX_SUSPENDED; break; case SCU_EVENT_TL_RNC_SUSPEND_TX_RX: next_state = SCI_RNC_TX_RX_SUSPENDED; break; default: goto out; } if (sci_rnc->suspend_type == scu_get_event_type(event_code)) sci_change_state(&sci_rnc->sm, next_state); break; default: dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), "%s: invalid state: %s\n", __func__, rnc_state_name(state)); return SCI_FAILURE_INVALID_STATE; } return SCI_SUCCESS; out: dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), "%s: code: %#x state: %s\n", __func__, event_code, rnc_state_name(state)); return SCI_FAILURE; } enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc, scics_sds_remote_node_context_callback cb_fn, void *cb_p) { enum scis_sds_remote_node_context_states state; state = sci_rnc->sm.current_state_id; switch (state) { case SCI_RNC_INVALIDATING: sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p); return SCI_SUCCESS; case SCI_RNC_POSTING: case SCI_RNC_RESUMING: case SCI_RNC_READY: case SCI_RNC_TX_SUSPENDED: case SCI_RNC_TX_RX_SUSPENDED: sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p); sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING); return SCI_SUCCESS; case SCI_RNC_AWAIT_SUSPENSION: sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p); return SCI_SUCCESS; case SCI_RNC_INITIAL: dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), "%s: invalid state: %s\n", __func__, rnc_state_name(state)); /* We have decided that the destruct request on the remote node context * can not fail since it is either in the initial/destroyed state or is * can be destroyed. */ return SCI_SUCCESS; default: dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), "%s: invalid state %s\n", __func__, rnc_state_name(state)); return SCI_FAILURE_INVALID_STATE; } } enum sci_status sci_remote_node_context_suspend( struct sci_remote_node_context *sci_rnc, enum sci_remote_node_suspension_reasons suspend_reason, u32 suspend_type) { enum scis_sds_remote_node_context_states state = sci_rnc->sm.current_state_id; struct isci_remote_device *idev = rnc_to_dev(sci_rnc); enum sci_status status = SCI_FAILURE_INVALID_STATE; enum sci_remote_node_context_destination_state dest_param = RNC_DEST_UNSPECIFIED; dev_dbg(scirdev_to_dev(idev), "%s: current state %s, current suspend_type %x dest state %d," " arg suspend_reason %d, arg suspend_type %x", __func__, rnc_state_name(state), sci_rnc->suspend_type, sci_rnc->destination_state, suspend_reason, suspend_type); /* Disable automatic state continuations if explicitly suspending. */ if ((suspend_reason == SCI_HW_SUSPEND) || (sci_rnc->destination_state == RNC_DEST_FINAL)) dest_param = sci_rnc->destination_state; switch (state) { case SCI_RNC_READY: break; case SCI_RNC_INVALIDATING: if (sci_rnc->destination_state == RNC_DEST_FINAL) { dev_warn(scirdev_to_dev(idev), "%s: already destroying %p\n", __func__, sci_rnc); return SCI_FAILURE_INVALID_STATE; } fallthrough; /* and handle like SCI_RNC_POSTING */ case SCI_RNC_RESUMING: fallthrough; /* and handle like SCI_RNC_POSTING */ case SCI_RNC_POSTING: /* Set the destination state to AWAIT - this signals the * entry into the SCI_RNC_READY state that a suspension * needs to be done immediately. 
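 * (The code below records this as RNC_DEST_SUSPENDED; when the ready
 * state is entered, sci_remote_node_context_ready_state_enter() sees the
 * pending destination and issues the suspend right away.)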
*/ if (sci_rnc->destination_state != RNC_DEST_FINAL) sci_rnc->destination_state = RNC_DEST_SUSPENDED; sci_rnc->suspend_type = suspend_type; sci_rnc->suspend_reason = suspend_reason; return SCI_SUCCESS; case SCI_RNC_TX_SUSPENDED: if (suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX) status = SCI_SUCCESS; break; case SCI_RNC_TX_RX_SUSPENDED: if (suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX_RX) status = SCI_SUCCESS; break; case SCI_RNC_AWAIT_SUSPENSION: if ((sci_rnc->suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX_RX) || (suspend_type == sci_rnc->suspend_type)) return SCI_SUCCESS; break; default: dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), "%s: invalid state %s\n", __func__, rnc_state_name(state)); return SCI_FAILURE_INVALID_STATE; } sci_rnc->destination_state = dest_param; sci_rnc->suspend_type = suspend_type; sci_rnc->suspend_reason = suspend_reason; if (status == SCI_SUCCESS) { /* Already in the destination state? */ struct isci_host *ihost = idev->owning_port->owning_controller; wake_up_all(&ihost->eventq); /* Let observers look. */ return SCI_SUCCESS; } if ((suspend_reason == SCI_SW_SUSPEND_NORMAL) || (suspend_reason == SCI_SW_SUSPEND_LINKHANG_DETECT)) { if (suspend_reason == SCI_SW_SUSPEND_LINKHANG_DETECT) isci_dev_set_hang_detection_timeout(idev, 0x00000001); sci_remote_device_post_request( idev, SCI_SOFTWARE_SUSPEND_CMD); } if (state != SCI_RNC_AWAIT_SUSPENSION) sci_change_state(&sci_rnc->sm, SCI_RNC_AWAIT_SUSPENSION); return SCI_SUCCESS; } enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc, scics_sds_remote_node_context_callback cb_fn, void *cb_p) { enum scis_sds_remote_node_context_states state; struct isci_remote_device *idev = rnc_to_dev(sci_rnc); state = sci_rnc->sm.current_state_id; dev_dbg(scirdev_to_dev(idev), "%s: state %s, cb_fn = %p, cb_p = %p; dest_state = %d; " "dev resume path %s\n", __func__, rnc_state_name(state), cb_fn, cb_p, sci_rnc->destination_state, test_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags) ? "<abort active>" : "<normal>"); switch (state) { case SCI_RNC_INITIAL: if (sci_rnc->remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) return SCI_FAILURE_INVALID_STATE; sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p, RNC_DEST_READY); if (!test_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags)) { sci_remote_node_context_construct_buffer(sci_rnc); sci_change_state(&sci_rnc->sm, SCI_RNC_POSTING); } return SCI_SUCCESS; case SCI_RNC_POSTING: case SCI_RNC_INVALIDATING: case SCI_RNC_RESUMING: /* We are still waiting to post when a resume was * requested. */ switch (sci_rnc->destination_state) { case RNC_DEST_SUSPENDED: case RNC_DEST_SUSPENDED_RESUME: /* Previously waiting to suspend after posting. * Now continue onto resumption. */ sci_remote_node_context_setup_to_resume( sci_rnc, cb_fn, cb_p, RNC_DEST_SUSPENDED_RESUME); break; default: sci_remote_node_context_setup_to_resume( sci_rnc, cb_fn, cb_p, RNC_DEST_READY); break; } return SCI_SUCCESS; case SCI_RNC_TX_SUSPENDED: case SCI_RNC_TX_RX_SUSPENDED: { struct domain_device *dev = idev->domain_dev; /* If this is an expander attached SATA device we must * invalidate and repost the RNC since this is the only * way to clear the TCi to NCQ tag mapping table for * the RNi. All other device types we can just resume. 
*/ sci_remote_node_context_setup_to_resume( sci_rnc, cb_fn, cb_p, RNC_DEST_READY); if (!test_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags)) { if ((dev_is_sata(dev) && dev->parent) || (sci_rnc->destination_state == RNC_DEST_FINAL)) sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING); else sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING); } } return SCI_SUCCESS; case SCI_RNC_AWAIT_SUSPENSION: sci_remote_node_context_setup_to_resume( sci_rnc, cb_fn, cb_p, RNC_DEST_SUSPENDED_RESUME); return SCI_SUCCESS; default: dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), "%s: invalid state %s\n", __func__, rnc_state_name(state)); return SCI_FAILURE_INVALID_STATE; } } enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc, struct isci_request *ireq) { enum scis_sds_remote_node_context_states state; state = sci_rnc->sm.current_state_id; switch (state) { case SCI_RNC_READY: return SCI_SUCCESS; case SCI_RNC_TX_SUSPENDED: case SCI_RNC_TX_RX_SUSPENDED: case SCI_RNC_AWAIT_SUSPENSION: dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), "%s: invalid state %s\n", __func__, rnc_state_name(state)); return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED; default: dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)), "%s: invalid state %s\n", __func__, rnc_state_name(state)); return SCI_FAILURE_INVALID_STATE; } } enum sci_status sci_remote_node_context_start_task( struct sci_remote_node_context *sci_rnc, struct isci_request *ireq, scics_sds_remote_node_context_callback cb_fn, void *cb_p) { enum sci_status status = sci_remote_node_context_resume(sci_rnc, cb_fn, cb_p); if (status != SCI_SUCCESS) dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), "%s: resume failed: %d\n", __func__, status); return status; } int sci_remote_node_context_is_safe_to_abort( struct sci_remote_node_context *sci_rnc) { enum scis_sds_remote_node_context_states state; state = sci_rnc->sm.current_state_id; switch (state) { case SCI_RNC_INVALIDATING: case SCI_RNC_TX_RX_SUSPENDED: return 1; case SCI_RNC_POSTING: case SCI_RNC_RESUMING: case SCI_RNC_READY: case SCI_RNC_TX_SUSPENDED: case SCI_RNC_AWAIT_SUSPENSION: case SCI_RNC_INITIAL: return 0; default: dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), "%s: invalid state %d\n", __func__, state); return 0; } }
linux-master
drivers/scsi/isci/remote_node_context.c
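Editorial note: the remote_node_context.c record above drives all transitions through a table of per-state enter/exit callbacks plus an event handler switched on the current state. The standalone C sketch below (hypothetical names, userspace only, not driver code) shows that state-table pattern in miniature under those assumptions.

/* Editorial sketch (not driver code): a miniature state table with
 * per-state enter/exit callbacks, mirroring the sci_base_state /
 * sci_change_state pattern used above. All names are hypothetical. */
#include <stdio.h>

enum demo_state { DEMO_INITIAL, DEMO_POSTING, DEMO_READY, DEMO_STATE_COUNT };

struct demo_sm;

struct demo_base_state {
	void (*enter_state)(struct demo_sm *sm);
	void (*exit_state)(struct demo_sm *sm);
};

struct demo_sm {
	enum demo_state current_state_id;
	const struct demo_base_state *table;
};

static void demo_change_state(struct demo_sm *sm, enum demo_state next)
{
	const struct demo_base_state *old_state = &sm->table[sm->current_state_id];
	const struct demo_base_state *new_state = &sm->table[next];

	if (old_state->exit_state)
		old_state->exit_state(sm);      /* leave the old state */
	sm->current_state_id = next;
	if (new_state->enter_state)
		new_state->enter_state(sm);     /* run the new state's entry action */
}

static void demo_posting_enter(struct demo_sm *sm)
{
	(void)sm;
	printf("entered POSTING\n");
}

static void demo_ready_enter(struct demo_sm *sm)
{
	(void)sm;
	printf("entered READY\n");
}

static const struct demo_base_state demo_state_table[DEMO_STATE_COUNT] = {
	[DEMO_INITIAL] = { 0 },
	[DEMO_POSTING] = { .enter_state = demo_posting_enter },
	[DEMO_READY]   = { .enter_state = demo_ready_enter },
};

int main(void)
{
	struct demo_sm sm = { DEMO_INITIAL, demo_state_table };

	/* An event handler would switch on current_state_id and call this: */
	demo_change_state(&sm, DEMO_POSTING);
	demo_change_state(&sm, DEMO_READY);
	return 0;
}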
/* * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * The full GNU General Public License is included in this distribution * in the file called LICENSE.GPL. * * BSD LICENSE * * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/firmware.h> #include <linux/efi.h> #include <asm/string.h> #include <scsi/scsi_host.h> #include "host.h" #include "isci.h" #include "task.h" #include "probe_roms.h" #define MAJ 1 #define MIN 2 #define BUILD 0 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." 
\ __stringify(BUILD) MODULE_VERSION(DRV_VERSION); static struct scsi_transport_template *isci_transport_template; static const struct pci_device_id isci_id_table[] = { { PCI_VDEVICE(INTEL, 0x1D61),}, { PCI_VDEVICE(INTEL, 0x1D63),}, { PCI_VDEVICE(INTEL, 0x1D65),}, { PCI_VDEVICE(INTEL, 0x1D67),}, { PCI_VDEVICE(INTEL, 0x1D69),}, { PCI_VDEVICE(INTEL, 0x1D6B),}, { PCI_VDEVICE(INTEL, 0x1D60),}, { PCI_VDEVICE(INTEL, 0x1D62),}, { PCI_VDEVICE(INTEL, 0x1D64),}, { PCI_VDEVICE(INTEL, 0x1D66),}, { PCI_VDEVICE(INTEL, 0x1D68),}, { PCI_VDEVICE(INTEL, 0x1D6A),}, {} }; MODULE_DEVICE_TABLE(pci, isci_id_table); /* linux isci specific settings */ unsigned char no_outbound_task_to = 2; module_param(no_outbound_task_to, byte, 0); MODULE_PARM_DESC(no_outbound_task_to, "No Outbound Task Timeout (1us incr)"); u16 ssp_max_occ_to = 20; module_param(ssp_max_occ_to, ushort, 0); MODULE_PARM_DESC(ssp_max_occ_to, "SSP Max occupancy timeout (100us incr)"); u16 stp_max_occ_to = 5; module_param(stp_max_occ_to, ushort, 0); MODULE_PARM_DESC(stp_max_occ_to, "STP Max occupancy timeout (100us incr)"); u16 ssp_inactive_to = 5; module_param(ssp_inactive_to, ushort, 0); MODULE_PARM_DESC(ssp_inactive_to, "SSP inactivity timeout (100us incr)"); u16 stp_inactive_to = 5; module_param(stp_inactive_to, ushort, 0); MODULE_PARM_DESC(stp_inactive_to, "STP inactivity timeout (100us incr)"); unsigned char phy_gen = SCIC_SDS_PARM_GEN2_SPEED; module_param(phy_gen, byte, 0); MODULE_PARM_DESC(phy_gen, "PHY generation (1: 1.5Gbps 2: 3.0Gbps 3: 6.0Gbps)"); unsigned char max_concurr_spinup; module_param(max_concurr_spinup, byte, 0); MODULE_PARM_DESC(max_concurr_spinup, "Max concurrent device spinup"); uint cable_selection_override = CABLE_OVERRIDE_DISABLED; module_param(cable_selection_override, uint, 0); MODULE_PARM_DESC(cable_selection_override, "This field indicates length of the SAS/SATA cable between " "host and device. If any bits > 15 are set (default) " "indicates \"use platform defaults\""); static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = container_of(dev, typeof(*shost), shost_dev); struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost); struct isci_host *ihost = container_of(sas_ha, typeof(*ihost), sas_ha); return snprintf(buf, PAGE_SIZE, "%d\n", ihost->id); } static DEVICE_ATTR(isci_id, S_IRUGO, isci_show_id, NULL); static struct attribute *isci_host_attrs[] = { &dev_attr_isci_id.attr, NULL }; ATTRIBUTE_GROUPS(isci_host); static const struct scsi_host_template isci_sht = { .module = THIS_MODULE, .name = DRV_NAME, .proc_name = DRV_NAME, .queuecommand = sas_queuecommand, .dma_need_drain = ata_scsi_dma_need_drain, .target_alloc = sas_target_alloc, .slave_configure = sas_slave_configure, .scan_finished = isci_host_scan_finished, .scan_start = isci_host_start, .change_queue_depth = sas_change_queue_depth, .bios_param = sas_bios_param, .can_queue = ISCI_CAN_QUEUE_VAL, .this_id = -1, .sg_tablesize = SG_ALL, .max_sectors = SCSI_DEFAULT_MAX_SECTORS, .eh_abort_handler = sas_eh_abort_handler, .eh_device_reset_handler = sas_eh_device_reset_handler, .eh_target_reset_handler = sas_eh_target_reset_handler, .slave_alloc = sas_slave_alloc, .target_destroy = sas_target_destroy, .ioctl = sas_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = sas_ioctl, #endif .shost_groups = isci_host_groups, .track_queue_depth = 1, }; static struct sas_domain_function_template isci_transport_ops = { /* The class calls these to notify the LLDD of an event. 
*/ .lldd_port_formed = isci_port_formed, .lldd_port_deformed = isci_port_deformed, /* The class calls these when a device is found or gone. */ .lldd_dev_found = isci_remote_device_found, .lldd_dev_gone = isci_remote_device_gone, .lldd_execute_task = isci_task_execute_task, /* Task Management Functions. Must be called from process context. */ .lldd_abort_task = isci_task_abort_task, .lldd_abort_task_set = isci_task_abort_task_set, .lldd_clear_task_set = isci_task_clear_task_set, .lldd_I_T_nexus_reset = isci_task_I_T_nexus_reset, .lldd_lu_reset = isci_task_lu_reset, .lldd_query_task = isci_task_query_task, /* ata recovery called from ata-eh */ .lldd_ata_check_ready = isci_ata_check_ready, /* Port and Adapter management */ .lldd_clear_nexus_port = isci_task_clear_nexus_port, .lldd_clear_nexus_ha = isci_task_clear_nexus_ha, /* Phy management */ .lldd_control_phy = isci_phy_control, /* GPIO support */ .lldd_write_gpio = isci_gpio_write, }; /****************************************************************************** * P R O T E C T E D M E T H O D S ******************************************************************************/ /** * isci_register_sas_ha() - This method initializes various lldd * specific members of the sas_ha struct and calls the libsas * sas_register_ha() function. * @isci_host: This parameter specifies the lldd specific wrapper for the * libsas sas_ha struct. * * This method returns an error code indicating success or failure. The user * should check for possible memory allocation error return otherwise, a zero * indicates success. */ static int isci_register_sas_ha(struct isci_host *isci_host) { int i; struct sas_ha_struct *sas_ha = &(isci_host->sas_ha); struct asd_sas_phy **sas_phys; struct asd_sas_port **sas_ports; sas_phys = devm_kcalloc(&isci_host->pdev->dev, SCI_MAX_PHYS, sizeof(void *), GFP_KERNEL); if (!sas_phys) return -ENOMEM; sas_ports = devm_kcalloc(&isci_host->pdev->dev, SCI_MAX_PORTS, sizeof(void *), GFP_KERNEL); if (!sas_ports) return -ENOMEM; sas_ha->sas_ha_name = DRV_NAME; sas_ha->sas_addr = &isci_host->phys[0].sas_addr[0]; for (i = 0; i < SCI_MAX_PHYS; i++) { sas_phys[i] = &isci_host->phys[i].sas_phy; sas_ports[i] = &isci_host->sas_ports[i]; } sas_ha->sas_phy = sas_phys; sas_ha->sas_port = sas_ports; sas_ha->num_phys = SCI_MAX_PHYS; sas_ha->strict_wide_ports = 1; return sas_register_ha(sas_ha); } static void isci_unregister(struct isci_host *isci_host) { struct Scsi_Host *shost; if (!isci_host) return; shost = to_shost(isci_host); sas_unregister_ha(&isci_host->sas_ha); sas_remove_host(shost); scsi_host_put(shost); } static int isci_pci_init(struct pci_dev *pdev) { int err, bar_num, bar_mask = 0; void __iomem * const *iomap; err = pcim_enable_device(pdev); if (err) { dev_err(&pdev->dev, "failed enable PCI device %s!\n", pci_name(pdev)); return err; } for (bar_num = 0; bar_num < SCI_PCI_BAR_COUNT; bar_num++) bar_mask |= 1 << (bar_num * 2); err = pcim_iomap_regions(pdev, bar_mask, DRV_NAME); if (err) return err; iomap = pcim_iomap_table(pdev); if (!iomap) return -ENOMEM; pci_set_master(pdev); err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (err) err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); return err; } static int num_controllers(struct pci_dev *pdev) { /* bar size alone can tell us if we are running with a dual controller * part, no need to trust revision ids that might be under broken firmware * control */ resource_size_t scu_bar_size = pci_resource_len(pdev, SCI_SCU_BAR*2); resource_size_t smu_bar_size = 
pci_resource_len(pdev, SCI_SMU_BAR*2); if (scu_bar_size >= SCI_SCU_BAR_SIZE*SCI_MAX_CONTROLLERS && smu_bar_size >= SCI_SMU_BAR_SIZE*SCI_MAX_CONTROLLERS) return SCI_MAX_CONTROLLERS; else return 1; } static int isci_setup_interrupts(struct pci_dev *pdev) { int err, i, num_msix; struct isci_host *ihost; struct isci_pci_info *pci_info = to_pci_info(pdev); /* * Determine the number of vectors associated with this * PCI function. */ num_msix = num_controllers(pdev) * SCI_NUM_MSI_X_INT; err = pci_alloc_irq_vectors(pdev, num_msix, num_msix, PCI_IRQ_MSIX); if (err < 0) goto intx; for (i = 0; i < num_msix; i++) { int id = i / SCI_NUM_MSI_X_INT; irq_handler_t isr; ihost = pci_info->hosts[id]; /* odd numbered vectors are error interrupts */ if (i & 1) isr = isci_error_isr; else isr = isci_msix_isr; err = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, i), isr, 0, DRV_NAME"-msix", ihost); if (!err) continue; dev_info(&pdev->dev, "msix setup failed falling back to intx\n"); while (i--) { id = i / SCI_NUM_MSI_X_INT; ihost = pci_info->hosts[id]; devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), ihost); } pci_free_irq_vectors(pdev); goto intx; } return 0; intx: for_each_isci_host(i, ihost, pdev) { err = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, 0), isci_intx_isr, IRQF_SHARED, DRV_NAME"-intx", ihost); if (err) break; } return err; } static void isci_user_parameters_get(struct sci_user_parameters *u) { int i; for (i = 0; i < SCI_MAX_PHYS; i++) { struct sci_phy_user_params *u_phy = &u->phys[i]; u_phy->max_speed_generation = phy_gen; /* we are not exporting these for now */ u_phy->align_insertion_frequency = 0x7f; u_phy->in_connection_align_insertion_frequency = 0xff; u_phy->notify_enable_spin_up_insertion_frequency = 0x33; } u->stp_inactivity_timeout = stp_inactive_to; u->ssp_inactivity_timeout = ssp_inactive_to; u->stp_max_occupancy_timeout = stp_max_occ_to; u->ssp_max_occupancy_timeout = ssp_max_occ_to; u->no_outbound_task_timeout = no_outbound_task_to; u->max_concurr_spinup = max_concurr_spinup; } static enum sci_status sci_user_parameters_set(struct isci_host *ihost, struct sci_user_parameters *sci_parms) { u16 index; /* * Validate the user parameters. If they are not legal, then * return a failure. */ for (index = 0; index < SCI_MAX_PHYS; index++) { struct sci_phy_user_params *u; u = &sci_parms->phys[index]; if (!((u->max_speed_generation <= SCIC_SDS_PARM_MAX_SPEED) && (u->max_speed_generation > SCIC_SDS_PARM_NO_SPEED))) return SCI_FAILURE_INVALID_PARAMETER_VALUE; if ((u->in_connection_align_insertion_frequency < 3) || (u->align_insertion_frequency == 0) || (u->notify_enable_spin_up_insertion_frequency == 0)) return SCI_FAILURE_INVALID_PARAMETER_VALUE; } if ((sci_parms->stp_inactivity_timeout == 0) || (sci_parms->ssp_inactivity_timeout == 0) || (sci_parms->stp_max_occupancy_timeout == 0) || (sci_parms->ssp_max_occupancy_timeout == 0) || (sci_parms->no_outbound_task_timeout == 0)) return SCI_FAILURE_INVALID_PARAMETER_VALUE; memcpy(&ihost->user_parameters, sci_parms, sizeof(*sci_parms)); return SCI_SUCCESS; } static void sci_oem_defaults(struct isci_host *ihost) { /* these defaults are overridden by the platform / firmware */ struct sci_user_parameters *user = &ihost->user_parameters; struct sci_oem_params *oem = &ihost->oem_parameters; int i; /* Default to APC mode. */ oem->controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE; /* Default to APC mode. */ oem->controller.max_concurr_spin_up = 1; /* Default to no SSC operation. 
*/ oem->controller.do_enable_ssc = false; /* Default to short cables on all phys. */ oem->controller.cable_selection_mask = 0; /* Initialize all of the port parameter information to narrow ports. */ for (i = 0; i < SCI_MAX_PORTS; i++) oem->ports[i].phy_mask = 0; /* Initialize all of the phy parameter information. */ for (i = 0; i < SCI_MAX_PHYS; i++) { /* Default to 3G (i.e. Gen 2). */ user->phys[i].max_speed_generation = SCIC_SDS_PARM_GEN2_SPEED; /* the frequencies cannot be 0 */ user->phys[i].align_insertion_frequency = 0x7f; user->phys[i].in_connection_align_insertion_frequency = 0xff; user->phys[i].notify_enable_spin_up_insertion_frequency = 0x33; /* Previous Vitesse based expanders had a arbitration issue that * is worked around by having the upper 32-bits of SAS address * with a value greater then the Vitesse company identifier. * Hence, usage of 0x5FCFFFFF. */ oem->phys[i].sas_address.low = 0x1 + ihost->id; oem->phys[i].sas_address.high = 0x5FCFFFFF; } user->stp_inactivity_timeout = 5; user->ssp_inactivity_timeout = 5; user->stp_max_occupancy_timeout = 5; user->ssp_max_occupancy_timeout = 20; user->no_outbound_task_timeout = 2; } static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id) { struct isci_orom *orom = to_pci_info(pdev)->orom; struct sci_user_parameters sci_user_params; u8 oem_version = ISCI_ROM_VER_1_0; struct isci_host *ihost; struct Scsi_Host *shost; int err, i; ihost = devm_kzalloc(&pdev->dev, sizeof(*ihost), GFP_KERNEL); if (!ihost) return NULL; ihost->pdev = pdev; ihost->id = id; spin_lock_init(&ihost->scic_lock); init_waitqueue_head(&ihost->eventq); ihost->sas_ha.dev = &ihost->pdev->dev; ihost->sas_ha.lldd_ha = ihost; tasklet_init(&ihost->completion_tasklet, isci_host_completion_routine, (unsigned long)ihost); /* validate module parameters */ /* TODO: kill struct sci_user_parameters and reference directly */ sci_oem_defaults(ihost); isci_user_parameters_get(&sci_user_params); if (sci_user_parameters_set(ihost, &sci_user_params)) { dev_warn(&pdev->dev, "%s: sci_user_parameters_set failed\n", __func__); return NULL; } /* sanity check platform (or 'firmware') oem parameters */ if (orom) { if (id < 0 || id >= SCI_MAX_CONTROLLERS || id > orom->hdr.num_elements) { dev_warn(&pdev->dev, "parsing firmware oem parameters failed\n"); return NULL; } ihost->oem_parameters = orom->ctrl[id]; oem_version = orom->hdr.version; } /* validate oem parameters (platform, firmware, or built-in defaults) */ if (sci_oem_parameters_validate(&ihost->oem_parameters, oem_version)) { dev_warn(&pdev->dev, "oem parameter validation failed\n"); return NULL; } for (i = 0; i < SCI_MAX_PORTS; i++) { struct isci_port *iport = &ihost->ports[i]; INIT_LIST_HEAD(&iport->remote_dev_list); iport->isci_host = ihost; } for (i = 0; i < SCI_MAX_PHYS; i++) isci_phy_init(&ihost->phys[i], ihost, i); for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) { struct isci_remote_device *idev = &ihost->devices[i]; INIT_LIST_HEAD(&idev->node); } shost = scsi_host_alloc(&isci_sht, sizeof(void *)); if (!shost) return NULL; dev_info(&pdev->dev, "%sSCU controller %d: phy 3-0 cables: " "{%s, %s, %s, %s}\n", (is_cable_select_overridden() ? 
"* " : ""), ihost->id, lookup_cable_names(decode_cable_selection(ihost, 3)), lookup_cable_names(decode_cable_selection(ihost, 2)), lookup_cable_names(decode_cable_selection(ihost, 1)), lookup_cable_names(decode_cable_selection(ihost, 0))); err = isci_host_init(ihost); if (err) goto err_shost; SHOST_TO_SAS_HA(shost) = &ihost->sas_ha; ihost->sas_ha.shost = shost; shost->transportt = isci_transport_template; shost->max_id = ~0; shost->max_lun = ~0; shost->max_cmd_len = MAX_COMMAND_SIZE; /* turn on DIF support */ scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION | SHOST_DIF_TYPE3_PROTECTION); scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC); err = scsi_add_host(shost, &pdev->dev); if (err) goto err_shost; err = isci_register_sas_ha(ihost); if (err) goto err_shost_remove; return ihost; err_shost_remove: scsi_remove_host(shost); err_shost: scsi_host_put(shost); return NULL; } static int isci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct isci_pci_info *pci_info; int err, i; struct isci_host *isci_host; const struct firmware *fw = NULL; struct isci_orom *orom = NULL; char *source = "(platform)"; dev_info(&pdev->dev, "driver configured for rev: %d silicon\n", pdev->revision); pci_info = devm_kzalloc(&pdev->dev, sizeof(*pci_info), GFP_KERNEL); if (!pci_info) return -ENOMEM; pci_set_drvdata(pdev, pci_info); if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE)) orom = isci_get_efi_var(pdev); if (!orom) orom = isci_request_oprom(pdev); for (i = 0; orom && i < num_controllers(pdev); i++) { if (sci_oem_parameters_validate(&orom->ctrl[i], orom->hdr.version)) { dev_warn(&pdev->dev, "[%d]: invalid oem parameters detected, falling back to firmware\n", i); orom = NULL; break; } } if (!orom) { source = "(firmware)"; orom = isci_request_firmware(pdev, fw); if (!orom) { /* TODO convert this to WARN_TAINT_ONCE once the * orom/efi parameter support is widely available */ dev_warn(&pdev->dev, "Loading user firmware failed, using default " "values\n"); dev_warn(&pdev->dev, "Default OEM configuration being used: 4 " "narrow ports, and default SAS Addresses\n"); } } if (orom) dev_info(&pdev->dev, "OEM SAS parameters (version: %u.%u) loaded %s\n", (orom->hdr.version & 0xf0) >> 4, (orom->hdr.version & 0xf), source); pci_info->orom = orom; err = isci_pci_init(pdev); if (err) return err; for (i = 0; i < num_controllers(pdev); i++) { struct isci_host *h = isci_host_alloc(pdev, i); if (!h) { err = -ENOMEM; goto err_host_alloc; } pci_info->hosts[i] = h; } err = isci_setup_interrupts(pdev); if (err) goto err_host_alloc; for_each_isci_host(i, isci_host, pdev) scsi_scan_host(to_shost(isci_host)); return 0; err_host_alloc: for_each_isci_host(i, isci_host, pdev) isci_unregister(isci_host); return err; } static void isci_pci_remove(struct pci_dev *pdev) { struct isci_host *ihost; int i; for_each_isci_host(i, ihost, pdev) { wait_for_start(ihost); isci_unregister(ihost); isci_host_deinit(ihost); } } #ifdef CONFIG_PM_SLEEP static int isci_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct isci_host *ihost; int i; for_each_isci_host(i, ihost, pdev) { sas_suspend_ha(&ihost->sas_ha); isci_host_deinit(ihost); } return 0; } static int isci_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct isci_host *ihost; int i; for_each_isci_host(i, ihost, pdev) { sas_prep_resume_ha(&ihost->sas_ha); isci_host_init(ihost); isci_host_start(ihost->sas_ha.shost); wait_for_start(ihost); sas_resume_ha(&ihost->sas_ha); } return 0; } #endif 
static SIMPLE_DEV_PM_OPS(isci_pm_ops, isci_suspend, isci_resume); static struct pci_driver isci_pci_driver = { .name = DRV_NAME, .id_table = isci_id_table, .probe = isci_pci_probe, .remove = isci_pci_remove, .driver.pm = &isci_pm_ops, }; static __init int isci_init(void) { int err; pr_info("%s: Intel(R) C600 SAS Controller Driver - version %s\n", DRV_NAME, DRV_VERSION); isci_transport_template = sas_domain_attach_transport(&isci_transport_ops); if (!isci_transport_template) return -ENOMEM; err = pci_register_driver(&isci_pci_driver); if (err) sas_release_transport(isci_transport_template); return err; } static __exit void isci_exit(void) { pci_unregister_driver(&isci_pci_driver); sas_release_transport(isci_transport_template); } MODULE_LICENSE("Dual BSD/GPL"); MODULE_FIRMWARE(ISCI_FW_NAME); module_init(isci_init); module_exit(isci_exit);
linux-master
drivers/scsi/isci/init.c
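Editorial note: the init.c record above decides between one and two controllers per PCI function by comparing BAR sizes rather than trusting revision IDs. The sketch below is a hedged, self-contained restatement of that comparison; the constants and function name are hypothetical stand-ins, not the driver's values.

/* Editorial sketch (not driver code): dual-controller detection keyed off
 * BAR lengths, in the spirit of num_controllers() above. Constants are
 * hypothetical. */
#include <stdio.h>
#include <stdint.h>

#define DEMO_MAX_CONTROLLERS 2
#define DEMO_SCU_BAR_SIZE    (4u * 1024 * 1024)  /* per-controller SCU window */
#define DEMO_SMU_BAR_SIZE    (16u * 1024)        /* per-controller SMU window */

static int demo_num_controllers(uint64_t scu_bar_len, uint64_t smu_bar_len)
{
	/* Report two controllers only when *both* BARs are large enough to
	 * hold two per-controller register windows. */
	if (scu_bar_len >= (uint64_t)DEMO_SCU_BAR_SIZE * DEMO_MAX_CONTROLLERS &&
	    smu_bar_len >= (uint64_t)DEMO_SMU_BAR_SIZE * DEMO_MAX_CONTROLLERS)
		return DEMO_MAX_CONTROLLERS;
	return 1;
}

int main(void)
{
	printf("single-size BARs -> %d controller(s)\n",
	       demo_num_controllers(DEMO_SCU_BAR_SIZE, DEMO_SMU_BAR_SIZE));
	printf("double-size BARs -> %d controller(s)\n",
	       demo_num_controllers(2ULL * DEMO_SCU_BAR_SIZE,
				    2ULL * DEMO_SMU_BAR_SIZE));
	return 0;
}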
/* * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * The full GNU General Public License is included in this distribution * in the file called LICENSE.GPL. * * BSD LICENSE * * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "host.h" #include "unsolicited_frame_control.h" #include "registers.h" void sci_unsolicited_frame_control_construct(struct isci_host *ihost) { struct sci_unsolicited_frame_control *uf_control = &ihost->uf_control; struct sci_unsolicited_frame *uf; dma_addr_t dma = ihost->ufi_dma; void *virt = ihost->ufi_buf; int i; /* * The Unsolicited Frame buffers are set at the start of the UF * memory descriptor entry. The headers and address table will be * placed after the buffers. */ /* * Program the location of the UF header table into the SCU. * Notes: * - The address must align on a 64-byte boundary. Guaranteed to be * on 64-byte boundary already 1KB boundary for unsolicited frames. * - Program unused header entries to overlap with the last * unsolicited frame. The silicon will never DMA to these unused * headers, since we program the UF address table pointers to * NULL. 
*/ uf_control->headers.physical_address = dma + SCI_UFI_BUF_SIZE; uf_control->headers.array = virt + SCI_UFI_BUF_SIZE; /* * Program the location of the UF address table into the SCU. * Notes: * - The address must align on a 64-bit boundary. Guaranteed to be on 64 * byte boundary already due to above programming headers being on a * 64-bit boundary and headers are on a 64-bytes in size. */ uf_control->address_table.physical_address = dma + SCI_UFI_BUF_SIZE + SCI_UFI_HDR_SIZE; uf_control->address_table.array = virt + SCI_UFI_BUF_SIZE + SCI_UFI_HDR_SIZE; uf_control->get = 0; /* * UF buffer requirements are: * - The last entry in the UF queue is not NULL. * - There is a power of 2 number of entries (NULL or not-NULL) * programmed into the queue. * - Aligned on a 1KB boundary. */ /* * Program the actual used UF buffers into the UF address table and * the controller's array of UFs. */ for (i = 0; i < SCU_MAX_UNSOLICITED_FRAMES; i++) { uf = &uf_control->buffers.array[i]; uf_control->address_table.array[i] = dma; uf->buffer = virt; uf->header = &uf_control->headers.array[i]; uf->state = UNSOLICITED_FRAME_EMPTY; /* * Increment the address of the physical and virtual memory * pointers. Everything is aligned on 1k boundary with an * increment of 1k. */ virt += SCU_UNSOLICITED_FRAME_BUFFER_SIZE; dma += SCU_UNSOLICITED_FRAME_BUFFER_SIZE; } } enum sci_status sci_unsolicited_frame_control_get_header(struct sci_unsolicited_frame_control *uf_control, u32 frame_index, void **frame_header) { if (frame_index < SCU_MAX_UNSOLICITED_FRAMES) { /* Skip the first word in the frame since this is a controll word used * by the hardware. */ *frame_header = &uf_control->buffers.array[frame_index].header->data; return SCI_SUCCESS; } return SCI_FAILURE_INVALID_PARAMETER_VALUE; } enum sci_status sci_unsolicited_frame_control_get_buffer(struct sci_unsolicited_frame_control *uf_control, u32 frame_index, void **frame_buffer) { if (frame_index < SCU_MAX_UNSOLICITED_FRAMES) { *frame_buffer = uf_control->buffers.array[frame_index].buffer; return SCI_SUCCESS; } return SCI_FAILURE_INVALID_PARAMETER_VALUE; } bool sci_unsolicited_frame_control_release_frame(struct sci_unsolicited_frame_control *uf_control, u32 frame_index) { u32 frame_get; u32 frame_cycle; frame_get = uf_control->get & (SCU_MAX_UNSOLICITED_FRAMES - 1); frame_cycle = uf_control->get & SCU_MAX_UNSOLICITED_FRAMES; /* * In the event there are NULL entries in the UF table, we need to * advance the get pointer in order to find out if this frame should * be released (i.e. update the get pointer) */ while (lower_32_bits(uf_control->address_table.array[frame_get]) == 0 && upper_32_bits(uf_control->address_table.array[frame_get]) == 0 && frame_get < SCU_MAX_UNSOLICITED_FRAMES) frame_get++; /* * The table has a NULL entry as it's last element. This is * illegal. 
*/ BUG_ON(frame_get >= SCU_MAX_UNSOLICITED_FRAMES); if (frame_index >= SCU_MAX_UNSOLICITED_FRAMES) return false; uf_control->buffers.array[frame_index].state = UNSOLICITED_FRAME_RELEASED; if (frame_get != frame_index) { /* * Frames remain in use until we advance the get pointer * so there is nothing we can do here */ return false; } /* * The frame index is equal to the current get pointer so we * can now free up all of the frame entries that */ while (uf_control->buffers.array[frame_get].state == UNSOLICITED_FRAME_RELEASED) { uf_control->buffers.array[frame_get].state = UNSOLICITED_FRAME_EMPTY; if (frame_get+1 == SCU_MAX_UNSOLICITED_FRAMES-1) { frame_cycle ^= SCU_MAX_UNSOLICITED_FRAMES; frame_get = 0; } else frame_get++; } uf_control->get = SCU_UFQGP_GEN_BIT(ENABLE_BIT) | frame_cycle | frame_get; return true; }
linux-master
drivers/scsi/isci/unsolicited_frame_control.c
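Editorial note: the unsolicited frame release path above tracks a power-of-two ring with a "get" index plus a cycle bit that flips when the index wraps, so the hardware can distinguish a full queue from an empty one. The sketch below is a simplified, self-contained illustration of that index-plus-cycle-bit arithmetic; it does not reproduce the driver's exact wrap condition, and all names and sizes are hypothetical.

/* Editorial sketch (not driver code): a get pointer over a power-of-two
 * ring whose cycle bit toggles on wrap. */
#include <stdio.h>

#define DEMO_RING_ENTRIES 8            /* must be a power of two */

struct demo_ring {
	unsigned int get;              /* low bits: index, next bit up: cycle */
};

static void demo_advance_get(struct demo_ring *ring)
{
	unsigned int index = ring->get & (DEMO_RING_ENTRIES - 1);
	unsigned int cycle = ring->get & DEMO_RING_ENTRIES;

	if (index + 1 == DEMO_RING_ENTRIES) {
		cycle ^= DEMO_RING_ENTRIES;   /* wrapped: flip the cycle bit */
		index = 0;
	} else {
		index++;
	}
	ring->get = cycle | index;
}

int main(void)
{
	struct demo_ring ring = { 0 };
	int i;

	for (i = 0; i < 10; i++) {
		demo_advance_get(&ring);
		printf("get=%u index=%u cycle=%u\n", ring.get,
		       ring.get & (DEMO_RING_ENTRIES - 1),
		       !!(ring.get & DEMO_RING_ENTRIES));
	}
	return 0;
}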
/* * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * The full GNU General Public License is included in this distribution * in the file called LICENSE.GPL. * * BSD LICENSE * * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <linux/completion.h> #include <linux/irqflags.h> #include "sas.h" #include <scsi/libsas.h> #include "remote_device.h" #include "remote_node_context.h" #include "isci.h" #include "request.h" #include "task.h" #include "host.h" /** * isci_task_refuse() - complete the request to the upper layer driver in * the case where an I/O needs to be completed back in the submit path. * @ihost: host on which the the request was queued * @task: request to complete * @response: response code for the completed task. * @status: status code for the completed task. 
* */ static void isci_task_refuse(struct isci_host *ihost, struct sas_task *task, enum service_response response, enum exec_status status) { unsigned long flags; /* Normal notification (task_done) */ dev_dbg(&ihost->pdev->dev, "%s: task = %p, response=%d, status=%d\n", __func__, task, response, status); spin_lock_irqsave(&task->task_state_lock, flags); task->task_status.resp = response; task->task_status.stat = status; /* Normal notification (task_done) */ task->task_state_flags |= SAS_TASK_STATE_DONE; task->task_state_flags &= ~SAS_TASK_STATE_PENDING; task->lldd_task = NULL; spin_unlock_irqrestore(&task->task_state_lock, flags); task->task_done(task); } #define for_each_sas_task(num, task) \ for (; num > 0; num--,\ task = list_entry(task->list.next, struct sas_task, list)) static inline int isci_device_io_ready(struct isci_remote_device *idev, struct sas_task *task) { return idev ? test_bit(IDEV_IO_READY, &idev->flags) || (test_bit(IDEV_IO_NCQERROR, &idev->flags) && isci_task_is_ncq_recovery(task)) : 0; } /** * isci_task_execute_task() - This function is one of the SAS Domain Template * functions. This function is called by libsas to send a task down to * hardware. * @task: This parameter specifies the SAS task to send. * @gfp_flags: This parameter specifies the context of this call. * * status, zero indicates success. */ int isci_task_execute_task(struct sas_task *task, gfp_t gfp_flags) { struct isci_host *ihost = dev_to_ihost(task->dev); struct isci_remote_device *idev; unsigned long flags; enum sci_status status = SCI_FAILURE; bool io_ready; u16 tag; spin_lock_irqsave(&ihost->scic_lock, flags); idev = isci_lookup_device(task->dev); io_ready = isci_device_io_ready(idev, task); tag = isci_alloc_tag(ihost); spin_unlock_irqrestore(&ihost->scic_lock, flags); dev_dbg(&ihost->pdev->dev, "task: %p, dev: %p idev: %p:%#lx cmd = %p\n", task, task->dev, idev, idev ? idev->flags : 0, task->uldd_task); if (!idev) { isci_task_refuse(ihost, task, SAS_TASK_UNDELIVERED, SAS_DEVICE_UNKNOWN); } else if (!io_ready || tag == SCI_CONTROLLER_INVALID_IO_TAG) { /* Indicate QUEUE_FULL so that the scsi midlayer * retries. */ isci_task_refuse(ihost, task, SAS_TASK_COMPLETE, SAS_QUEUE_FULL); } else { /* There is a device and it's ready for I/O. */ spin_lock_irqsave(&task->task_state_lock, flags); if (task->task_state_flags & SAS_TASK_STATE_ABORTED) { /* The I/O was aborted. */ spin_unlock_irqrestore(&task->task_state_lock, flags); isci_task_refuse(ihost, task, SAS_TASK_UNDELIVERED, SAS_SAM_STAT_TASK_ABORTED); } else { struct isci_request *ireq; /* do common allocation and init of request object. */ ireq = isci_io_request_from_tag(ihost, task, tag); spin_unlock_irqrestore(&task->task_state_lock, flags); /* build and send the request. */ /* do common allocation and init of request object. */ status = isci_request_execute(ihost, idev, task, ireq); if (status != SCI_SUCCESS) { if (test_bit(IDEV_GONE, &idev->flags)) { /* Indicate that the device * is gone. */ isci_task_refuse(ihost, task, SAS_TASK_UNDELIVERED, SAS_DEVICE_UNKNOWN); } else { /* Indicate QUEUE_FULL so that * the scsi midlayer retries. * If the request failed for * remote device reasons, it * gets returned as * SAS_TASK_UNDELIVERED next * time through. 
*/ isci_task_refuse(ihost, task, SAS_TASK_COMPLETE, SAS_QUEUE_FULL); } } } } if (status != SCI_SUCCESS && tag != SCI_CONTROLLER_INVALID_IO_TAG) { spin_lock_irqsave(&ihost->scic_lock, flags); /* command never hit the device, so just free * the tci and skip the sequence increment */ isci_tci_free(ihost, ISCI_TAG_TCI(tag)); spin_unlock_irqrestore(&ihost->scic_lock, flags); } isci_put_device(idev); return 0; } static struct isci_request *isci_task_request_build(struct isci_host *ihost, struct isci_remote_device *idev, u16 tag, struct isci_tmf *isci_tmf) { enum sci_status status = SCI_FAILURE; struct isci_request *ireq = NULL; struct domain_device *dev; dev_dbg(&ihost->pdev->dev, "%s: isci_tmf = %p\n", __func__, isci_tmf); dev = idev->domain_dev; /* do common allocation and init of request object. */ ireq = isci_tmf_request_from_tag(ihost, isci_tmf, tag); if (!ireq) return NULL; /* let the core do it's construct. */ status = sci_task_request_construct(ihost, idev, tag, ireq); if (status != SCI_SUCCESS) { dev_warn(&ihost->pdev->dev, "%s: sci_task_request_construct failed - " "status = 0x%x\n", __func__, status); return NULL; } /* XXX convert to get this from task->tproto like other drivers */ if (dev->dev_type == SAS_END_DEVICE) { isci_tmf->proto = SAS_PROTOCOL_SSP; status = sci_task_request_construct_ssp(ireq); if (status != SCI_SUCCESS) return NULL; } return ireq; } static int isci_task_execute_tmf(struct isci_host *ihost, struct isci_remote_device *idev, struct isci_tmf *tmf, unsigned long timeout_ms) { DECLARE_COMPLETION_ONSTACK(completion); enum sci_status status = SCI_FAILURE; struct isci_request *ireq; int ret = TMF_RESP_FUNC_FAILED; unsigned long flags; unsigned long timeleft; u16 tag; spin_lock_irqsave(&ihost->scic_lock, flags); tag = isci_alloc_tag(ihost); spin_unlock_irqrestore(&ihost->scic_lock, flags); if (tag == SCI_CONTROLLER_INVALID_IO_TAG) return ret; /* sanity check, return TMF_RESP_FUNC_FAILED * if the device is not there and ready. */ if (!idev || (!test_bit(IDEV_IO_READY, &idev->flags) && !test_bit(IDEV_IO_NCQERROR, &idev->flags))) { dev_dbg(&ihost->pdev->dev, "%s: idev = %p not ready (%#lx)\n", __func__, idev, idev ? idev->flags : 0); goto err_tci; } else dev_dbg(&ihost->pdev->dev, "%s: idev = %p\n", __func__, idev); /* Assign the pointer to the TMF's completion kernel wait structure. */ tmf->complete = &completion; tmf->status = SCI_FAILURE_TIMEOUT; ireq = isci_task_request_build(ihost, idev, tag, tmf); if (!ireq) goto err_tci; spin_lock_irqsave(&ihost->scic_lock, flags); /* start the TMF io. */ status = sci_controller_start_task(ihost, idev, ireq); if (status != SCI_SUCCESS) { dev_dbg(&ihost->pdev->dev, "%s: start_io failed - status = 0x%x, request = %p\n", __func__, status, ireq); spin_unlock_irqrestore(&ihost->scic_lock, flags); goto err_tci; } spin_unlock_irqrestore(&ihost->scic_lock, flags); /* The RNC must be unsuspended before the TMF can get a response. */ isci_remote_device_resume_from_abort(ihost, idev); /* Wait for the TMF to complete, or a timeout. */ timeleft = wait_for_completion_timeout(&completion, msecs_to_jiffies(timeout_ms)); if (timeleft == 0) { /* The TMF did not complete - this could be because * of an unplug. Terminate the TMF request now. 
*/ isci_remote_device_suspend_terminate(ihost, idev, ireq); } isci_print_tmf(ihost, tmf); if (tmf->status == SCI_SUCCESS) ret = TMF_RESP_FUNC_COMPLETE; else if (tmf->status == SCI_FAILURE_IO_RESPONSE_VALID) { dev_dbg(&ihost->pdev->dev, "%s: tmf.status == " "SCI_FAILURE_IO_RESPONSE_VALID\n", __func__); ret = TMF_RESP_FUNC_COMPLETE; } /* Else - leave the default "failed" status alone. */ dev_dbg(&ihost->pdev->dev, "%s: completed request = %p\n", __func__, ireq); return ret; err_tci: spin_lock_irqsave(&ihost->scic_lock, flags); isci_tci_free(ihost, ISCI_TAG_TCI(tag)); spin_unlock_irqrestore(&ihost->scic_lock, flags); return ret; } static void isci_task_build_tmf(struct isci_tmf *tmf, enum isci_tmf_function_codes code) { memset(tmf, 0, sizeof(*tmf)); tmf->tmf_code = code; } static void isci_task_build_abort_task_tmf(struct isci_tmf *tmf, enum isci_tmf_function_codes code, struct isci_request *old_request) { isci_task_build_tmf(tmf, code); tmf->io_tag = old_request->io_tag; } /* * isci_task_send_lu_reset_sas() - This function is called by of the SAS Domain * Template functions. * @lun: This parameter specifies the lun to be reset. * * status, zero indicates success. */ static int isci_task_send_lu_reset_sas( struct isci_host *isci_host, struct isci_remote_device *isci_device, u8 *lun) { struct isci_tmf tmf; int ret = TMF_RESP_FUNC_FAILED; dev_dbg(&isci_host->pdev->dev, "%s: isci_host = %p, isci_device = %p\n", __func__, isci_host, isci_device); /* Send the LUN reset to the target. By the time the call returns, * the TMF has fully exected in the target (in which case the return * value is "TMF_RESP_FUNC_COMPLETE", or the request timed-out (or * was otherwise unable to be executed ("TMF_RESP_FUNC_FAILED"). */ isci_task_build_tmf(&tmf, isci_tmf_ssp_lun_reset); #define ISCI_LU_RESET_TIMEOUT_MS 2000 /* 2 second timeout. */ ret = isci_task_execute_tmf(isci_host, isci_device, &tmf, ISCI_LU_RESET_TIMEOUT_MS); if (ret == TMF_RESP_FUNC_COMPLETE) dev_dbg(&isci_host->pdev->dev, "%s: %p: TMF_LU_RESET passed\n", __func__, isci_device); else dev_dbg(&isci_host->pdev->dev, "%s: %p: TMF_LU_RESET failed (%x)\n", __func__, isci_device, ret); return ret; } int isci_task_lu_reset(struct domain_device *dev, u8 *lun) { struct isci_host *ihost = dev_to_ihost(dev); struct isci_remote_device *idev; unsigned long flags; int ret = TMF_RESP_FUNC_COMPLETE; spin_lock_irqsave(&ihost->scic_lock, flags); idev = isci_get_device(dev->lldd_dev); spin_unlock_irqrestore(&ihost->scic_lock, flags); dev_dbg(&ihost->pdev->dev, "%s: domain_device=%p, isci_host=%p; isci_device=%p\n", __func__, dev, ihost, idev); if (!idev) { /* If the device is gone, escalate to I_T_Nexus_Reset. */ dev_dbg(&ihost->pdev->dev, "%s: No dev\n", __func__); ret = TMF_RESP_FUNC_FAILED; goto out; } /* Suspend the RNC, kill all TCs */ if (isci_remote_device_suspend_terminate(ihost, idev, NULL) != SCI_SUCCESS) { /* The suspend/terminate only fails if isci_get_device fails */ ret = TMF_RESP_FUNC_FAILED; goto out; } /* All pending I/Os have been terminated and cleaned up. */ if (!test_bit(IDEV_GONE, &idev->flags)) { if (dev_is_sata(dev)) sas_ata_schedule_reset(dev); else /* Send the task management part of the reset. 
*/ ret = isci_task_send_lu_reset_sas(ihost, idev, lun); } out: isci_put_device(idev); return ret; } /* int (*lldd_clear_nexus_port)(struct asd_sas_port *); */ int isci_task_clear_nexus_port(struct asd_sas_port *port) { return TMF_RESP_FUNC_FAILED; } int isci_task_clear_nexus_ha(struct sas_ha_struct *ha) { return TMF_RESP_FUNC_FAILED; } /* Task Management Functions. Must be called from process context. */ /** * isci_task_abort_task() - This function is one of the SAS Domain Template * functions. This function is called by libsas to abort a specified task. * @task: This parameter specifies the SAS task to abort. * * status, zero indicates success. */ int isci_task_abort_task(struct sas_task *task) { struct isci_host *ihost = dev_to_ihost(task->dev); DECLARE_COMPLETION_ONSTACK(aborted_io_completion); struct isci_request *old_request = NULL; struct isci_remote_device *idev = NULL; struct isci_tmf tmf; int ret = TMF_RESP_FUNC_FAILED; unsigned long flags; int target_done_already = 0; /* Get the isci_request reference from the task. Note that * this check does not depend on the pending request list * in the device, because tasks driving resets may land here * after completion in the core. */ spin_lock_irqsave(&ihost->scic_lock, flags); spin_lock(&task->task_state_lock); old_request = task->lldd_task; /* If task is already done, the request isn't valid */ if (!(task->task_state_flags & SAS_TASK_STATE_DONE) && old_request) { idev = isci_get_device(task->dev->lldd_dev); target_done_already = test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags); } spin_unlock(&task->task_state_lock); spin_unlock_irqrestore(&ihost->scic_lock, flags); dev_warn(&ihost->pdev->dev, "%s: dev = %p (%s%s), task = %p, old_request == %p\n", __func__, idev, (dev_is_sata(task->dev) ? "STP/SATA" : ((dev_is_expander(task->dev->dev_type)) ? "SMP" : "SSP")), ((idev) ? ((test_bit(IDEV_GONE, &idev->flags)) ? " IDEV_GONE" : "") : " <NULL>"), task, old_request); /* Device reset conditions signalled in task_state_flags are the * responsbility of libsas to observe at the start of the error * handler thread. */ if (!idev || !old_request) { /* The request has already completed and there * is nothing to do here other than to set the task * done bit, and indicate that the task abort function * was successful. */ spin_lock_irqsave(&task->task_state_lock, flags); task->task_state_flags |= SAS_TASK_STATE_DONE; task->task_state_flags &= ~SAS_TASK_STATE_PENDING; spin_unlock_irqrestore(&task->task_state_lock, flags); ret = TMF_RESP_FUNC_COMPLETE; dev_warn(&ihost->pdev->dev, "%s: abort task not needed for %p\n", __func__, task); goto out; } /* Suspend the RNC, kill the TC */ if (isci_remote_device_suspend_terminate(ihost, idev, old_request) != SCI_SUCCESS) { dev_warn(&ihost->pdev->dev, "%s: isci_remote_device_reset_terminate(dev=%p, " "req=%p, task=%p) failed\n", __func__, idev, old_request, task); ret = TMF_RESP_FUNC_FAILED; goto out; } spin_lock_irqsave(&ihost->scic_lock, flags); if (task->task_proto == SAS_PROTOCOL_SMP || sas_protocol_ata(task->task_proto) || target_done_already || test_bit(IDEV_GONE, &idev->flags)) { spin_unlock_irqrestore(&ihost->scic_lock, flags); /* No task to send, so explicitly resume the device here */ isci_remote_device_resume_from_abort(ihost, idev); dev_warn(&ihost->pdev->dev, "%s: %s request" " or complete_in_target (%d), " "or IDEV_GONE (%d), thus no TMF\n", __func__, ((task->task_proto == SAS_PROTOCOL_SMP) ? "SMP" : (sas_protocol_ata(task->task_proto) ? 
"SATA/STP" : "<other>") ), test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags), test_bit(IDEV_GONE, &idev->flags)); spin_lock_irqsave(&task->task_state_lock, flags); task->task_state_flags &= ~SAS_TASK_STATE_PENDING; task->task_state_flags |= SAS_TASK_STATE_DONE; spin_unlock_irqrestore(&task->task_state_lock, flags); ret = TMF_RESP_FUNC_COMPLETE; } else { /* Fill in the tmf structure */ isci_task_build_abort_task_tmf(&tmf, isci_tmf_ssp_task_abort, old_request); spin_unlock_irqrestore(&ihost->scic_lock, flags); /* Send the task management request. */ #define ISCI_ABORT_TASK_TIMEOUT_MS 500 /* 1/2 second timeout */ ret = isci_task_execute_tmf(ihost, idev, &tmf, ISCI_ABORT_TASK_TIMEOUT_MS); } out: dev_warn(&ihost->pdev->dev, "%s: Done; dev = %p, task = %p , old_request == %p\n", __func__, idev, task, old_request); isci_put_device(idev); return ret; } /** * isci_task_abort_task_set() - This function is one of the SAS Domain Template * functions. This is one of the Task Management functoins called by libsas, * to abort all task for the given lun. * @d_device: This parameter specifies the domain device associated with this * request. * @lun: This parameter specifies the lun associated with this request. * * status, zero indicates success. */ int isci_task_abort_task_set( struct domain_device *d_device, u8 *lun) { return TMF_RESP_FUNC_FAILED; } /** * isci_task_clear_task_set() - This function is one of the SAS Domain Template * functions. This is one of the Task Management functoins called by libsas. * @d_device: This parameter specifies the domain device associated with this * request. * @lun: This parameter specifies the lun associated with this request. * * status, zero indicates success. */ int isci_task_clear_task_set( struct domain_device *d_device, u8 *lun) { return TMF_RESP_FUNC_FAILED; } /** * isci_task_query_task() - This function is implemented to cause libsas to * correctly escalate the failed abort to a LUN or target reset (this is * because sas_scsi_find_task libsas function does not correctly interpret * all return codes from the abort task call). When TMF_RESP_FUNC_SUCC is * returned, libsas turns this into a LUN reset; when FUNC_FAILED is * returned, libsas will turn this into a target reset * @task: This parameter specifies the sas task being queried. * * status, zero indicates success. */ int isci_task_query_task( struct sas_task *task) { /* See if there is a pending device reset for this device. */ if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET) return TMF_RESP_FUNC_FAILED; else return TMF_RESP_FUNC_SUCC; } /* * isci_task_request_complete() - This function is called by the sci core when * an task request completes. * @ihost: This parameter specifies the ISCI host object * @ireq: This parameter is the completed isci_request object. * @completion_status: This parameter specifies the completion status from the * sci core. * * none. 
*/ void isci_task_request_complete(struct isci_host *ihost, struct isci_request *ireq, enum sci_task_status completion_status) { struct isci_tmf *tmf = isci_request_access_tmf(ireq); struct completion *tmf_complete = NULL; dev_dbg(&ihost->pdev->dev, "%s: request = %p, status=%d\n", __func__, ireq, completion_status); set_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags); if (tmf) { tmf->status = completion_status; if (tmf->proto == SAS_PROTOCOL_SSP) { memcpy(tmf->resp.rsp_buf, ireq->ssp.rsp_buf, SSP_RESP_IU_MAX_SIZE); } else if (tmf->proto == SAS_PROTOCOL_SATA) { memcpy(&tmf->resp.d2h_fis, &ireq->stp.rsp, sizeof(struct dev_to_host_fis)); } /* PRINT_TMF( ((struct isci_tmf *)request->task)); */ tmf_complete = tmf->complete; } sci_controller_complete_io(ihost, ireq->target_device, ireq); /* set the 'terminated' flag handle to make sure it cannot be terminated * or completed again. */ set_bit(IREQ_TERMINATED, &ireq->flags); if (test_and_clear_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags)) wake_up_all(&ihost->eventq); if (!test_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags)) isci_free_tag(ihost, ireq->io_tag); /* The task management part completes last. */ if (tmf_complete) complete(tmf_complete); } static int isci_reset_device(struct isci_host *ihost, struct domain_device *dev, struct isci_remote_device *idev) { int rc = TMF_RESP_FUNC_COMPLETE, reset_stat = -1; struct sas_phy *phy = sas_get_local_phy(dev); struct isci_port *iport = dev->port->lldd_port; dev_dbg(&ihost->pdev->dev, "%s: idev %p\n", __func__, idev); /* Suspend the RNC, terminate all outstanding TCs. */ if (isci_remote_device_suspend_terminate(ihost, idev, NULL) != SCI_SUCCESS) { rc = TMF_RESP_FUNC_FAILED; goto out; } /* Note that since the termination for outstanding requests succeeded, * this function will return success. This is because the resets will * only fail if the device has been removed (ie. hotplug), and the * primary duty of this function is to cleanup tasks, so that is the * relevant status. */ if (!test_bit(IDEV_GONE, &idev->flags)) { if (scsi_is_sas_phy_local(phy)) { struct isci_phy *iphy = &ihost->phys[phy->number]; reset_stat = isci_port_perform_hard_reset(ihost, iport, iphy); } else reset_stat = sas_phy_reset(phy, !dev_is_sata(dev)); } /* Explicitly resume the RNC here, since there was no task sent. */ isci_remote_device_resume_from_abort(ihost, idev); dev_dbg(&ihost->pdev->dev, "%s: idev %p complete, reset_stat=%d.\n", __func__, idev, reset_stat); out: sas_put_local_phy(phy); return rc; } int isci_task_I_T_nexus_reset(struct domain_device *dev) { struct isci_host *ihost = dev_to_ihost(dev); struct isci_remote_device *idev; unsigned long flags; int ret; spin_lock_irqsave(&ihost->scic_lock, flags); idev = isci_get_device(dev->lldd_dev); spin_unlock_irqrestore(&ihost->scic_lock, flags); if (!idev) { /* XXX: need to cleanup any ireqs targeting this * domain_device */ ret = -ENODEV; goto out; } ret = isci_reset_device(ihost, dev, idev); out: isci_put_device(idev); return ret; }
linux-master
drivers/scsi/isci/task.c
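Editorial note: the submit path in the task.c record above chooses between refusing a task (device unknown, queue full, or already aborted) and actually issuing it. The sketch below reduces that decision to a pure function; the enum values and helper are hypothetical stand-ins for the libsas status codes, not the driver's types.

/* Editorial sketch (not driver code): the isci_task_execute_task()
 * submit-path decision expressed as a pure classification function. */
#include <stdio.h>
#include <stdbool.h>

enum demo_verdict {
	DEMO_EXECUTE,          /* device present and ready: issue the I/O   */
	DEMO_DEVICE_UNKNOWN,   /* no remote device: refuse as undelivered   */
	DEMO_QUEUE_FULL,       /* not ready or no free tag: ask for a retry */
	DEMO_TASK_ABORTED      /* task already flagged as aborted           */
};

static enum demo_verdict demo_classify(bool have_device, bool io_ready,
				       bool have_tag, bool aborted)
{
	if (!have_device)
		return DEMO_DEVICE_UNKNOWN;
	if (!io_ready || !have_tag)
		return DEMO_QUEUE_FULL;
	if (aborted)
		return DEMO_TASK_ABORTED;
	return DEMO_EXECUTE;
}

int main(void)
{
	printf("%d\n", demo_classify(true, true, true, false));   /* EXECUTE */
	printf("%d\n", demo_classify(true, false, true, false));  /* QUEUE_FULL */
	printf("%d\n", demo_classify(false, true, true, false));  /* DEVICE_UNKNOWN */
	return 0;
}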
/* * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * The full GNU General Public License is included in this distribution * in the file called LICENSE.GPL. * * BSD LICENSE * * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "host.h" #define SCIC_SDS_MPC_RECONFIGURATION_TIMEOUT (10) #define SCIC_SDS_APC_RECONFIGURATION_TIMEOUT (10) #define SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION (1000) enum SCIC_SDS_APC_ACTIVITY { SCIC_SDS_APC_SKIP_PHY, SCIC_SDS_APC_ADD_PHY, SCIC_SDS_APC_START_TIMER, SCIC_SDS_APC_ACTIVITY_MAX }; /* * ****************************************************************************** * General port configuration agent routines * ****************************************************************************** */ /** * sci_sas_address_compare() * @address_one: A SAS Address to be compared. * @address_two: A SAS Address to be compared. 
* * Compare two SAS addresses. Return a positive value if SAS Address One is * greater than SAS Address Two, a negative value if SAS Address One is less * than SAS Address Two, and 0 if the two addresses are identical. */ static s32 sci_sas_address_compare( struct sci_sas_address address_one, struct sci_sas_address address_two) { if (address_one.high > address_two.high) { return 1; } else if (address_one.high < address_two.high) { return -1; } else if (address_one.low > address_two.low) { return 1; } else if (address_one.low < address_two.low) { return -1; } /* The two SAS addresses must be identical */ return 0; } /** * sci_port_configuration_agent_find_port() * @ihost: The controller object used for the port search. * @iphy: The phy object to match. * * This routine will find a matching port for the phy. This means that the * port and phy both have the same broadcast sas address and same received sas * address. Returns the port if one can be found to match the phy, or * NULL if there is no matching port for the phy. */ static struct isci_port *sci_port_configuration_agent_find_port( struct isci_host *ihost, struct isci_phy *iphy) { u8 i; struct sci_sas_address port_sas_address; struct sci_sas_address port_attached_device_address; struct sci_sas_address phy_sas_address; struct sci_sas_address phy_attached_device_address; /* * Since this phy can be a member of a wide port check to see if one or * more phys match the sent and received SAS address as this phy in which * case it should participate in the same port. */ sci_phy_get_sas_address(iphy, &phy_sas_address); sci_phy_get_attached_sas_address(iphy, &phy_attached_device_address); for (i = 0; i < ihost->logical_port_entries; i++) { struct isci_port *iport = &ihost->ports[i]; sci_port_get_sas_address(iport, &port_sas_address); sci_port_get_attached_sas_address(iport, &port_attached_device_address); if (sci_sas_address_compare(port_sas_address, phy_sas_address) == 0 && sci_sas_address_compare(port_attached_device_address, phy_attached_device_address) == 0) return iport; } return NULL; } /** * sci_port_configuration_agent_validate_ports() * @ihost: This is the controller object that contains the port agent * @port_agent: This is the port configuration agent for the controller. * * This routine will validate that the port configuration is correct for the SCU * hardware. The SCU hardware allows the following port configurations: LP0 * -> (PE0), (PE0, PE1), (PE0, PE1, PE2, PE3); LP1 -> (PE1); LP2 -> (PE2), (PE2, * PE3); LP3 -> (PE3). Returns SCI_SUCCESS if the port configuration is valid for * this port configuration agent, or SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION * if the port configuration is not valid for this port configuration agent.
*/ static enum sci_status sci_port_configuration_agent_validate_ports( struct isci_host *ihost, struct sci_port_configuration_agent *port_agent) { struct sci_sas_address first_address; struct sci_sas_address second_address; /* * Sanity check the max ranges for all the phys the max index * is always equal to the port range index */ if (port_agent->phy_valid_port_range[0].max_index != 0 || port_agent->phy_valid_port_range[1].max_index != 1 || port_agent->phy_valid_port_range[2].max_index != 2 || port_agent->phy_valid_port_range[3].max_index != 3) return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION; /* * This is a request to configure a single x4 port or at least attempt * to make all the phys into a single port */ if (port_agent->phy_valid_port_range[0].min_index == 0 && port_agent->phy_valid_port_range[1].min_index == 0 && port_agent->phy_valid_port_range[2].min_index == 0 && port_agent->phy_valid_port_range[3].min_index == 0) return SCI_SUCCESS; /* * This is a degenerate case where phy 1 and phy 2 are assigned * to the same port this is explicitly disallowed by the hardware * unless they are part of the same x4 port and this condition was * already checked above. */ if (port_agent->phy_valid_port_range[2].min_index == 1) { return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION; } /* * PE0 and PE3 can never have the same SAS Address unless they * are part of the same x4 wide port and we have already checked * for this condition. */ sci_phy_get_sas_address(&ihost->phys[0], &first_address); sci_phy_get_sas_address(&ihost->phys[3], &second_address); if (sci_sas_address_compare(first_address, second_address) == 0) { return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION; } /* * PE0 and PE1 are configured into a 2x1 ports make sure that the * SAS Address for PE0 and PE2 are different since they can not be * part of the same port. */ if (port_agent->phy_valid_port_range[0].min_index == 0 && port_agent->phy_valid_port_range[1].min_index == 1) { sci_phy_get_sas_address(&ihost->phys[0], &first_address); sci_phy_get_sas_address(&ihost->phys[2], &second_address); if (sci_sas_address_compare(first_address, second_address) == 0) { return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION; } } /* * PE2 and PE3 are configured into a 2x1 ports make sure that the * SAS Address for PE1 and PE3 are different since they can not be * part of the same port. 
*/ if (port_agent->phy_valid_port_range[2].min_index == 2 && port_agent->phy_valid_port_range[3].min_index == 3) { sci_phy_get_sas_address(&ihost->phys[1], &first_address); sci_phy_get_sas_address(&ihost->phys[3], &second_address); if (sci_sas_address_compare(first_address, second_address) == 0) { return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION; } } return SCI_SUCCESS; } /* * ****************************************************************************** * Manual port configuration agent routines * ****************************************************************************** */ /* verify all of the phys in the same port are using the same SAS address */ static enum sci_status sci_mpc_agent_validate_phy_configuration(struct isci_host *ihost, struct sci_port_configuration_agent *port_agent) { u32 phy_mask; u32 assigned_phy_mask; struct sci_sas_address sas_address; struct sci_sas_address phy_assigned_address; u8 port_index; u8 phy_index; assigned_phy_mask = 0; sas_address.high = 0; sas_address.low = 0; for (port_index = 0; port_index < SCI_MAX_PORTS; port_index++) { phy_mask = ihost->oem_parameters.ports[port_index].phy_mask; if (!phy_mask) continue; /* * Make sure that one or more of the phys were not already assigned to * a different port. */ if ((phy_mask & ~assigned_phy_mask) == 0) { return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION; } /* Find the starting phy index for this round through the loop */ for (phy_index = 0; phy_index < SCI_MAX_PHYS; phy_index++) { if ((phy_mask & (1 << phy_index)) == 0) continue; sci_phy_get_sas_address(&ihost->phys[phy_index], &sas_address); /* * The phy_index can be used as the starting point for the * port range since the hardware starts all logical ports * the same as the PE index. */ port_agent->phy_valid_port_range[phy_index].min_index = port_index; port_agent->phy_valid_port_range[phy_index].max_index = phy_index; if (phy_index != port_index) { return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION; } break; } /* * See how many additional phys are being added to this logical port. * Note: We have not moved the current phy_index so we will actually * compare the starting phy with itself. * This is expected and required to add the phy to the port.
*/ for (; phy_index < SCI_MAX_PHYS; phy_index++) { if ((phy_mask & (1 << phy_index)) == 0) continue; sci_phy_get_sas_address(&ihost->phys[phy_index], &phy_assigned_address); if (sci_sas_address_compare(sas_address, phy_assigned_address) != 0) { /* * The phy mask specified that this phy is part of the same port * as the starting phy and it is not so fail this configuration */ return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION; } port_agent->phy_valid_port_range[phy_index].min_index = port_index; port_agent->phy_valid_port_range[phy_index].max_index = phy_index; sci_port_add_phy(&ihost->ports[port_index], &ihost->phys[phy_index]); assigned_phy_mask |= (1 << phy_index); } } return sci_port_configuration_agent_validate_ports(ihost, port_agent); } static void mpc_agent_timeout(struct timer_list *t) { u8 index; struct sci_timer *tmr = from_timer(tmr, t, timer); struct sci_port_configuration_agent *port_agent; struct isci_host *ihost; unsigned long flags; u16 configure_phy_mask; port_agent = container_of(tmr, typeof(*port_agent), timer); ihost = container_of(port_agent, typeof(*ihost), port_agent); spin_lock_irqsave(&ihost->scic_lock, flags); if (tmr->cancel) goto done; port_agent->timer_pending = false; /* Find the mask of phys that are reported ready but as yet unconfigured into a port */ configure_phy_mask = ~port_agent->phy_configured_mask & port_agent->phy_ready_mask; for (index = 0; index < SCI_MAX_PHYS; index++) { struct isci_phy *iphy = &ihost->phys[index]; if (configure_phy_mask & (1 << index)) { port_agent->link_up_handler(ihost, port_agent, phy_get_non_dummy_port(iphy), iphy); } } done: spin_unlock_irqrestore(&ihost->scic_lock, flags); } static void sci_mpc_agent_link_up(struct isci_host *ihost, struct sci_port_configuration_agent *port_agent, struct isci_port *iport, struct isci_phy *iphy) { /* If the port is NULL then the phy was not assigned to a port. * This is because the phy was not given the same SAS Address as * the other PHYs in the port. */ if (!iport) return; port_agent->phy_ready_mask |= (1 << iphy->phy_index); sci_port_link_up(iport, iphy); if ((iport->active_phy_mask & (1 << iphy->phy_index))) port_agent->phy_configured_mask |= (1 << iphy->phy_index); } /** * sci_mpc_agent_link_down() * @ihost: This is the controller object that receives the link down * notification. * @port_agent: This is the port configuration agent for the controller. * @iport: This is the port object associated with the phy. If there is no * associated port this is NULL. The port handle is invalid * only if the phy was never part of this port. This happens when * the phy is not broadcasting the same SAS address as the other phys in the * assigned port. * @iphy: This is the phy object which has gone link down. * * This function handles the manual port configuration link down notifications. * Since all ports and phys are associated at initialization time we just turn * around and notify the port object of the link down event. If this PHY is * not associated with a port no action is taken. Is it possible to get a * link down notification from a phy that has no associated port?
*/ static void sci_mpc_agent_link_down( struct isci_host *ihost, struct sci_port_configuration_agent *port_agent, struct isci_port *iport, struct isci_phy *iphy) { if (iport != NULL) { /* * If we can form a new port from the remainder of the phys * then we want to start the timer to allow the SCI User to * cleanup old devices and rediscover the port before * rebuilding the port with the phys that remain in the ready * state. */ port_agent->phy_ready_mask &= ~(1 << iphy->phy_index); port_agent->phy_configured_mask &= ~(1 << iphy->phy_index); /* * Check to see if there are more phys waiting to be * configured into a port. If there are allow the SCI User * to tear down this port, if necessary, and then reconstruct * the port after the timeout. */ if ((port_agent->phy_configured_mask == 0x0000) && (port_agent->phy_ready_mask != 0x0000) && !port_agent->timer_pending) { port_agent->timer_pending = true; sci_mod_timer(&port_agent->timer, SCIC_SDS_MPC_RECONFIGURATION_TIMEOUT); } sci_port_link_down(iport, iphy); } } /* verify phys are assigned a valid SAS address for automatic port * configuration mode. */ static enum sci_status sci_apc_agent_validate_phy_configuration(struct isci_host *ihost, struct sci_port_configuration_agent *port_agent) { u8 phy_index; u8 port_index; struct sci_sas_address sas_address; struct sci_sas_address phy_assigned_address; phy_index = 0; while (phy_index < SCI_MAX_PHYS) { port_index = phy_index; /* Get the assigned SAS Address for the first PHY on the controller. */ sci_phy_get_sas_address(&ihost->phys[phy_index], &sas_address); while (++phy_index < SCI_MAX_PHYS) { sci_phy_get_sas_address(&ihost->phys[phy_index], &phy_assigned_address); /* Verify each of the SAS address are all the same for every PHY */ if (sci_sas_address_compare(sas_address, phy_assigned_address) == 0) { port_agent->phy_valid_port_range[phy_index].min_index = port_index; port_agent->phy_valid_port_range[phy_index].max_index = phy_index; } else { port_agent->phy_valid_port_range[phy_index].min_index = phy_index; port_agent->phy_valid_port_range[phy_index].max_index = phy_index; break; } } } return sci_port_configuration_agent_validate_ports(ihost, port_agent); } /* * This routine will restart the automatic port configuration timeout * timer for the next time period. This could be caused by either a link * down event or a link up event where we can not yet tell to which a phy * belongs. */ static void sci_apc_agent_start_timer(struct sci_port_configuration_agent *port_agent, u32 timeout) { port_agent->timer_pending = true; sci_mod_timer(&port_agent->timer, timeout); } static void sci_apc_agent_configure_ports(struct isci_host *ihost, struct sci_port_configuration_agent *port_agent, struct isci_phy *iphy, bool start_timer) { u8 port_index; enum sci_status status; struct isci_port *iport; enum SCIC_SDS_APC_ACTIVITY apc_activity = SCIC_SDS_APC_SKIP_PHY; iport = sci_port_configuration_agent_find_port(ihost, iphy); if (iport) { if (sci_port_is_valid_phy_assignment(iport, iphy->phy_index)) apc_activity = SCIC_SDS_APC_ADD_PHY; else apc_activity = SCIC_SDS_APC_SKIP_PHY; } else { /* * There is no matching Port for this PHY so lets search through the * Ports and see if we can add the PHY to its own port or maybe start * the timer and wait to see if a wider port can be made. 
* * Note the break when we reach the condition where the port id == phy id. */ for (port_index = port_agent->phy_valid_port_range[iphy->phy_index].min_index; port_index <= port_agent->phy_valid_port_range[iphy->phy_index].max_index; port_index++) { iport = &ihost->ports[port_index]; /* First we must make sure that this PHY can be added to this Port. */ if (sci_port_is_valid_phy_assignment(iport, iphy->phy_index)) { /* * Port contains a PHY with a greater PHY ID than the current * PHY that has gone link up. This phy can not be part of any * port so skip it and move on. */ if (iport->active_phy_mask > (1 << iphy->phy_index)) { apc_activity = SCIC_SDS_APC_SKIP_PHY; break; } /* * We have reached the end of our Port list and have not found * any reason why we should not either add the PHY to the port * or wait for more phys to become active. */ if (iport->physical_port_index == iphy->phy_index) { /* * The Port either has no active PHYs or only active PHYs with * a lower PHY Id than this PHY (had there been one with a * higher Id, this port would have been skipped above), so the * PHY may be added to this Port. */ if (apc_activity != SCIC_SDS_APC_START_TIMER) { apc_activity = SCIC_SDS_APC_ADD_PHY; } break; } /* * The current Port has no active PHYs and this PHY could be part * of this Port. Since we don't know yet, set up to start the * timer and see if there is a better configuration. */ if (iport->active_phy_mask == 0) { apc_activity = SCIC_SDS_APC_START_TIMER; } } else if (iport->active_phy_mask != 0) { /* * The Port has an active phy and the current Phy can not * participate in this port so skip the PHY and see if * there is a better configuration. */ apc_activity = SCIC_SDS_APC_SKIP_PHY; } } } /* * Check to see if the start timer operation should instead map to an * add phy operation. This happens because we have been waiting to * add a phy to a port but could not because the automatic port * configuration engine had a choice of possible ports for the phy. * Since we have gone through a timeout we are going to restrict the * choice to the smallest possible port. */ if ( (start_timer == false) && (apc_activity == SCIC_SDS_APC_START_TIMER) ) { apc_activity = SCIC_SDS_APC_ADD_PHY; } switch (apc_activity) { case SCIC_SDS_APC_ADD_PHY: status = sci_port_add_phy(iport, iphy); if (status == SCI_SUCCESS) { port_agent->phy_configured_mask |= (1 << iphy->phy_index); } break; case SCIC_SDS_APC_START_TIMER: sci_apc_agent_start_timer(port_agent, SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION); break; case SCIC_SDS_APC_SKIP_PHY: default: /* do nothing; the PHY can not be made part of a port at this time. */ break; } } /** * sci_apc_agent_link_up - handle apc link up events * @ihost: This is the controller object that receives the link up * notification. * @port_agent: This is the port configuration agent for the controller. * @iport: This is the port object associated with the phy. If there is no * associated port this is NULL. * @iphy: This is the phy object which has gone link up. * * This method handles the automatic port configuration for link up * notifications. Is it possible to get a link down notification from a phy * that has no associated port?
*/ static void sci_apc_agent_link_up(struct isci_host *ihost, struct sci_port_configuration_agent *port_agent, struct isci_port *iport, struct isci_phy *iphy) { u8 phy_index = iphy->phy_index; if (!iport) { /* the phy is not the part of this port */ port_agent->phy_ready_mask |= 1 << phy_index; sci_apc_agent_start_timer(port_agent, SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION); } else { /* the phy is already the part of the port */ port_agent->phy_ready_mask |= 1 << phy_index; sci_port_link_up(iport, iphy); } } /** * sci_apc_agent_link_down() * @ihost: This is the controller object that receives the link down * notification. * @port_agent: This is the port configuration agent for the controller. * @iport: This is the port object associated with the phy. If the is no * associated port this is an NULL. * @iphy: This is the phy object which has gone link down. * * This method handles the automatic port configuration link down * notifications. not associated with a port there is no action taken. Is it * possible to get a link down notification from a phy that has no assocoated * port? */ static void sci_apc_agent_link_down( struct isci_host *ihost, struct sci_port_configuration_agent *port_agent, struct isci_port *iport, struct isci_phy *iphy) { port_agent->phy_ready_mask &= ~(1 << iphy->phy_index); if (!iport) return; if (port_agent->phy_configured_mask & (1 << iphy->phy_index)) { enum sci_status status; status = sci_port_remove_phy(iport, iphy); if (status == SCI_SUCCESS) port_agent->phy_configured_mask &= ~(1 << iphy->phy_index); } } /* configure the phys into ports when the timer fires */ static void apc_agent_timeout(struct timer_list *t) { u32 index; struct sci_timer *tmr = from_timer(tmr, t, timer); struct sci_port_configuration_agent *port_agent; struct isci_host *ihost; unsigned long flags; u16 configure_phy_mask; port_agent = container_of(tmr, typeof(*port_agent), timer); ihost = container_of(port_agent, typeof(*ihost), port_agent); spin_lock_irqsave(&ihost->scic_lock, flags); if (tmr->cancel) goto done; port_agent->timer_pending = false; configure_phy_mask = ~port_agent->phy_configured_mask & port_agent->phy_ready_mask; if (!configure_phy_mask) goto done; for (index = 0; index < SCI_MAX_PHYS; index++) { if ((configure_phy_mask & (1 << index)) == 0) continue; sci_apc_agent_configure_ports(ihost, port_agent, &ihost->phys[index], false); } if (is_controller_start_complete(ihost)) sci_controller_transition_to_ready(ihost, SCI_SUCCESS); done: spin_unlock_irqrestore(&ihost->scic_lock, flags); } /* * ****************************************************************************** * Public port configuration agent routines * ****************************************************************************** */ /* * This method will construct the port configuration agent for operation. This * call is universal for both manual port configuration and automatic port * configuration modes. 
*/ void sci_port_configuration_agent_construct( struct sci_port_configuration_agent *port_agent) { u32 index; port_agent->phy_configured_mask = 0x00; port_agent->phy_ready_mask = 0x00; port_agent->link_up_handler = NULL; port_agent->link_down_handler = NULL; port_agent->timer_pending = false; for (index = 0; index < SCI_MAX_PORTS; index++) { port_agent->phy_valid_port_range[index].min_index = 0; port_agent->phy_valid_port_range[index].max_index = 0; } } bool is_port_config_apc(struct isci_host *ihost) { return ihost->port_agent.link_up_handler == sci_apc_agent_link_up; } enum sci_status sci_port_configuration_agent_initialize( struct isci_host *ihost, struct sci_port_configuration_agent *port_agent) { enum sci_status status; enum sci_port_configuration_mode mode; mode = ihost->oem_parameters.controller.mode_type; if (mode == SCIC_PORT_MANUAL_CONFIGURATION_MODE) { status = sci_mpc_agent_validate_phy_configuration( ihost, port_agent); port_agent->link_up_handler = sci_mpc_agent_link_up; port_agent->link_down_handler = sci_mpc_agent_link_down; sci_init_timer(&port_agent->timer, mpc_agent_timeout); } else { status = sci_apc_agent_validate_phy_configuration( ihost, port_agent); port_agent->link_up_handler = sci_apc_agent_link_up; port_agent->link_down_handler = sci_apc_agent_link_down; sci_init_timer(&port_agent->timer, apc_agent_timeout); } return status; }
linux-master
drivers/scsi/isci/port_config.c
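The wide-port matching in port_config.c above hinges on sci_sas_address_compare(): a phy joins an existing port only when both its local and its attached SAS address compare equal to the port's. The comparison itself is a plain ordered check of the 32-bit high word followed by the low word. The sketch below is a standalone, simplified illustration of that ordering; its sas_addr struct and the sample address values are assumptions made for the example, not the driver's types.

/* Hedged sketch of a high-word/low-word SAS address comparison, in the spirit
 * of sci_sas_address_compare(); the struct and test values are illustrative. */
#include <stdio.h>
#include <stdint.h>

struct sas_addr { uint32_t high; uint32_t low; };

static int sas_addr_cmp(struct sas_addr a, struct sas_addr b)
{
	if (a.high > b.high) return 1;
	if (a.high < b.high) return -1;
	if (a.low > b.low) return 1;
	if (a.low < b.low) return -1;
	return 0;	/* identical addresses */
}

int main(void)
{
	struct sas_addr port  = { .high = 0x5000C500, .low = 0x12345678 };
	struct sas_addr phy   = { .high = 0x5000C500, .low = 0x12345678 };
	struct sas_addr other = { .high = 0x5000C500, .low = 0x9ABCDEF0 };

	/* A result of 0 means the phy may participate in the port. */
	printf("phy vs port:   %d\n", sas_addr_cmp(phy, port));
	printf("other vs port: %d\n", sas_addr_cmp(other, port));
	return 0;
}

In the driver the same comparison is applied twice per candidate port, once for the local address and once for the attached address, and only a double match (both results 0) lets the phy join that port.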
/* * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * The full GNU General Public License is included in this distribution * in the file called LICENSE.GPL. * * BSD LICENSE * * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include "isci.h" #include "host.h" #include "phy.h" #include "scu_event_codes.h" #include "probe_roms.h" #undef C #define C(a) (#a) static const char *phy_state_name(enum sci_phy_states state) { static const char * const strings[] = PHY_STATES; return strings[state]; } #undef C /* Maximum arbitration wait time in micro-seconds */ #define SCIC_SDS_PHY_MAX_ARBITRATION_WAIT_TIME (700) enum sas_linkrate sci_phy_linkrate(struct isci_phy *iphy) { return iphy->max_negotiated_speed; } static struct isci_host *phy_to_host(struct isci_phy *iphy) { struct isci_phy *table = iphy - iphy->phy_index; struct isci_host *ihost = container_of(table, typeof(*ihost), phys[0]); return ihost; } static struct device *sciphy_to_dev(struct isci_phy *iphy) { return &phy_to_host(iphy)->pdev->dev; } static enum sci_status sci_phy_transport_layer_initialization(struct isci_phy *iphy, struct scu_transport_layer_registers __iomem *reg) { u32 tl_control; iphy->transport_layer_registers = reg; writel(SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX, &iphy->transport_layer_registers->stp_rni); /* * Hardware team recommends that we enable the STP prefetch for all * transports */ tl_control = readl(&iphy->transport_layer_registers->control); tl_control |= SCU_TLCR_GEN_BIT(STP_WRITE_DATA_PREFETCH); writel(tl_control, &iphy->transport_layer_registers->control); return SCI_SUCCESS; } static enum sci_status sci_phy_link_layer_initialization(struct isci_phy *iphy, struct scu_link_layer_registers __iomem *llr) { struct isci_host *ihost = iphy->owning_port->owning_controller; struct sci_phy_user_params *phy_user; struct sci_phy_oem_params *phy_oem; int phy_idx = iphy->phy_index; struct sci_phy_cap phy_cap; u32 phy_configuration; u32 parity_check = 0; u32 parity_count = 0; u32 llctl, link_rate; u32 clksm_value = 0; u32 sp_timeouts = 0; phy_user = &ihost->user_parameters.phys[phy_idx]; phy_oem = &ihost->oem_parameters.phys[phy_idx]; iphy->link_layer_registers = llr; /* Set our IDENTIFY frame data */ #define SCI_END_DEVICE 0x01 writel(SCU_SAS_TIID_GEN_BIT(SMP_INITIATOR) | SCU_SAS_TIID_GEN_BIT(SSP_INITIATOR) | SCU_SAS_TIID_GEN_BIT(STP_INITIATOR) | SCU_SAS_TIID_GEN_BIT(DA_SATA_HOST) | SCU_SAS_TIID_GEN_VAL(DEVICE_TYPE, SCI_END_DEVICE), &llr->transmit_identification); /* Write the device SAS Address */ writel(0xFEDCBA98, &llr->sas_device_name_high); writel(phy_idx, &llr->sas_device_name_low); /* Write the source SAS Address */ writel(phy_oem->sas_address.high, &llr->source_sas_address_high); writel(phy_oem->sas_address.low, &llr->source_sas_address_low); /* Clear and Set the PHY Identifier */ writel(0, &llr->identify_frame_phy_id); writel(SCU_SAS_TIPID_GEN_VALUE(ID, phy_idx), &llr->identify_frame_phy_id); /* Change the initial state of the phy configuration register */ phy_configuration = readl(&llr->phy_configuration); /* Hold OOB state machine in reset */ phy_configuration |= SCU_SAS_PCFG_GEN_BIT(OOB_RESET); writel(phy_configuration, &llr->phy_configuration); /* Configure the SNW capabilities */ phy_cap.all = 0; phy_cap.start = 1; phy_cap.gen3_no_ssc = 1; phy_cap.gen2_no_ssc = 1; phy_cap.gen1_no_ssc = 1; if (ihost->oem_parameters.controller.do_enable_ssc) { struct scu_afe_registers __iomem *afe = &ihost->scu_registers->afe; struct scu_afe_transceiver __iomem *xcvr = &afe->scu_afe_xcvr[phy_idx]; struct isci_pci_info *pci_info = to_pci_info(ihost->pdev); bool en_sas = false; bool en_sata = false; u32 sas_type = 0; u32 sata_spread = 0x2; u32 sas_spread = 0x2; phy_cap.gen3_ssc = 1; phy_cap.gen2_ssc = 1; phy_cap.gen1_ssc = 1; if 
(pci_info->orom->hdr.version < ISCI_ROM_VER_1_1) en_sas = en_sata = true; else { sata_spread = ihost->oem_parameters.controller.ssc_sata_tx_spread_level; sas_spread = ihost->oem_parameters.controller.ssc_sas_tx_spread_level; if (sata_spread) en_sata = true; if (sas_spread) { en_sas = true; sas_type = ihost->oem_parameters.controller.ssc_sas_tx_type; } } if (en_sas) { u32 reg; reg = readl(&xcvr->afe_xcvr_control0); reg |= (0x00100000 | (sas_type << 19)); writel(reg, &xcvr->afe_xcvr_control0); reg = readl(&xcvr->afe_tx_ssc_control); reg |= sas_spread << 8; writel(reg, &xcvr->afe_tx_ssc_control); } if (en_sata) { u32 reg; reg = readl(&xcvr->afe_tx_ssc_control); reg |= sata_spread; writel(reg, &xcvr->afe_tx_ssc_control); reg = readl(&llr->stp_control); reg |= 1 << 12; writel(reg, &llr->stp_control); } } /* The SAS specification indicates that the phy_capabilities that * are transmitted shall have an even parity. Calculate the parity. */ parity_check = phy_cap.all; while (parity_check != 0) { if (parity_check & 0x1) parity_count++; parity_check >>= 1; } /* If parity indicates there are an odd number of bits set, then * set the parity bit to 1 in the phy capabilities. */ if ((parity_count % 2) != 0) phy_cap.parity = 1; writel(phy_cap.all, &llr->phy_capabilities); /* Set the enable spinup period but disable the ability to send * notify enable spinup */ writel(SCU_ENSPINUP_GEN_VAL(COUNT, phy_user->notify_enable_spin_up_insertion_frequency), &llr->notify_enable_spinup_control); /* Write the ALIGN Insertion Ferequency for connected phy and * inpendent of connected state */ clksm_value = SCU_ALIGN_INSERTION_FREQUENCY_GEN_VAL(CONNECTED, phy_user->in_connection_align_insertion_frequency); clksm_value |= SCU_ALIGN_INSERTION_FREQUENCY_GEN_VAL(GENERAL, phy_user->align_insertion_frequency); writel(clksm_value, &llr->clock_skew_management); if (is_c0(ihost->pdev) || is_c1(ihost->pdev)) { writel(0x04210400, &llr->afe_lookup_table_control); writel(0x020A7C05, &llr->sas_primitive_timeout); } else writel(0x02108421, &llr->afe_lookup_table_control); llctl = SCU_SAS_LLCTL_GEN_VAL(NO_OUTBOUND_TASK_TIMEOUT, (u8)ihost->user_parameters.no_outbound_task_timeout); switch (phy_user->max_speed_generation) { case SCIC_SDS_PARM_GEN3_SPEED: link_rate = SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN3; break; case SCIC_SDS_PARM_GEN2_SPEED: link_rate = SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN2; break; default: link_rate = SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN1; break; } llctl |= SCU_SAS_LLCTL_GEN_VAL(MAX_LINK_RATE, link_rate); writel(llctl, &llr->link_layer_control); sp_timeouts = readl(&llr->sas_phy_timeouts); /* Clear the default 0x36 (54us) RATE_CHANGE timeout value. */ sp_timeouts &= ~SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0xFF); /* Set RATE_CHANGE timeout value to 0x3B (59us). This ensures SCU can * lock with 3Gb drive when SCU max rate is set to 1.5Gb. */ sp_timeouts |= SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0x3B); writel(sp_timeouts, &llr->sas_phy_timeouts); if (is_a2(ihost->pdev)) { /* Program the max ARB time for the PHY to 700us so we * inter-operate with the PMC expander which shuts down * PHYs if the expander PHY generates too many breaks. * This time value will guarantee that the initiator PHY * will generate the break. */ writel(SCIC_SDS_PHY_MAX_ARBITRATION_WAIT_TIME, &llr->maximum_arbitration_wait_timer_timeout); } /* Disable link layer hang detection, rely on the OS timeout for * I/O timeouts. 
*/ writel(0, &llr->link_layer_hang_detection_timeout); /* We can exit the initial state to the stopped state */ sci_change_state(&iphy->sm, SCI_PHY_STOPPED); return SCI_SUCCESS; } static void phy_sata_timeout(struct timer_list *t) { struct sci_timer *tmr = from_timer(tmr, t, timer); struct isci_phy *iphy = container_of(tmr, typeof(*iphy), sata_timer); struct isci_host *ihost = iphy->owning_port->owning_controller; unsigned long flags; spin_lock_irqsave(&ihost->scic_lock, flags); if (tmr->cancel) goto done; dev_dbg(sciphy_to_dev(iphy), "%s: SCIC SDS Phy 0x%p did not receive signature fis before " "timeout.\n", __func__, iphy); sci_change_state(&iphy->sm, SCI_PHY_STARTING); done: spin_unlock_irqrestore(&ihost->scic_lock, flags); } /** * phy_get_non_dummy_port() - This method returns the port currently containing * this phy. If the phy is currently contained by the dummy port, then the phy * is considered to not be part of a port. * * @iphy: This parameter specifies the phy for which to retrieve the * containing port. * * This method returns a handle to a port that contains the supplied phy. * NULL This value is returned if the phy is not part of a real * port (i.e. it's contained in the dummy port). !NULL All other * values indicate a handle/pointer to the port containing the phy. */ struct isci_port *phy_get_non_dummy_port(struct isci_phy *iphy) { struct isci_port *iport = iphy->owning_port; if (iport->physical_port_index == SCIC_SDS_DUMMY_PORT) return NULL; return iphy->owning_port; } /* * sci_phy_set_port() - This method will assign a port to the phy object. */ void sci_phy_set_port( struct isci_phy *iphy, struct isci_port *iport) { iphy->owning_port = iport; if (iphy->bcn_received_while_port_unassigned) { iphy->bcn_received_while_port_unassigned = false; sci_port_broadcast_change_received(iphy->owning_port, iphy); } } enum sci_status sci_phy_initialize(struct isci_phy *iphy, struct scu_transport_layer_registers __iomem *tl, struct scu_link_layer_registers __iomem *ll) { /* Perfrom the initialization of the TL hardware */ sci_phy_transport_layer_initialization(iphy, tl); /* Perofrm the initialization of the PE hardware */ sci_phy_link_layer_initialization(iphy, ll); /* There is nothing that needs to be done in this state just * transition to the stopped state */ sci_change_state(&iphy->sm, SCI_PHY_STOPPED); return SCI_SUCCESS; } /** * sci_phy_setup_transport() - This method assigns the direct attached device ID for this phy. * * @iphy: The phy for which the direct attached device id is to * be assigned. * @device_id: The direct attached device ID to assign to the phy. * This will either be the RNi for the device or an invalid RNi if there * is no current device assigned to the phy. 
*/ void sci_phy_setup_transport(struct isci_phy *iphy, u32 device_id) { u32 tl_control; writel(device_id, &iphy->transport_layer_registers->stp_rni); /* * The read should guarantee that the first write gets posted * before the next write */ tl_control = readl(&iphy->transport_layer_registers->control); tl_control |= SCU_TLCR_GEN_BIT(CLEAR_TCI_NCQ_MAPPING_TABLE); writel(tl_control, &iphy->transport_layer_registers->control); } static void sci_phy_suspend(struct isci_phy *iphy) { u32 scu_sas_pcfg_value; scu_sas_pcfg_value = readl(&iphy->link_layer_registers->phy_configuration); scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(SUSPEND_PROTOCOL_ENGINE); writel(scu_sas_pcfg_value, &iphy->link_layer_registers->phy_configuration); sci_phy_setup_transport(iphy, SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX); } void sci_phy_resume(struct isci_phy *iphy) { u32 scu_sas_pcfg_value; scu_sas_pcfg_value = readl(&iphy->link_layer_registers->phy_configuration); scu_sas_pcfg_value &= ~SCU_SAS_PCFG_GEN_BIT(SUSPEND_PROTOCOL_ENGINE); writel(scu_sas_pcfg_value, &iphy->link_layer_registers->phy_configuration); } void sci_phy_get_sas_address(struct isci_phy *iphy, struct sci_sas_address *sas) { sas->high = readl(&iphy->link_layer_registers->source_sas_address_high); sas->low = readl(&iphy->link_layer_registers->source_sas_address_low); } void sci_phy_get_attached_sas_address(struct isci_phy *iphy, struct sci_sas_address *sas) { struct sas_identify_frame *iaf; iaf = &iphy->frame_rcvd.iaf; memcpy(sas, iaf->sas_addr, SAS_ADDR_SIZE); } void sci_phy_get_protocols(struct isci_phy *iphy, struct sci_phy_proto *proto) { proto->all = readl(&iphy->link_layer_registers->transmit_identification); } enum sci_status sci_phy_start(struct isci_phy *iphy) { enum sci_phy_states state = iphy->sm.current_state_id; if (state != SCI_PHY_STOPPED) { dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n", __func__, phy_state_name(state)); return SCI_FAILURE_INVALID_STATE; } sci_change_state(&iphy->sm, SCI_PHY_STARTING); return SCI_SUCCESS; } enum sci_status sci_phy_stop(struct isci_phy *iphy) { enum sci_phy_states state = iphy->sm.current_state_id; switch (state) { case SCI_PHY_SUB_INITIAL: case SCI_PHY_SUB_AWAIT_OSSP_EN: case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN: case SCI_PHY_SUB_AWAIT_SAS_POWER: case SCI_PHY_SUB_AWAIT_SATA_POWER: case SCI_PHY_SUB_AWAIT_SATA_PHY_EN: case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN: case SCI_PHY_SUB_AWAIT_SIG_FIS_UF: case SCI_PHY_SUB_FINAL: case SCI_PHY_READY: break; default: dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n", __func__, phy_state_name(state)); return SCI_FAILURE_INVALID_STATE; } sci_change_state(&iphy->sm, SCI_PHY_STOPPED); return SCI_SUCCESS; } enum sci_status sci_phy_reset(struct isci_phy *iphy) { enum sci_phy_states state = iphy->sm.current_state_id; if (state != SCI_PHY_READY) { dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n", __func__, phy_state_name(state)); return SCI_FAILURE_INVALID_STATE; } sci_change_state(&iphy->sm, SCI_PHY_RESETTING); return SCI_SUCCESS; } enum sci_status sci_phy_consume_power_handler(struct isci_phy *iphy) { enum sci_phy_states state = iphy->sm.current_state_id; switch (state) { case SCI_PHY_SUB_AWAIT_SAS_POWER: { u32 enable_spinup; enable_spinup = readl(&iphy->link_layer_registers->notify_enable_spinup_control); enable_spinup |= SCU_ENSPINUP_GEN_BIT(ENABLE); writel(enable_spinup, &iphy->link_layer_registers->notify_enable_spinup_control); /* Change state to the final state this substate machine has run to completion */ sci_change_state(&iphy->sm, SCI_PHY_SUB_FINAL); 
return SCI_SUCCESS; } case SCI_PHY_SUB_AWAIT_SATA_POWER: { u32 scu_sas_pcfg_value; /* Release the spinup hold state and reset the OOB state machine */ scu_sas_pcfg_value = readl(&iphy->link_layer_registers->phy_configuration); scu_sas_pcfg_value &= ~(SCU_SAS_PCFG_GEN_BIT(SATA_SPINUP_HOLD) | SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE)); scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(OOB_RESET); writel(scu_sas_pcfg_value, &iphy->link_layer_registers->phy_configuration); /* Now restart the OOB operation */ scu_sas_pcfg_value &= ~SCU_SAS_PCFG_GEN_BIT(OOB_RESET); scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE); writel(scu_sas_pcfg_value, &iphy->link_layer_registers->phy_configuration); /* Change state to the final state this substate machine has run to completion */ sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_PHY_EN); return SCI_SUCCESS; } default: dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n", __func__, phy_state_name(state)); return SCI_FAILURE_INVALID_STATE; } } static void sci_phy_start_sas_link_training(struct isci_phy *iphy) { /* continue the link training for the phy as if it were a SAS PHY * instead of a SATA PHY. This is done because the completion queue had a SAS * PHY DETECTED event when the state machine was expecting a SATA PHY event. */ u32 phy_control; phy_control = readl(&iphy->link_layer_registers->phy_configuration); phy_control |= SCU_SAS_PCFG_GEN_BIT(SATA_SPINUP_HOLD); writel(phy_control, &iphy->link_layer_registers->phy_configuration); sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SAS_SPEED_EN); iphy->protocol = SAS_PROTOCOL_SSP; } static void sci_phy_start_sata_link_training(struct isci_phy *iphy) { /* This method continues the link training for the phy as if it were a SATA PHY * instead of a SAS PHY. This is done because the completion queue had a SATA * SPINUP HOLD event when the state machine was expecting a SAS PHY event. none */ sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_POWER); iphy->protocol = SAS_PROTOCOL_SATA; } /** * sci_phy_complete_link_training - perform processing common to * all protocols upon completion of link training. * @iphy: This parameter specifies the phy object for which link training * has completed. * @max_link_rate: This parameter specifies the maximum link rate to be * associated with this phy. * @next_state: This parameter specifies the next state for the phy's starting * sub-state machine. 
* */ static void sci_phy_complete_link_training(struct isci_phy *iphy, enum sas_linkrate max_link_rate, u32 next_state) { iphy->max_negotiated_speed = max_link_rate; sci_change_state(&iphy->sm, next_state); } static const char *phy_event_name(u32 event_code) { switch (scu_get_event_code(event_code)) { case SCU_EVENT_PORT_SELECTOR_DETECTED: return "port selector"; case SCU_EVENT_SENT_PORT_SELECTION: return "port selection"; case SCU_EVENT_HARD_RESET_TRANSMITTED: return "tx hard reset"; case SCU_EVENT_HARD_RESET_RECEIVED: return "rx hard reset"; case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT: return "identify timeout"; case SCU_EVENT_LINK_FAILURE: return "link fail"; case SCU_EVENT_SATA_SPINUP_HOLD: return "sata spinup hold"; case SCU_EVENT_SAS_15_SSC: case SCU_EVENT_SAS_15: return "sas 1.5"; case SCU_EVENT_SAS_30_SSC: case SCU_EVENT_SAS_30: return "sas 3.0"; case SCU_EVENT_SAS_60_SSC: case SCU_EVENT_SAS_60: return "sas 6.0"; case SCU_EVENT_SATA_15_SSC: case SCU_EVENT_SATA_15: return "sata 1.5"; case SCU_EVENT_SATA_30_SSC: case SCU_EVENT_SATA_30: return "sata 3.0"; case SCU_EVENT_SATA_60_SSC: case SCU_EVENT_SATA_60: return "sata 6.0"; case SCU_EVENT_SAS_PHY_DETECTED: return "sas detect"; case SCU_EVENT_SATA_PHY_DETECTED: return "sata detect"; default: return "unknown"; } } #define phy_event_dbg(iphy, state, code) \ dev_dbg(sciphy_to_dev(iphy), "phy-%d:%d: %s event: %s (%x)\n", \ phy_to_host(iphy)->id, iphy->phy_index, \ phy_state_name(state), phy_event_name(code), code) #define phy_event_warn(iphy, state, code) \ dev_warn(sciphy_to_dev(iphy), "phy-%d:%d: %s event: %s (%x)\n", \ phy_to_host(iphy)->id, iphy->phy_index, \ phy_state_name(state), phy_event_name(code), code) static void scu_link_layer_set_txcomsas_timeout(struct isci_phy *iphy, u32 timeout) { u32 val; /* Extend timeout */ val = readl(&iphy->link_layer_registers->transmit_comsas_signal); val &= ~SCU_SAS_LLTXCOMSAS_GEN_VAL(NEGTIME, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_MASK); val |= SCU_SAS_LLTXCOMSAS_GEN_VAL(NEGTIME, timeout); writel(val, &iphy->link_layer_registers->transmit_comsas_signal); } enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code) { enum sci_phy_states state = iphy->sm.current_state_id; switch (state) { case SCI_PHY_SUB_AWAIT_OSSP_EN: switch (scu_get_event_code(event_code)) { case SCU_EVENT_SAS_PHY_DETECTED: sci_phy_start_sas_link_training(iphy); iphy->is_in_link_training = true; break; case SCU_EVENT_SATA_SPINUP_HOLD: sci_phy_start_sata_link_training(iphy); iphy->is_in_link_training = true; break; case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT: /* Extend timeout value */ scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_EXTENDED); /* Start the oob/sn state machine over again */ sci_change_state(&iphy->sm, SCI_PHY_STARTING); break; default: phy_event_dbg(iphy, state, event_code); return SCI_FAILURE; } return SCI_SUCCESS; case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN: switch (scu_get_event_code(event_code)) { case SCU_EVENT_SAS_PHY_DETECTED: /* * Why is this being reported again by the controller? 
* We would re-enter this state so just stay here */ break; case SCU_EVENT_SAS_15: case SCU_EVENT_SAS_15_SSC: sci_phy_complete_link_training(iphy, SAS_LINK_RATE_1_5_GBPS, SCI_PHY_SUB_AWAIT_IAF_UF); break; case SCU_EVENT_SAS_30: case SCU_EVENT_SAS_30_SSC: sci_phy_complete_link_training(iphy, SAS_LINK_RATE_3_0_GBPS, SCI_PHY_SUB_AWAIT_IAF_UF); break; case SCU_EVENT_SAS_60: case SCU_EVENT_SAS_60_SSC: sci_phy_complete_link_training(iphy, SAS_LINK_RATE_6_0_GBPS, SCI_PHY_SUB_AWAIT_IAF_UF); break; case SCU_EVENT_SATA_SPINUP_HOLD: /* * We were doing SAS PHY link training and received a SATA PHY event * continue OOB/SN as if this were a SATA PHY */ sci_phy_start_sata_link_training(iphy); break; case SCU_EVENT_LINK_FAILURE: /* Change the timeout value to default */ scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT); /* Link failure change state back to the starting state */ sci_change_state(&iphy->sm, SCI_PHY_STARTING); break; case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT: /* Extend the timeout value */ scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_EXTENDED); /* Start the oob/sn state machine over again */ sci_change_state(&iphy->sm, SCI_PHY_STARTING); break; default: phy_event_warn(iphy, state, event_code); return SCI_FAILURE; } return SCI_SUCCESS; case SCI_PHY_SUB_AWAIT_IAF_UF: switch (scu_get_event_code(event_code)) { case SCU_EVENT_SAS_PHY_DETECTED: /* Backup the state machine */ sci_phy_start_sas_link_training(iphy); break; case SCU_EVENT_SATA_SPINUP_HOLD: /* We were doing SAS PHY link training and received a * SATA PHY event continue OOB/SN as if this were a * SATA PHY */ sci_phy_start_sata_link_training(iphy); break; case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT: /* Extend the timeout value */ scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_EXTENDED); /* Start the oob/sn state machine over again */ sci_change_state(&iphy->sm, SCI_PHY_STARTING); break; case SCU_EVENT_LINK_FAILURE: scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT); fallthrough; case SCU_EVENT_HARD_RESET_RECEIVED: /* Start the oob/sn state machine over again */ sci_change_state(&iphy->sm, SCI_PHY_STARTING); break; default: phy_event_warn(iphy, state, event_code); return SCI_FAILURE; } return SCI_SUCCESS; case SCI_PHY_SUB_AWAIT_SAS_POWER: switch (scu_get_event_code(event_code)) { case SCU_EVENT_LINK_FAILURE: /* Change the timeout value to default */ scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT); /* Link failure change state back to the starting state */ sci_change_state(&iphy->sm, SCI_PHY_STARTING); break; default: phy_event_warn(iphy, state, event_code); return SCI_FAILURE; } return SCI_SUCCESS; case SCI_PHY_SUB_AWAIT_SATA_POWER: switch (scu_get_event_code(event_code)) { case SCU_EVENT_LINK_FAILURE: /* Change the timeout value to default */ scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT); /* Link failure change state back to the starting state */ sci_change_state(&iphy->sm, SCI_PHY_STARTING); break; case SCU_EVENT_SATA_SPINUP_HOLD: /* These events are received every 10ms and are * expected while in this state */ break; case SCU_EVENT_SAS_PHY_DETECTED: /* There has been a change in the phy type before OOB/SN for the * SATA finished start down the SAS link traning path. 
*/ sci_phy_start_sas_link_training(iphy); break; default: phy_event_warn(iphy, state, event_code); return SCI_FAILURE; } return SCI_SUCCESS; case SCI_PHY_SUB_AWAIT_SATA_PHY_EN: switch (scu_get_event_code(event_code)) { case SCU_EVENT_LINK_FAILURE: /* Change the timeout value to default */ scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT); /* Link failure change state back to the starting state */ sci_change_state(&iphy->sm, SCI_PHY_STARTING); break; case SCU_EVENT_SATA_SPINUP_HOLD: /* These events might be received since we dont know how many may be in * the completion queue while waiting for power */ break; case SCU_EVENT_SATA_PHY_DETECTED: iphy->protocol = SAS_PROTOCOL_SATA; /* We have received the SATA PHY notification change state */ sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_SPEED_EN); break; case SCU_EVENT_SAS_PHY_DETECTED: /* There has been a change in the phy type before OOB/SN for the * SATA finished start down the SAS link traning path. */ sci_phy_start_sas_link_training(iphy); break; default: phy_event_warn(iphy, state, event_code); return SCI_FAILURE; } return SCI_SUCCESS; case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN: switch (scu_get_event_code(event_code)) { case SCU_EVENT_SATA_PHY_DETECTED: /* * The hardware reports multiple SATA PHY detected events * ignore the extras */ break; case SCU_EVENT_SATA_15: case SCU_EVENT_SATA_15_SSC: sci_phy_complete_link_training(iphy, SAS_LINK_RATE_1_5_GBPS, SCI_PHY_SUB_AWAIT_SIG_FIS_UF); break; case SCU_EVENT_SATA_30: case SCU_EVENT_SATA_30_SSC: sci_phy_complete_link_training(iphy, SAS_LINK_RATE_3_0_GBPS, SCI_PHY_SUB_AWAIT_SIG_FIS_UF); break; case SCU_EVENT_SATA_60: case SCU_EVENT_SATA_60_SSC: sci_phy_complete_link_training(iphy, SAS_LINK_RATE_6_0_GBPS, SCI_PHY_SUB_AWAIT_SIG_FIS_UF); break; case SCU_EVENT_LINK_FAILURE: /* Change the timeout value to default */ scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT); /* Link failure change state back to the starting state */ sci_change_state(&iphy->sm, SCI_PHY_STARTING); break; case SCU_EVENT_SAS_PHY_DETECTED: /* * There has been a change in the phy type before OOB/SN for the * SATA finished start down the SAS link traning path. */ sci_phy_start_sas_link_training(iphy); break; default: phy_event_warn(iphy, state, event_code); return SCI_FAILURE; } return SCI_SUCCESS; case SCI_PHY_SUB_AWAIT_SIG_FIS_UF: switch (scu_get_event_code(event_code)) { case SCU_EVENT_SATA_PHY_DETECTED: /* Backup the state machine */ sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_SPEED_EN); break; case SCU_EVENT_LINK_FAILURE: /* Change the timeout value to default */ scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT); /* Link failure change state back to the starting state */ sci_change_state(&iphy->sm, SCI_PHY_STARTING); break; default: phy_event_warn(iphy, state, event_code); return SCI_FAILURE; } return SCI_SUCCESS; case SCI_PHY_READY: switch (scu_get_event_code(event_code)) { case SCU_EVENT_LINK_FAILURE: /* Set default timeout */ scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT); /* Link failure change state back to the starting state */ sci_change_state(&iphy->sm, SCI_PHY_STARTING); break; case SCU_EVENT_BROADCAST_CHANGE: case SCU_EVENT_BROADCAST_SES: case SCU_EVENT_BROADCAST_RESERVED0: case SCU_EVENT_BROADCAST_RESERVED1: case SCU_EVENT_BROADCAST_EXPANDER: case SCU_EVENT_BROADCAST_AEN: /* Broadcast change received. Notify the port. 
*/ if (phy_get_non_dummy_port(iphy) != NULL) sci_port_broadcast_change_received(iphy->owning_port, iphy); else iphy->bcn_received_while_port_unassigned = true; break; case SCU_EVENT_BROADCAST_RESERVED3: case SCU_EVENT_BROADCAST_RESERVED4: default: phy_event_warn(iphy, state, event_code); return SCI_FAILURE_INVALID_STATE; } return SCI_SUCCESS; case SCI_PHY_RESETTING: switch (scu_get_event_code(event_code)) { case SCU_EVENT_HARD_RESET_TRANSMITTED: /* Link failure change state back to the starting state */ sci_change_state(&iphy->sm, SCI_PHY_STARTING); break; default: phy_event_warn(iphy, state, event_code); return SCI_FAILURE_INVALID_STATE; } return SCI_SUCCESS; default: dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n", __func__, phy_state_name(state)); return SCI_FAILURE_INVALID_STATE; } } enum sci_status sci_phy_frame_handler(struct isci_phy *iphy, u32 frame_index) { enum sci_phy_states state = iphy->sm.current_state_id; struct isci_host *ihost = iphy->owning_port->owning_controller; enum sci_status result; unsigned long flags; switch (state) { case SCI_PHY_SUB_AWAIT_IAF_UF: { u32 *frame_words; struct sas_identify_frame iaf; result = sci_unsolicited_frame_control_get_header(&ihost->uf_control, frame_index, (void **)&frame_words); if (result != SCI_SUCCESS) return result; sci_swab32_cpy(&iaf, frame_words, sizeof(iaf) / sizeof(u32)); if (iaf.frame_type == 0) { u32 state; spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags); memcpy(&iphy->frame_rcvd.iaf, &iaf, sizeof(iaf)); spin_unlock_irqrestore(&iphy->sas_phy.frame_rcvd_lock, flags); if (iaf.smp_tport) { /* We got the IAF for an expander PHY go to the final * state since there are no power requirements for * expander phys. */ state = SCI_PHY_SUB_FINAL; } else { /* We got the IAF we can now go to the await spinup * semaphore state */ state = SCI_PHY_SUB_AWAIT_SAS_POWER; } sci_change_state(&iphy->sm, state); result = SCI_SUCCESS; } else dev_warn(sciphy_to_dev(iphy), "%s: PHY starting substate machine received " "unexpected frame id %x\n", __func__, frame_index); sci_controller_release_frame(ihost, frame_index); return result; } case SCI_PHY_SUB_AWAIT_SIG_FIS_UF: { struct dev_to_host_fis *frame_header; u32 *fis_frame_data; result = sci_unsolicited_frame_control_get_header(&ihost->uf_control, frame_index, (void **)&frame_header); if (result != SCI_SUCCESS) return result; if ((frame_header->fis_type == FIS_REGD2H) && !(frame_header->status & ATA_BUSY)) { sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, frame_index, (void **)&fis_frame_data); spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags); sci_controller_copy_sata_response(&iphy->frame_rcvd.fis, frame_header, fis_frame_data); spin_unlock_irqrestore(&iphy->sas_phy.frame_rcvd_lock, flags); /* got IAF we can now go to the await spinup semaphore state */ sci_change_state(&iphy->sm, SCI_PHY_SUB_FINAL); result = SCI_SUCCESS; } else dev_warn(sciphy_to_dev(iphy), "%s: PHY starting substate machine received " "unexpected frame id %x\n", __func__, frame_index); /* Regardless of the result we are done with this frame with it */ sci_controller_release_frame(ihost, frame_index); return result; } default: dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n", __func__, phy_state_name(state)); return SCI_FAILURE_INVALID_STATE; } } static void sci_phy_starting_initial_substate_enter(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); /* This is just an temporary state go off to the starting state */ sci_change_state(&iphy->sm, 
SCI_PHY_SUB_AWAIT_OSSP_EN); } static void sci_phy_starting_await_sas_power_substate_enter(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); struct isci_host *ihost = iphy->owning_port->owning_controller; sci_controller_power_control_queue_insert(ihost, iphy); } static void sci_phy_starting_await_sas_power_substate_exit(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); struct isci_host *ihost = iphy->owning_port->owning_controller; sci_controller_power_control_queue_remove(ihost, iphy); } static void sci_phy_starting_await_sata_power_substate_enter(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); struct isci_host *ihost = iphy->owning_port->owning_controller; sci_controller_power_control_queue_insert(ihost, iphy); } static void sci_phy_starting_await_sata_power_substate_exit(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); struct isci_host *ihost = iphy->owning_port->owning_controller; sci_controller_power_control_queue_remove(ihost, iphy); } static void sci_phy_starting_await_sata_phy_substate_enter(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); sci_mod_timer(&iphy->sata_timer, SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT); } static void sci_phy_starting_await_sata_phy_substate_exit(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); sci_del_timer(&iphy->sata_timer); } static void sci_phy_starting_await_sata_speed_substate_enter(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); sci_mod_timer(&iphy->sata_timer, SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT); } static void sci_phy_starting_await_sata_speed_substate_exit(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); sci_del_timer(&iphy->sata_timer); } static void sci_phy_starting_await_sig_fis_uf_substate_enter(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); if (sci_port_link_detected(iphy->owning_port, iphy)) { /* * Clear the PE suspend condition so we can actually * receive SIG FIS * The hardware will not respond to the XRDY until the PE * suspend condition is cleared. */ sci_phy_resume(iphy); sci_mod_timer(&iphy->sata_timer, SCIC_SDS_SIGNATURE_FIS_TIMEOUT); } else iphy->is_in_link_training = false; } static void sci_phy_starting_await_sig_fis_uf_substate_exit(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); sci_del_timer(&iphy->sata_timer); } static void sci_phy_starting_final_substate_enter(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); /* State machine has run to completion so exit out and change * the base state machine to the ready state */ sci_change_state(&iphy->sm, SCI_PHY_READY); } /** * scu_link_layer_stop_protocol_engine() * @iphy: This is the struct isci_phy object to stop. * * This method will stop the struct isci_phy object. This does not reset the * protocol engine it just suspends it and places it in a state where it will * not cause the end device to power up. 
none */ static void scu_link_layer_stop_protocol_engine( struct isci_phy *iphy) { u32 scu_sas_pcfg_value; u32 enable_spinup_value; /* Suspend the protocol engine and place it in a sata spinup hold state */ scu_sas_pcfg_value = readl(&iphy->link_layer_registers->phy_configuration); scu_sas_pcfg_value |= (SCU_SAS_PCFG_GEN_BIT(OOB_RESET) | SCU_SAS_PCFG_GEN_BIT(SUSPEND_PROTOCOL_ENGINE) | SCU_SAS_PCFG_GEN_BIT(SATA_SPINUP_HOLD)); writel(scu_sas_pcfg_value, &iphy->link_layer_registers->phy_configuration); /* Disable the notify enable spinup primitives */ enable_spinup_value = readl(&iphy->link_layer_registers->notify_enable_spinup_control); enable_spinup_value &= ~SCU_ENSPINUP_GEN_BIT(ENABLE); writel(enable_spinup_value, &iphy->link_layer_registers->notify_enable_spinup_control); } static void scu_link_layer_start_oob(struct isci_phy *iphy) { struct scu_link_layer_registers __iomem *ll = iphy->link_layer_registers; u32 val; /** Reset OOB sequence - start */ val = readl(&ll->phy_configuration); val &= ~(SCU_SAS_PCFG_GEN_BIT(OOB_RESET) | SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE) | SCU_SAS_PCFG_GEN_BIT(HARD_RESET)); writel(val, &ll->phy_configuration); readl(&ll->phy_configuration); /* flush */ /** Reset OOB sequence - end */ /** Start OOB sequence - start */ val = readl(&ll->phy_configuration); val |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE); writel(val, &ll->phy_configuration); readl(&ll->phy_configuration); /* flush */ /** Start OOB sequence - end */ } /** * scu_link_layer_tx_hard_reset() * @iphy: This is the struct isci_phy object to stop. * * This method will transmit a hard reset request on the specified phy. The SCU * hardware requires that we reset the OOB state machine and set the hard reset * bit in the phy configuration register. We then must start OOB over with the * hard reset bit set. */ static void scu_link_layer_tx_hard_reset( struct isci_phy *iphy) { u32 phy_configuration_value; /* * SAS Phys must wait for the HARD_RESET_TX event notification to transition * to the starting state. 
*/ phy_configuration_value = readl(&iphy->link_layer_registers->phy_configuration); phy_configuration_value &= ~(SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE)); phy_configuration_value |= (SCU_SAS_PCFG_GEN_BIT(HARD_RESET) | SCU_SAS_PCFG_GEN_BIT(OOB_RESET)); writel(phy_configuration_value, &iphy->link_layer_registers->phy_configuration); /* Now take the OOB state machine out of reset */ phy_configuration_value |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE); phy_configuration_value &= ~SCU_SAS_PCFG_GEN_BIT(OOB_RESET); writel(phy_configuration_value, &iphy->link_layer_registers->phy_configuration); } static void sci_phy_stopped_state_enter(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); struct isci_port *iport = iphy->owning_port; struct isci_host *ihost = iport->owning_controller; /* * @todo We need to get to the controller to place this PE in a * reset state */ sci_del_timer(&iphy->sata_timer); scu_link_layer_stop_protocol_engine(iphy); if (iphy->sm.previous_state_id != SCI_PHY_INITIAL) sci_controller_link_down(ihost, phy_get_non_dummy_port(iphy), iphy); } static void sci_phy_starting_state_enter(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); struct isci_port *iport = iphy->owning_port; struct isci_host *ihost = iport->owning_controller; scu_link_layer_stop_protocol_engine(iphy); scu_link_layer_start_oob(iphy); /* We don't know what kind of phy we are going to be just yet */ iphy->protocol = SAS_PROTOCOL_NONE; iphy->bcn_received_while_port_unassigned = false; if (iphy->sm.previous_state_id == SCI_PHY_READY) sci_controller_link_down(ihost, phy_get_non_dummy_port(iphy), iphy); sci_change_state(&iphy->sm, SCI_PHY_SUB_INITIAL); } static void sci_phy_ready_state_enter(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); struct isci_port *iport = iphy->owning_port; struct isci_host *ihost = iport->owning_controller; sci_controller_link_up(ihost, phy_get_non_dummy_port(iphy), iphy); } static void sci_phy_ready_state_exit(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); sci_phy_suspend(iphy); } static void sci_phy_resetting_state_enter(struct sci_base_state_machine *sm) { struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); /* The phy is being reset, therefore deactivate it from the port. In * the resetting state we don't notify the user regarding link up and * link down notifications */ sci_port_deactivate_phy(iphy->owning_port, iphy, false); if (iphy->protocol == SAS_PROTOCOL_SSP) { scu_link_layer_tx_hard_reset(iphy); } else { /* The SCU does not need to have a discrete reset state so * just go back to the starting state. 
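* Re-entering SCI_PHY_STARTING restarts OOB and the link-training substates for non-SSP phys.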
*/ sci_change_state(&iphy->sm, SCI_PHY_STARTING); } } static const struct sci_base_state sci_phy_state_table[] = { [SCI_PHY_INITIAL] = { }, [SCI_PHY_STOPPED] = { .enter_state = sci_phy_stopped_state_enter, }, [SCI_PHY_STARTING] = { .enter_state = sci_phy_starting_state_enter, }, [SCI_PHY_SUB_INITIAL] = { .enter_state = sci_phy_starting_initial_substate_enter, }, [SCI_PHY_SUB_AWAIT_OSSP_EN] = { }, [SCI_PHY_SUB_AWAIT_SAS_SPEED_EN] = { }, [SCI_PHY_SUB_AWAIT_IAF_UF] = { }, [SCI_PHY_SUB_AWAIT_SAS_POWER] = { .enter_state = sci_phy_starting_await_sas_power_substate_enter, .exit_state = sci_phy_starting_await_sas_power_substate_exit, }, [SCI_PHY_SUB_AWAIT_SATA_POWER] = { .enter_state = sci_phy_starting_await_sata_power_substate_enter, .exit_state = sci_phy_starting_await_sata_power_substate_exit }, [SCI_PHY_SUB_AWAIT_SATA_PHY_EN] = { .enter_state = sci_phy_starting_await_sata_phy_substate_enter, .exit_state = sci_phy_starting_await_sata_phy_substate_exit }, [SCI_PHY_SUB_AWAIT_SATA_SPEED_EN] = { .enter_state = sci_phy_starting_await_sata_speed_substate_enter, .exit_state = sci_phy_starting_await_sata_speed_substate_exit }, [SCI_PHY_SUB_AWAIT_SIG_FIS_UF] = { .enter_state = sci_phy_starting_await_sig_fis_uf_substate_enter, .exit_state = sci_phy_starting_await_sig_fis_uf_substate_exit }, [SCI_PHY_SUB_FINAL] = { .enter_state = sci_phy_starting_final_substate_enter, }, [SCI_PHY_READY] = { .enter_state = sci_phy_ready_state_enter, .exit_state = sci_phy_ready_state_exit, }, [SCI_PHY_RESETTING] = { .enter_state = sci_phy_resetting_state_enter, }, [SCI_PHY_FINAL] = { }, }; void sci_phy_construct(struct isci_phy *iphy, struct isci_port *iport, u8 phy_index) { sci_init_sm(&iphy->sm, sci_phy_state_table, SCI_PHY_INITIAL); /* Copy the rest of the input data to our locals */ iphy->owning_port = iport; iphy->phy_index = phy_index; iphy->bcn_received_while_port_unassigned = false; iphy->protocol = SAS_PROTOCOL_NONE; iphy->link_layer_registers = NULL; iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN; /* Create the SIGNATURE FIS Timeout timer for this phy */ sci_init_timer(&iphy->sata_timer, phy_sata_timeout); } void isci_phy_init(struct isci_phy *iphy, struct isci_host *ihost, int index) { struct sci_oem_params *oem = &ihost->oem_parameters; u64 sci_sas_addr; __be64 sas_addr; sci_sas_addr = oem->phys[index].sas_address.high; sci_sas_addr <<= 32; sci_sas_addr |= oem->phys[index].sas_address.low; sas_addr = cpu_to_be64(sci_sas_addr); memcpy(iphy->sas_addr, &sas_addr, sizeof(sas_addr)); iphy->sas_phy.enabled = 0; iphy->sas_phy.id = index; iphy->sas_phy.sas_addr = &iphy->sas_addr[0]; iphy->sas_phy.frame_rcvd = (u8 *)&iphy->frame_rcvd; iphy->sas_phy.ha = &ihost->sas_ha; iphy->sas_phy.lldd_phy = iphy; iphy->sas_phy.enabled = 1; iphy->sas_phy.iproto = SAS_PROTOCOL_ALL; iphy->sas_phy.tproto = 0; iphy->sas_phy.role = PHY_ROLE_INITIATOR; iphy->sas_phy.oob_mode = OOB_NOT_CONNECTED; iphy->sas_phy.linkrate = SAS_LINK_RATE_UNKNOWN; memset(&iphy->frame_rcvd, 0, sizeof(iphy->frame_rcvd)); } /** * isci_phy_control() - This function is one of the SAS Domain Template * functions. This is a phy management function. * @sas_phy: This parameter specifies the sphy being controlled. * @func: This parameter specifies the phy control function being invoked. * @buf: This parameter is specific to the phy function being invoked. * * status, zero indicates success. 
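* Return: 0 on success; -ENODEV if a hard reset is requested on a phy with no port; -ENOSYS for unimplemented phy functions.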
*/ int isci_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, void *buf) { int ret = 0; struct isci_phy *iphy = sas_phy->lldd_phy; struct asd_sas_port *port = sas_phy->port; struct isci_host *ihost = sas_phy->ha->lldd_ha; unsigned long flags; dev_dbg(&ihost->pdev->dev, "%s: phy %p; func %d; buf %p; isci phy %p, port %p\n", __func__, sas_phy, func, buf, iphy, port); switch (func) { case PHY_FUNC_DISABLE: spin_lock_irqsave(&ihost->scic_lock, flags); scu_link_layer_start_oob(iphy); sci_phy_stop(iphy); spin_unlock_irqrestore(&ihost->scic_lock, flags); break; case PHY_FUNC_LINK_RESET: spin_lock_irqsave(&ihost->scic_lock, flags); scu_link_layer_start_oob(iphy); sci_phy_stop(iphy); sci_phy_start(iphy); spin_unlock_irqrestore(&ihost->scic_lock, flags); break; case PHY_FUNC_HARD_RESET: if (!port) return -ENODEV; ret = isci_port_perform_hard_reset(ihost, port->lldd_port, iphy); break; case PHY_FUNC_GET_EVENTS: { struct scu_link_layer_registers __iomem *r; struct sas_phy *phy = sas_phy->phy; r = iphy->link_layer_registers; phy->running_disparity_error_count = readl(&r->running_disparity_error_count); phy->loss_of_dword_sync_count = readl(&r->loss_of_sync_error_count); phy->phy_reset_problem_count = readl(&r->phy_reset_problem_count); phy->invalid_dword_count = readl(&r->invalid_dword_counter); break; } default: dev_dbg(&ihost->pdev->dev, "%s: phy %p; func %d NOT IMPLEMENTED!\n", __func__, sas_phy, func); ret = -ENOSYS; break; } return ret; }
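/*
 * Editorial sketch (not driver code): a minimal, standalone illustration of
 * the enter/exit state-table pattern used by sci_phy_state_table above and
 * sci_controller_state_table below.  The driver's real helpers are
 * sci_init_sm()/sci_change_state() in host.c; all demo_* names, states and
 * main() here are made up purely for illustration.
 */
#include <stdio.h>

enum demo_state { DEMO_STOPPED, DEMO_STARTING, DEMO_READY, DEMO_STATE_MAX };

struct demo_sm;
typedef void (*demo_transition_t)(struct demo_sm *sm);

struct demo_state_entry {
	demo_transition_t enter_state;	/* optional, may be NULL */
	demo_transition_t exit_state;	/* optional, may be NULL */
};

struct demo_sm {
	enum demo_state previous_state_id;
	enum demo_state current_state_id;
	const struct demo_state_entry *state_table;
};

static void demo_starting_enter(struct demo_sm *sm) { (void)sm; printf("enter STARTING\n"); }
static void demo_starting_exit(struct demo_sm *sm)  { (void)sm; printf("exit  STARTING\n"); }
static void demo_ready_enter(struct demo_sm *sm)    { (void)sm; printf("enter READY\n"); }

static const struct demo_state_entry demo_state_table[DEMO_STATE_MAX] = {
	[DEMO_STOPPED]  = { },
	[DEMO_STARTING] = { .enter_state = demo_starting_enter,
			    .exit_state  = demo_starting_exit },
	[DEMO_READY]    = { .enter_state = demo_ready_enter },
};

/* mirrors sci_init_sm(): record the initial state and run its enter handler */
static void demo_init_sm(struct demo_sm *sm, const struct demo_state_entry *table,
			 enum demo_state initial)
{
	sm->previous_state_id = initial;
	sm->current_state_id = initial;
	sm->state_table = table;
	if (table[initial].enter_state)
		table[initial].enter_state(sm);
}

/* mirrors sci_change_state(): old state's exit handler, then new state's enter handler */
static void demo_change_state(struct demo_sm *sm, enum demo_state next)
{
	if (sm->state_table[sm->current_state_id].exit_state)
		sm->state_table[sm->current_state_id].exit_state(sm);
	sm->previous_state_id = sm->current_state_id;
	sm->current_state_id = next;
	if (sm->state_table[next].enter_state)
		sm->state_table[next].enter_state(sm);
}

int main(void)
{
	struct demo_sm sm;

	demo_init_sm(&sm, demo_state_table, DEMO_STOPPED);
	demo_change_state(&sm, DEMO_STARTING);	/* no exit for STOPPED, enter STARTING */
	demo_change_state(&sm, DEMO_READY);	/* exit STARTING, enter READY */
	return 0;
}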
linux-master
drivers/scsi/isci/phy.c
/* * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * The full GNU General Public License is included in this distribution * in the file called LICENSE.GPL. * * BSD LICENSE * * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <linux/circ_buf.h> #include <linux/device.h> #include <scsi/sas.h> #include "host.h" #include "isci.h" #include "port.h" #include "probe_roms.h" #include "remote_device.h" #include "request.h" #include "scu_completion_codes.h" #include "scu_event_codes.h" #include "registers.h" #include "scu_remote_node_context.h" #include "scu_task_context.h" #define SCU_CONTEXT_RAM_INIT_STALL_TIME 200 #define smu_max_ports(dcc_value) \ (\ (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK) \ >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT) + 1 \ ) #define smu_max_task_contexts(dcc_value) \ (\ (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK) \ >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT) + 1 \ ) #define smu_max_rncs(dcc_value) \ (\ (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK) \ >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT) + 1 \ ) #define SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT 100 /* * The number of milliseconds to wait while a given phy is consuming power * before allowing another set of phys to consume power. Ultimately, this will * be specified by OEM parameter. */ #define SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL 500 /* * NORMALIZE_PUT_POINTER() - * * This macro will normalize the completion queue put pointer so its value can * be used as an array inde */ #define NORMALIZE_PUT_POINTER(x) \ ((x) & SMU_COMPLETION_QUEUE_PUT_POINTER_MASK) /* * NORMALIZE_EVENT_POINTER() - * * This macro will normalize the completion queue event entry so its value can * be used as an index. */ #define NORMALIZE_EVENT_POINTER(x) \ (\ ((x) & SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_MASK) \ >> SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_SHIFT \ ) /* * NORMALIZE_GET_POINTER() - * * This macro will normalize the completion queue get pointer so its value can * be used as an index into an array */ #define NORMALIZE_GET_POINTER(x) \ ((x) & SMU_COMPLETION_QUEUE_GET_POINTER_MASK) /* * NORMALIZE_GET_POINTER_CYCLE_BIT() - * * This macro will normalize the completion queue cycle pointer so it matches * the completion queue cycle bit */ #define NORMALIZE_GET_POINTER_CYCLE_BIT(x) \ ((SMU_CQGR_CYCLE_BIT & (x)) << (31 - SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT)) /* * COMPLETION_QUEUE_CYCLE_BIT() - * * This macro will return the cycle bit of the completion queue entry */ #define COMPLETION_QUEUE_CYCLE_BIT(x) ((x) & 0x80000000) /* Init the state machine and call the state entry function (if any) */ void sci_init_sm(struct sci_base_state_machine *sm, const struct sci_base_state *state_table, u32 initial_state) { sci_state_transition_t handler; sm->initial_state_id = initial_state; sm->previous_state_id = initial_state; sm->current_state_id = initial_state; sm->state_table = state_table; handler = sm->state_table[initial_state].enter_state; if (handler) handler(sm); } /* Call the state exit fn, update the current state, call the state entry fn */ void sci_change_state(struct sci_base_state_machine *sm, u32 next_state) { sci_state_transition_t handler; handler = sm->state_table[sm->current_state_id].exit_state; if (handler) handler(sm); sm->previous_state_id = sm->current_state_id; sm->current_state_id = next_state; handler = sm->state_table[sm->current_state_id].enter_state; if (handler) handler(sm); } static bool sci_controller_completion_queue_has_entries(struct isci_host *ihost) { u32 get_value = ihost->completion_queue_get; u32 get_index = get_value & SMU_COMPLETION_QUEUE_GET_POINTER_MASK; if (NORMALIZE_GET_POINTER_CYCLE_BIT(get_value) == COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[get_index])) return true; 
return false; } static bool sci_controller_isr(struct isci_host *ihost) { if (sci_controller_completion_queue_has_entries(ihost)) return true; /* we have a spurious interrupt it could be that we have already * emptied the completion queue from a previous interrupt * FIXME: really!? */ writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status); /* There is a race in the hardware that could cause us not to be * notified of an interrupt completion if we do not take this * step. We will mask then unmask the interrupts so if there is * another interrupt pending the clearing of the interrupt * source we get the next interrupt message. */ spin_lock(&ihost->scic_lock); if (test_bit(IHOST_IRQ_ENABLED, &ihost->flags)) { writel(0xFF000000, &ihost->smu_registers->interrupt_mask); writel(0, &ihost->smu_registers->interrupt_mask); } spin_unlock(&ihost->scic_lock); return false; } irqreturn_t isci_msix_isr(int vec, void *data) { struct isci_host *ihost = data; if (sci_controller_isr(ihost)) tasklet_schedule(&ihost->completion_tasklet); return IRQ_HANDLED; } static bool sci_controller_error_isr(struct isci_host *ihost) { u32 interrupt_status; interrupt_status = readl(&ihost->smu_registers->interrupt_status); interrupt_status &= (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND); if (interrupt_status != 0) { /* * There is an error interrupt pending so let it through and handle * in the callback */ return true; } /* * There is a race in the hardware that could cause us not to be notified * of an interrupt completion if we do not take this step. We will mask * then unmask the error interrupts so if there was another interrupt * pending we will be notified. * Could we write the value of (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND)? */ writel(0xff, &ihost->smu_registers->interrupt_mask); writel(0, &ihost->smu_registers->interrupt_mask); return false; } static void sci_controller_task_completion(struct isci_host *ihost, u32 ent) { u32 index = SCU_GET_COMPLETION_INDEX(ent); struct isci_request *ireq = ihost->reqs[index]; /* Make sure that we really want to process this IO request */ if (test_bit(IREQ_ACTIVE, &ireq->flags) && ireq->io_tag != SCI_CONTROLLER_INVALID_IO_TAG && ISCI_TAG_SEQ(ireq->io_tag) == ihost->io_request_sequence[index]) /* Yep this is a valid io request pass it along to the * io request handler */ sci_io_request_tc_completion(ireq, ent); } static void sci_controller_sdma_completion(struct isci_host *ihost, u32 ent) { u32 index; struct isci_request *ireq; struct isci_remote_device *idev; index = SCU_GET_COMPLETION_INDEX(ent); switch (scu_get_command_request_type(ent)) { case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC: case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC: ireq = ihost->reqs[index]; dev_warn(&ihost->pdev->dev, "%s: %x for io request %p\n", __func__, ent, ireq); /* @todo For a post TC operation we need to fail the IO * request */ break; case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC: case SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC: case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC: idev = ihost->device_table[index]; dev_warn(&ihost->pdev->dev, "%s: %x for device %p\n", __func__, ent, idev); /* @todo For a port RNC operation we need to fail the * device */ break; default: dev_warn(&ihost->pdev->dev, "%s: unknown completion type %x\n", __func__, ent); break; } } static void sci_controller_unsolicited_frame(struct isci_host *ihost, u32 ent) { u32 index; u32 frame_index; struct scu_unsolicited_frame_header *frame_header; struct isci_phy *iphy; struct isci_remote_device *idev; enum sci_status 
result = SCI_FAILURE; frame_index = SCU_GET_FRAME_INDEX(ent); frame_header = ihost->uf_control.buffers.array[frame_index].header; ihost->uf_control.buffers.array[frame_index].state = UNSOLICITED_FRAME_IN_USE; if (SCU_GET_FRAME_ERROR(ent)) { /* * / @todo If the IAF frame or SIGNATURE FIS frame has an error will * / this cause a problem? We expect the phy initialization will * / fail if there is an error in the frame. */ sci_controller_release_frame(ihost, frame_index); return; } if (frame_header->is_address_frame) { index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent); iphy = &ihost->phys[index]; result = sci_phy_frame_handler(iphy, frame_index); } else { index = SCU_GET_COMPLETION_INDEX(ent); if (index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) { /* * This is a signature fis or a frame from a direct attached SATA * device that has not yet been created. In either case forwared * the frame to the PE and let it take care of the frame data. */ index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent); iphy = &ihost->phys[index]; result = sci_phy_frame_handler(iphy, frame_index); } else { if (index < ihost->remote_node_entries) idev = ihost->device_table[index]; else idev = NULL; if (idev != NULL) result = sci_remote_device_frame_handler(idev, frame_index); else sci_controller_release_frame(ihost, frame_index); } } if (result != SCI_SUCCESS) { /* * / @todo Is there any reason to report some additional error message * / when we get this failure notifiction? */ } } static void sci_controller_event_completion(struct isci_host *ihost, u32 ent) { struct isci_remote_device *idev; struct isci_request *ireq; struct isci_phy *iphy; u32 index; index = SCU_GET_COMPLETION_INDEX(ent); switch (scu_get_event_type(ent)) { case SCU_EVENT_TYPE_SMU_COMMAND_ERROR: /* / @todo The driver did something wrong and we need to fix the condtion. */ dev_err(&ihost->pdev->dev, "%s: SCIC Controller 0x%p received SMU command error " "0x%x\n", __func__, ihost, ent); break; case SCU_EVENT_TYPE_SMU_PCQ_ERROR: case SCU_EVENT_TYPE_SMU_ERROR: case SCU_EVENT_TYPE_FATAL_MEMORY_ERROR: /* * / @todo This is a hardware failure and its likely that we want to * / reset the controller. */ dev_err(&ihost->pdev->dev, "%s: SCIC Controller 0x%p received fatal controller " "event 0x%x\n", __func__, ihost, ent); break; case SCU_EVENT_TYPE_TRANSPORT_ERROR: ireq = ihost->reqs[index]; sci_io_request_event_handler(ireq, ent); break; case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT: switch (scu_get_event_specifier(ent)) { case SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE: case SCU_EVENT_SPECIFIC_TASK_TIMEOUT: ireq = ihost->reqs[index]; if (ireq != NULL) sci_io_request_event_handler(ireq, ent); else dev_warn(&ihost->pdev->dev, "%s: SCIC Controller 0x%p received " "event 0x%x for io request object " "that doesn't exist.\n", __func__, ihost, ent); break; case SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT: idev = ihost->device_table[index]; if (idev != NULL) sci_remote_device_event_handler(idev, ent); else dev_warn(&ihost->pdev->dev, "%s: SCIC Controller 0x%p received " "event 0x%x for remote device object " "that doesn't exist.\n", __func__, ihost, ent); break; } break; case SCU_EVENT_TYPE_BROADCAST_CHANGE: /* * direct the broadcast change event to the phy first and then let * the phy redirect the broadcast change to the port object */ case SCU_EVENT_TYPE_ERR_CNT_EVENT: /* * direct error counter event to the phy object since that is where * we get the event notification. This is a type 4 event. 
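* For these event types the protocol engine index embedded in the completion entry selects the phy that handles the event.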
*/ case SCU_EVENT_TYPE_OSSP_EVENT: index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent); iphy = &ihost->phys[index]; sci_phy_event_handler(iphy, ent); break; case SCU_EVENT_TYPE_RNC_SUSPEND_TX: case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX: case SCU_EVENT_TYPE_RNC_OPS_MISC: if (index < ihost->remote_node_entries) { idev = ihost->device_table[index]; if (idev != NULL) sci_remote_device_event_handler(idev, ent); } else dev_err(&ihost->pdev->dev, "%s: SCIC Controller 0x%p received event 0x%x " "for remote device object 0x%0x that doesn't " "exist.\n", __func__, ihost, ent, index); break; default: dev_warn(&ihost->pdev->dev, "%s: SCIC Controller received unknown event code %x\n", __func__, ent); break; } } static void sci_controller_process_completions(struct isci_host *ihost) { u32 completion_count = 0; u32 ent; u32 get_index; u32 get_cycle; u32 event_get; u32 event_cycle; dev_dbg(&ihost->pdev->dev, "%s: completion queue beginning get:0x%08x\n", __func__, ihost->completion_queue_get); /* Get the component parts of the completion queue */ get_index = NORMALIZE_GET_POINTER(ihost->completion_queue_get); get_cycle = SMU_CQGR_CYCLE_BIT & ihost->completion_queue_get; event_get = NORMALIZE_EVENT_POINTER(ihost->completion_queue_get); event_cycle = SMU_CQGR_EVENT_CYCLE_BIT & ihost->completion_queue_get; while ( NORMALIZE_GET_POINTER_CYCLE_BIT(get_cycle) == COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[get_index]) ) { completion_count++; ent = ihost->completion_queue[get_index]; /* increment the get pointer and check for rollover to toggle the cycle bit */ get_cycle ^= ((get_index+1) & SCU_MAX_COMPLETION_QUEUE_ENTRIES) << (SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT - SCU_MAX_COMPLETION_QUEUE_SHIFT); get_index = (get_index+1) & (SCU_MAX_COMPLETION_QUEUE_ENTRIES-1); dev_dbg(&ihost->pdev->dev, "%s: completion queue entry:0x%08x\n", __func__, ent); switch (SCU_GET_COMPLETION_TYPE(ent)) { case SCU_COMPLETION_TYPE_TASK: sci_controller_task_completion(ihost, ent); break; case SCU_COMPLETION_TYPE_SDMA: sci_controller_sdma_completion(ihost, ent); break; case SCU_COMPLETION_TYPE_UFI: sci_controller_unsolicited_frame(ihost, ent); break; case SCU_COMPLETION_TYPE_EVENT: sci_controller_event_completion(ihost, ent); break; case SCU_COMPLETION_TYPE_NOTIFY: { event_cycle ^= ((event_get+1) & SCU_MAX_EVENTS) << (SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT - SCU_MAX_EVENTS_SHIFT); event_get = (event_get+1) & (SCU_MAX_EVENTS-1); sci_controller_event_completion(ihost, ent); break; } default: dev_warn(&ihost->pdev->dev, "%s: SCIC Controller received unknown " "completion type %x\n", __func__, ent); break; } } /* Update the get register if we completed one or more entries */ if (completion_count > 0) { ihost->completion_queue_get = SMU_CQGR_GEN_BIT(ENABLE) | SMU_CQGR_GEN_BIT(EVENT_ENABLE) | event_cycle | SMU_CQGR_GEN_VAL(EVENT_POINTER, event_get) | get_cycle | SMU_CQGR_GEN_VAL(POINTER, get_index); writel(ihost->completion_queue_get, &ihost->smu_registers->completion_queue_get); } dev_dbg(&ihost->pdev->dev, "%s: completion queue ending get:0x%08x\n", __func__, ihost->completion_queue_get); } static void sci_controller_error_handler(struct isci_host *ihost) { u32 interrupt_status; interrupt_status = readl(&ihost->smu_registers->interrupt_status); if ((interrupt_status & SMU_ISR_QUEUE_SUSPEND) && sci_controller_completion_queue_has_entries(ihost)) { sci_controller_process_completions(ihost); writel(SMU_ISR_QUEUE_SUSPEND, &ihost->smu_registers->interrupt_status); } else { dev_err(&ihost->pdev->dev, "%s: status: %#x\n", __func__, 
interrupt_status); sci_change_state(&ihost->sm, SCIC_FAILED); return; } /* If we dont process any completions I am not sure that we want to do this. * We are in the middle of a hardware fault and should probably be reset. */ writel(0, &ihost->smu_registers->interrupt_mask); } irqreturn_t isci_intx_isr(int vec, void *data) { irqreturn_t ret = IRQ_NONE; struct isci_host *ihost = data; if (sci_controller_isr(ihost)) { writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status); tasklet_schedule(&ihost->completion_tasklet); ret = IRQ_HANDLED; } else if (sci_controller_error_isr(ihost)) { spin_lock(&ihost->scic_lock); sci_controller_error_handler(ihost); spin_unlock(&ihost->scic_lock); ret = IRQ_HANDLED; } return ret; } irqreturn_t isci_error_isr(int vec, void *data) { struct isci_host *ihost = data; if (sci_controller_error_isr(ihost)) sci_controller_error_handler(ihost); return IRQ_HANDLED; } /** * isci_host_start_complete() - This function is called by the core library, * through the ISCI Module, to indicate controller start status. * @ihost: This parameter specifies the ISCI host object * @completion_status: This parameter specifies the completion status from the * core library. * */ static void isci_host_start_complete(struct isci_host *ihost, enum sci_status completion_status) { if (completion_status != SCI_SUCCESS) dev_info(&ihost->pdev->dev, "controller start timed out, continuing...\n"); clear_bit(IHOST_START_PENDING, &ihost->flags); wake_up(&ihost->eventq); } int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time) { struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost); struct isci_host *ihost = ha->lldd_ha; if (test_bit(IHOST_START_PENDING, &ihost->flags)) return 0; sas_drain_work(ha); return 1; } /** * sci_controller_get_suggested_start_timeout() - This method returns the * suggested sci_controller_start() timeout amount. The user is free to * use any timeout value, but this method provides the suggested minimum * start timeout value. The returned value is based upon empirical * information determined as a result of interoperability testing. * @ihost: the handle to the controller object for which to return the * suggested start timeout. * * This method returns the number of milliseconds for the suggested start * operation timeout. */ static u32 sci_controller_get_suggested_start_timeout(struct isci_host *ihost) { /* Validate the user supplied parameters. */ if (!ihost) return 0; /* * The suggested minimum timeout value for a controller start operation: * * Signature FIS Timeout * + Phy Start Timeout * + Number of Phy Spin Up Intervals * --------------------------------- * Number of milliseconds for the controller start operation. * * NOTE: The number of phy spin up intervals will be equivalent * to the number of phys divided by the number phys allowed * per interval - 1 (once OEM parameters are supported). * Currently we assume only 1 phy per interval. 
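* With the 100 ms phy start timeout and 500 ms power control interval defined above, the spin-up term contributes (SCI_MAX_PHYS - 1) * 500 ms on top of the signature FIS and phy start timeouts.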
*/ return SCIC_SDS_SIGNATURE_FIS_TIMEOUT + SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT + ((SCI_MAX_PHYS - 1) * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL); } static void sci_controller_enable_interrupts(struct isci_host *ihost) { set_bit(IHOST_IRQ_ENABLED, &ihost->flags); writel(0, &ihost->smu_registers->interrupt_mask); } void sci_controller_disable_interrupts(struct isci_host *ihost) { clear_bit(IHOST_IRQ_ENABLED, &ihost->flags); writel(0xffffffff, &ihost->smu_registers->interrupt_mask); readl(&ihost->smu_registers->interrupt_mask); /* flush */ } static void sci_controller_enable_port_task_scheduler(struct isci_host *ihost) { u32 port_task_scheduler_value; port_task_scheduler_value = readl(&ihost->scu_registers->peg0.ptsg.control); port_task_scheduler_value |= (SCU_PTSGCR_GEN_BIT(ETM_ENABLE) | SCU_PTSGCR_GEN_BIT(PTSG_ENABLE)); writel(port_task_scheduler_value, &ihost->scu_registers->peg0.ptsg.control); } static void sci_controller_assign_task_entries(struct isci_host *ihost) { u32 task_assignment; /* * Assign all the TCs to function 0 * TODO: Do we actually need to read this register to write it back? */ task_assignment = readl(&ihost->smu_registers->task_context_assignment[0]); task_assignment |= (SMU_TCA_GEN_VAL(STARTING, 0)) | (SMU_TCA_GEN_VAL(ENDING, ihost->task_context_entries - 1)) | (SMU_TCA_GEN_BIT(RANGE_CHECK_ENABLE)); writel(task_assignment, &ihost->smu_registers->task_context_assignment[0]); } static void sci_controller_initialize_completion_queue(struct isci_host *ihost) { u32 index; u32 completion_queue_control_value; u32 completion_queue_get_value; u32 completion_queue_put_value; ihost->completion_queue_get = 0; completion_queue_control_value = (SMU_CQC_QUEUE_LIMIT_SET(SCU_MAX_COMPLETION_QUEUE_ENTRIES - 1) | SMU_CQC_EVENT_LIMIT_SET(SCU_MAX_EVENTS - 1)); writel(completion_queue_control_value, &ihost->smu_registers->completion_queue_control); /* Set the completion queue get pointer and enable the queue */ completion_queue_get_value = ( (SMU_CQGR_GEN_VAL(POINTER, 0)) | (SMU_CQGR_GEN_VAL(EVENT_POINTER, 0)) | (SMU_CQGR_GEN_BIT(ENABLE)) | (SMU_CQGR_GEN_BIT(EVENT_ENABLE)) ); writel(completion_queue_get_value, &ihost->smu_registers->completion_queue_get); /* Set the completion queue put pointer */ completion_queue_put_value = ( (SMU_CQPR_GEN_VAL(POINTER, 0)) | (SMU_CQPR_GEN_VAL(EVENT_POINTER, 0)) ); writel(completion_queue_put_value, &ihost->smu_registers->completion_queue_put); /* Initialize the cycle bit of the completion queue entries */ for (index = 0; index < SCU_MAX_COMPLETION_QUEUE_ENTRIES; index++) { /* * If get.cycle_bit != completion_queue.cycle_bit * its not a valid completion queue entry * so at system start all entries are invalid */ ihost->completion_queue[index] = 0x80000000; } } static void sci_controller_initialize_unsolicited_frame_queue(struct isci_host *ihost) { u32 frame_queue_control_value; u32 frame_queue_get_value; u32 frame_queue_put_value; /* Write the queue size */ frame_queue_control_value = SCU_UFQC_GEN_VAL(QUEUE_SIZE, SCU_MAX_UNSOLICITED_FRAMES); writel(frame_queue_control_value, &ihost->scu_registers->sdma.unsolicited_frame_queue_control); /* Setup the get pointer for the unsolicited frame queue */ frame_queue_get_value = ( SCU_UFQGP_GEN_VAL(POINTER, 0) | SCU_UFQGP_GEN_BIT(ENABLE_BIT) ); writel(frame_queue_get_value, &ihost->scu_registers->sdma.unsolicited_frame_get_pointer); /* Setup the put pointer for the unsolicited frame queue */ frame_queue_put_value = SCU_UFQPP_GEN_VAL(POINTER, 0); writel(frame_queue_put_value, 
&ihost->scu_registers->sdma.unsolicited_frame_put_pointer); } void sci_controller_transition_to_ready(struct isci_host *ihost, enum sci_status status) { if (ihost->sm.current_state_id == SCIC_STARTING) { /* * We move into the ready state, because some of the phys/ports * may be up and operational. */ sci_change_state(&ihost->sm, SCIC_READY); isci_host_start_complete(ihost, status); } } static bool is_phy_starting(struct isci_phy *iphy) { enum sci_phy_states state; state = iphy->sm.current_state_id; switch (state) { case SCI_PHY_STARTING: case SCI_PHY_SUB_INITIAL: case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN: case SCI_PHY_SUB_AWAIT_IAF_UF: case SCI_PHY_SUB_AWAIT_SAS_POWER: case SCI_PHY_SUB_AWAIT_SATA_POWER: case SCI_PHY_SUB_AWAIT_SATA_PHY_EN: case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN: case SCI_PHY_SUB_AWAIT_OSSP_EN: case SCI_PHY_SUB_AWAIT_SIG_FIS_UF: case SCI_PHY_SUB_FINAL: return true; default: return false; } } bool is_controller_start_complete(struct isci_host *ihost) { int i; for (i = 0; i < SCI_MAX_PHYS; i++) { struct isci_phy *iphy = &ihost->phys[i]; u32 state = iphy->sm.current_state_id; /* in apc mode we need to check every phy, in * mpc mode we only need to check phys that have * been configured into a port */ if (is_port_config_apc(ihost)) /* pass */; else if (!phy_get_non_dummy_port(iphy)) continue; /* The controller start operation is complete iff: * - all links have been given an opportunity to start * - have no indication of a connected device * - have an indication of a connected device and it has * finished the link training process. */ if ((iphy->is_in_link_training == false && state == SCI_PHY_INITIAL) || (iphy->is_in_link_training == false && state == SCI_PHY_STOPPED) || (iphy->is_in_link_training == true && is_phy_starting(iphy)) || (ihost->port_agent.phy_ready_mask != ihost->port_agent.phy_configured_mask)) return false; } return true; } /** * sci_controller_start_next_phy - start phy * @ihost: controller * * If all the phys have been started, then attempt to transition the * controller to the READY state and inform the user * (sci_cb_controller_start_complete()). 
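* Otherwise the next phy in sequence is started and the phy start timer is (re)armed; in manual (MPC) port configuration mode phys that were never assigned to a port are skipped.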
*/ static enum sci_status sci_controller_start_next_phy(struct isci_host *ihost) { struct sci_oem_params *oem = &ihost->oem_parameters; struct isci_phy *iphy; enum sci_status status; status = SCI_SUCCESS; if (ihost->phy_startup_timer_pending) return status; if (ihost->next_phy_to_start >= SCI_MAX_PHYS) { if (is_controller_start_complete(ihost)) { sci_controller_transition_to_ready(ihost, SCI_SUCCESS); sci_del_timer(&ihost->phy_timer); ihost->phy_startup_timer_pending = false; } } else { iphy = &ihost->phys[ihost->next_phy_to_start]; if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) { if (phy_get_non_dummy_port(iphy) == NULL) { ihost->next_phy_to_start++; /* Caution recursion ahead be forwarned * * The PHY was never added to a PORT in MPC mode * so start the next phy in sequence This phy * will never go link up and will not draw power * the OEM parameters either configured the phy * incorrectly for the PORT or it was never * assigned to a PORT */ return sci_controller_start_next_phy(ihost); } } status = sci_phy_start(iphy); if (status == SCI_SUCCESS) { sci_mod_timer(&ihost->phy_timer, SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT); ihost->phy_startup_timer_pending = true; } else { dev_warn(&ihost->pdev->dev, "%s: Controller stop operation failed " "to stop phy %d because of status " "%d.\n", __func__, ihost->phys[ihost->next_phy_to_start].phy_index, status); } ihost->next_phy_to_start++; } return status; } static void phy_startup_timeout(struct timer_list *t) { struct sci_timer *tmr = from_timer(tmr, t, timer); struct isci_host *ihost = container_of(tmr, typeof(*ihost), phy_timer); unsigned long flags; enum sci_status status; spin_lock_irqsave(&ihost->scic_lock, flags); if (tmr->cancel) goto done; ihost->phy_startup_timer_pending = false; do { status = sci_controller_start_next_phy(ihost); } while (status != SCI_SUCCESS); done: spin_unlock_irqrestore(&ihost->scic_lock, flags); } static u16 isci_tci_active(struct isci_host *ihost) { return CIRC_CNT(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS); } static enum sci_status sci_controller_start(struct isci_host *ihost, u32 timeout) { enum sci_status result; u16 index; if (ihost->sm.current_state_id != SCIC_INITIALIZED) { dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n", __func__, ihost->sm.current_state_id); return SCI_FAILURE_INVALID_STATE; } /* Build the TCi free pool */ BUILD_BUG_ON(SCI_MAX_IO_REQUESTS > 1 << sizeof(ihost->tci_pool[0]) * 8); ihost->tci_head = 0; ihost->tci_tail = 0; for (index = 0; index < ihost->task_context_entries; index++) isci_tci_free(ihost, index); /* Build the RNi free pool */ sci_remote_node_table_initialize(&ihost->available_remote_nodes, ihost->remote_node_entries); /* * Before anything else lets make sure we will not be * interrupted by the hardware. 
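* (interrupts are re-enabled by isci_host_start() once the start request has been issued).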
*/ sci_controller_disable_interrupts(ihost); /* Enable the port task scheduler */ sci_controller_enable_port_task_scheduler(ihost); /* Assign all the task entries to ihost physical function */ sci_controller_assign_task_entries(ihost); /* Now initialize the completion queue */ sci_controller_initialize_completion_queue(ihost); /* Initialize the unsolicited frame queue for use */ sci_controller_initialize_unsolicited_frame_queue(ihost); /* Start all of the ports on this controller */ for (index = 0; index < ihost->logical_port_entries; index++) { struct isci_port *iport = &ihost->ports[index]; result = sci_port_start(iport); if (result) return result; } sci_controller_start_next_phy(ihost); sci_mod_timer(&ihost->timer, timeout); sci_change_state(&ihost->sm, SCIC_STARTING); return SCI_SUCCESS; } void isci_host_start(struct Scsi_Host *shost) { struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha; unsigned long tmo = sci_controller_get_suggested_start_timeout(ihost); set_bit(IHOST_START_PENDING, &ihost->flags); spin_lock_irq(&ihost->scic_lock); sci_controller_start(ihost, tmo); sci_controller_enable_interrupts(ihost); spin_unlock_irq(&ihost->scic_lock); } static void isci_host_stop_complete(struct isci_host *ihost) { sci_controller_disable_interrupts(ihost); clear_bit(IHOST_STOP_PENDING, &ihost->flags); wake_up(&ihost->eventq); } static void sci_controller_completion_handler(struct isci_host *ihost) { /* Empty out the completion queue */ if (sci_controller_completion_queue_has_entries(ihost)) sci_controller_process_completions(ihost); /* Clear the interrupt and enable all interrupts again */ writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status); /* Could we write the value of SMU_ISR_COMPLETION? */ writel(0xFF000000, &ihost->smu_registers->interrupt_mask); writel(0, &ihost->smu_registers->interrupt_mask); } void ireq_done(struct isci_host *ihost, struct isci_request *ireq, struct sas_task *task) { if (!test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags) && !(task->task_state_flags & SAS_TASK_STATE_ABORTED)) { if (test_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags)) { /* Normal notification (task_done) */ dev_dbg(&ihost->pdev->dev, "%s: Normal - ireq/task = %p/%p\n", __func__, ireq, task); task->lldd_task = NULL; task->task_done(task); } else { dev_dbg(&ihost->pdev->dev, "%s: Error - ireq/task = %p/%p\n", __func__, ireq, task); if (sas_protocol_ata(task->task_proto)) task->lldd_task = NULL; sas_task_abort(task); } } else task->lldd_task = NULL; if (test_and_clear_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags)) wake_up_all(&ihost->eventq); if (!test_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags)) isci_free_tag(ihost, ireq->io_tag); } /** * isci_host_completion_routine() - This function is the delayed service * routine that calls the sci core library's completion handler. It's * scheduled as a tasklet from the interrupt service routine when interrupts * in use, or set as the timeout function in polled mode. 
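* Besides draining the completion queue it re-tunes the interrupt coalescence control based on the number of outstanding requests.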
* @data: This parameter specifies the ISCI host object * */ void isci_host_completion_routine(unsigned long data) { struct isci_host *ihost = (struct isci_host *)data; u16 active; spin_lock_irq(&ihost->scic_lock); sci_controller_completion_handler(ihost); spin_unlock_irq(&ihost->scic_lock); /* * we subtract SCI_MAX_PORTS to account for the number of dummy TCs * issued for hardware issue workaround */ active = isci_tci_active(ihost) - SCI_MAX_PORTS; /* * the coalesence timeout doubles at each encoding step, so * update it based on the ilog2 value of the outstanding requests */ writel(SMU_ICC_GEN_VAL(NUMBER, active) | SMU_ICC_GEN_VAL(TIMER, ISCI_COALESCE_BASE + ilog2(active)), &ihost->smu_registers->interrupt_coalesce_control); } /** * sci_controller_stop() - This method will stop an individual controller * object.This method will invoke the associated user callback upon * completion. The completion callback is called when the following * conditions are met: -# the method return status is SCI_SUCCESS. -# the * controller has been quiesced. This method will ensure that all IO * requests are quiesced, phys are stopped, and all additional operation by * the hardware is halted. * @ihost: the handle to the controller object to stop. * @timeout: This parameter specifies the number of milliseconds in which the * stop operation should complete. * * The controller must be in the STARTED or STOPPED state. Indicate if the * controller stop method succeeded or failed in some way. SCI_SUCCESS if the * stop operation successfully began. SCI_WARNING_ALREADY_IN_STATE if the * controller is already in the STOPPED state. SCI_FAILURE_INVALID_STATE if the * controller is not either in the STARTED or STOPPED states. */ static enum sci_status sci_controller_stop(struct isci_host *ihost, u32 timeout) { if (ihost->sm.current_state_id != SCIC_READY) { dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n", __func__, ihost->sm.current_state_id); return SCI_FAILURE_INVALID_STATE; } sci_mod_timer(&ihost->timer, timeout); sci_change_state(&ihost->sm, SCIC_STOPPING); return SCI_SUCCESS; } /** * sci_controller_reset() - This method will reset the supplied core * controller regardless of the state of said controller. This operation is * considered destructive. In other words, all current operations are wiped * out. No IO completions for outstanding devices occur. Outstanding IO * requests are not aborted or completed at the actual remote device. * @ihost: the handle to the controller object to reset. * * Indicate if the controller reset method succeeded or failed in some way. * SCI_SUCCESS if the reset operation successfully started. SCI_FATAL_ERROR if * the controller reset operation is unable to complete. */ static enum sci_status sci_controller_reset(struct isci_host *ihost) { switch (ihost->sm.current_state_id) { case SCIC_RESET: case SCIC_READY: case SCIC_STOPPING: case SCIC_FAILED: /* * The reset operation is not a graceful cleanup, just * perform the state transition. 
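* The SCIC_RESETTING enter handler performs the actual hardware reset and then returns the state machine to SCIC_RESET.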
*/ sci_change_state(&ihost->sm, SCIC_RESETTING); return SCI_SUCCESS; default: dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n", __func__, ihost->sm.current_state_id); return SCI_FAILURE_INVALID_STATE; } } static enum sci_status sci_controller_stop_phys(struct isci_host *ihost) { u32 index; enum sci_status status; enum sci_status phy_status; status = SCI_SUCCESS; for (index = 0; index < SCI_MAX_PHYS; index++) { phy_status = sci_phy_stop(&ihost->phys[index]); if (phy_status != SCI_SUCCESS && phy_status != SCI_FAILURE_INVALID_STATE) { status = SCI_FAILURE; dev_warn(&ihost->pdev->dev, "%s: Controller stop operation failed to stop " "phy %d because of status %d.\n", __func__, ihost->phys[index].phy_index, phy_status); } } return status; } /** * isci_host_deinit - shutdown frame reception and dma * @ihost: host to take down * * This is called in either the driver shutdown or the suspend path. In * the shutdown case libsas went through port teardown and normal device * removal (i.e. physical links stayed up to service scsi_device removal * commands). In the suspend case we disable the hardware without * notifying libsas of the link down events since we want libsas to * remember the domain across the suspend/resume cycle */ void isci_host_deinit(struct isci_host *ihost) { int i; /* disable output data selects */ for (i = 0; i < isci_gpio_count(ihost); i++) writel(SGPIO_HW_CONTROL, &ihost->scu_registers->peg0.sgpio.output_data_select[i]); set_bit(IHOST_STOP_PENDING, &ihost->flags); spin_lock_irq(&ihost->scic_lock); sci_controller_stop(ihost, SCIC_CONTROLLER_STOP_TIMEOUT); spin_unlock_irq(&ihost->scic_lock); wait_for_stop(ihost); /* phy stop is after controller stop to allow port and device to * go idle before shutting down the phys, but the expectation is * that i/o has been shut off well before we reach this * function. 
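* (the SGPIO interface is also disabled below once the phys have been stopped).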
*/ sci_controller_stop_phys(ihost); /* disable sgpio: where the above wait should give time for the * enclosure to sample the gpios going inactive */ writel(0, &ihost->scu_registers->peg0.sgpio.interface_control); spin_lock_irq(&ihost->scic_lock); sci_controller_reset(ihost); spin_unlock_irq(&ihost->scic_lock); /* Cancel any/all outstanding port timers */ for (i = 0; i < ihost->logical_port_entries; i++) { struct isci_port *iport = &ihost->ports[i]; del_timer_sync(&iport->timer.timer); } /* Cancel any/all outstanding phy timers */ for (i = 0; i < SCI_MAX_PHYS; i++) { struct isci_phy *iphy = &ihost->phys[i]; del_timer_sync(&iphy->sata_timer.timer); } del_timer_sync(&ihost->port_agent.timer.timer); del_timer_sync(&ihost->power_control.timer.timer); del_timer_sync(&ihost->timer.timer); del_timer_sync(&ihost->phy_timer.timer); } static void __iomem *scu_base(struct isci_host *isci_host) { struct pci_dev *pdev = isci_host->pdev; int id = isci_host->id; return pcim_iomap_table(pdev)[SCI_SCU_BAR * 2] + SCI_SCU_BAR_SIZE * id; } static void __iomem *smu_base(struct isci_host *isci_host) { struct pci_dev *pdev = isci_host->pdev; int id = isci_host->id; return pcim_iomap_table(pdev)[SCI_SMU_BAR * 2] + SCI_SMU_BAR_SIZE * id; } static void sci_controller_initial_state_enter(struct sci_base_state_machine *sm) { struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); sci_change_state(&ihost->sm, SCIC_RESET); } static inline void sci_controller_starting_state_exit(struct sci_base_state_machine *sm) { struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); sci_del_timer(&ihost->timer); } #define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS 853 #define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS 1280 #define INTERRUPT_COALESCE_TIMEOUT_MAX_US 2700000 #define INTERRUPT_COALESCE_NUMBER_MAX 256 #define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN 7 #define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX 28 /** * sci_controller_set_interrupt_coalescence() - This method allows the user to * configure the interrupt coalescence. * @ihost: This parameter represents the handle to the controller object * for which its interrupt coalesce register is overridden. * @coalesce_number: Used to control the number of entries in the Completion * Queue before an interrupt is generated. If the number of entries exceed * this number, an interrupt will be generated. The valid range of the input * is [0, 256]. A setting of 0 results in coalescing being disabled. * @coalesce_timeout: Timeout value in microseconds. The valid range of the * input is [0, 2700000] . A setting of 0 is allowed and results in no * interrupt coalescing timeout. * * Indicate if the user successfully set the interrupt coalesce parameters. * SCI_SUCCESS The user successfully updated the interrutp coalescence. * SCI_FAILURE_INVALID_PARAMETER_VALUE The user input value is out of range. */ static enum sci_status sci_controller_set_interrupt_coalescence(struct isci_host *ihost, u32 coalesce_number, u32 coalesce_timeout) { u8 timeout_encode = 0; u32 min = 0; u32 max = 0; /* Check if the input parameters fall in the range. 
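* For example, a coalesce_timeout of 5 us becomes 500 ten-nanosecond units; the encoding loop below doubles the 85/128 base range until 500 falls within it and selects timeout_encode 9 (the 3.4 - 5.1 us row of the table below).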
*/ if (coalesce_number > INTERRUPT_COALESCE_NUMBER_MAX) return SCI_FAILURE_INVALID_PARAMETER_VALUE; /* * Defined encoding for interrupt coalescing timeout: * Value Min Max Units * ----- --- --- ----- * 0 - - Disabled * 1 13.3 20.0 ns * 2 26.7 40.0 * 3 53.3 80.0 * 4 106.7 160.0 * 5 213.3 320.0 * 6 426.7 640.0 * 7 853.3 1280.0 * 8 1.7 2.6 us * 9 3.4 5.1 * 10 6.8 10.2 * 11 13.7 20.5 * 12 27.3 41.0 * 13 54.6 81.9 * 14 109.2 163.8 * 15 218.5 327.7 * 16 436.9 655.4 * 17 873.8 1310.7 * 18 1.7 2.6 ms * 19 3.5 5.2 * 20 7.0 10.5 * 21 14.0 21.0 * 22 28.0 41.9 * 23 55.9 83.9 * 24 111.8 167.8 * 25 223.7 335.5 * 26 447.4 671.1 * 27 894.8 1342.2 * 28 1.8 2.7 s * Others Undefined */ /* * Use the table above to decide the encode of interrupt coalescing timeout * value for register writing. */ if (coalesce_timeout == 0) timeout_encode = 0; else{ /* make the timeout value in unit of (10 ns). */ coalesce_timeout = coalesce_timeout * 100; min = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS / 10; max = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS / 10; /* get the encode of timeout for register writing. */ for (timeout_encode = INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN; timeout_encode <= INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX; timeout_encode++) { if (min <= coalesce_timeout && max > coalesce_timeout) break; else if (coalesce_timeout >= max && coalesce_timeout < min * 2 && coalesce_timeout <= INTERRUPT_COALESCE_TIMEOUT_MAX_US * 100) { if ((coalesce_timeout - max) < (2 * min - coalesce_timeout)) break; else{ timeout_encode++; break; } } else { max = max * 2; min = min * 2; } } if (timeout_encode == INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX + 1) /* the value is out of range. */ return SCI_FAILURE_INVALID_PARAMETER_VALUE; } writel(SMU_ICC_GEN_VAL(NUMBER, coalesce_number) | SMU_ICC_GEN_VAL(TIMER, timeout_encode), &ihost->smu_registers->interrupt_coalesce_control); ihost->interrupt_coalesce_number = (u16)coalesce_number; ihost->interrupt_coalesce_timeout = coalesce_timeout / 100; return SCI_SUCCESS; } static void sci_controller_ready_state_enter(struct sci_base_state_machine *sm) { struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); u32 val; /* enable clock gating for power control of the scu unit */ val = readl(&ihost->smu_registers->clock_gating_control); val &= ~(SMU_CGUCR_GEN_BIT(REGCLK_ENABLE) | SMU_CGUCR_GEN_BIT(TXCLK_ENABLE) | SMU_CGUCR_GEN_BIT(XCLK_ENABLE)); val |= SMU_CGUCR_GEN_BIT(IDLE_ENABLE); writel(val, &ihost->smu_registers->clock_gating_control); /* set the default interrupt coalescence number and timeout value. */ sci_controller_set_interrupt_coalescence(ihost, 0, 0); } static void sci_controller_ready_state_exit(struct sci_base_state_machine *sm) { struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); /* disable interrupt coalescence. 
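* (a (0, 0) setting disables both the completion-count trigger and the timeout, as documented for sci_controller_set_interrupt_coalescence()).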
*/ sci_controller_set_interrupt_coalescence(ihost, 0, 0); } static enum sci_status sci_controller_stop_ports(struct isci_host *ihost) { u32 index; enum sci_status port_status; enum sci_status status = SCI_SUCCESS; for (index = 0; index < ihost->logical_port_entries; index++) { struct isci_port *iport = &ihost->ports[index]; port_status = sci_port_stop(iport); if ((port_status != SCI_SUCCESS) && (port_status != SCI_FAILURE_INVALID_STATE)) { status = SCI_FAILURE; dev_warn(&ihost->pdev->dev, "%s: Controller stop operation failed to " "stop port %d because of status %d.\n", __func__, iport->logical_port_index, port_status); } } return status; } static enum sci_status sci_controller_stop_devices(struct isci_host *ihost) { u32 index; enum sci_status status; enum sci_status device_status; status = SCI_SUCCESS; for (index = 0; index < ihost->remote_node_entries; index++) { if (ihost->device_table[index] != NULL) { /* / @todo What timeout value do we want to provide to this request? */ device_status = sci_remote_device_stop(ihost->device_table[index], 0); if ((device_status != SCI_SUCCESS) && (device_status != SCI_FAILURE_INVALID_STATE)) { dev_warn(&ihost->pdev->dev, "%s: Controller stop operation failed " "to stop device 0x%p because of " "status %d.\n", __func__, ihost->device_table[index], device_status); } } } return status; } static void sci_controller_stopping_state_enter(struct sci_base_state_machine *sm) { struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); sci_controller_stop_devices(ihost); sci_controller_stop_ports(ihost); if (!sci_controller_has_remote_devices_stopping(ihost)) isci_host_stop_complete(ihost); } static void sci_controller_stopping_state_exit(struct sci_base_state_machine *sm) { struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); sci_del_timer(&ihost->timer); } static void sci_controller_reset_hardware(struct isci_host *ihost) { /* Disable interrupts so we don't take any spurious interrupts */ sci_controller_disable_interrupts(ihost); /* Reset the SCU */ writel(0xFFFFFFFF, &ihost->smu_registers->soft_reset_control); /* Delay for 1ms before clearing the CQP and UFQPR. 
*/ udelay(1000); /* The write to the CQGR clears the CQP */ writel(0x00000000, &ihost->smu_registers->completion_queue_get); /* The write to the UFQGP clears the UFQPR */ writel(0, &ihost->scu_registers->sdma.unsolicited_frame_get_pointer); /* clear all interrupts */ writel(~SMU_INTERRUPT_STATUS_RESERVED_MASK, &ihost->smu_registers->interrupt_status); } static void sci_controller_resetting_state_enter(struct sci_base_state_machine *sm) { struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); sci_controller_reset_hardware(ihost); sci_change_state(&ihost->sm, SCIC_RESET); } static const struct sci_base_state sci_controller_state_table[] = { [SCIC_INITIAL] = { .enter_state = sci_controller_initial_state_enter, }, [SCIC_RESET] = {}, [SCIC_INITIALIZING] = {}, [SCIC_INITIALIZED] = {}, [SCIC_STARTING] = { .exit_state = sci_controller_starting_state_exit, }, [SCIC_READY] = { .enter_state = sci_controller_ready_state_enter, .exit_state = sci_controller_ready_state_exit, }, [SCIC_RESETTING] = { .enter_state = sci_controller_resetting_state_enter, }, [SCIC_STOPPING] = { .enter_state = sci_controller_stopping_state_enter, .exit_state = sci_controller_stopping_state_exit, }, [SCIC_FAILED] = {} }; static void controller_timeout(struct timer_list *t) { struct sci_timer *tmr = from_timer(tmr, t, timer); struct isci_host *ihost = container_of(tmr, typeof(*ihost), timer); struct sci_base_state_machine *sm = &ihost->sm; unsigned long flags; spin_lock_irqsave(&ihost->scic_lock, flags); if (tmr->cancel) goto done; if (sm->current_state_id == SCIC_STARTING) sci_controller_transition_to_ready(ihost, SCI_FAILURE_TIMEOUT); else if (sm->current_state_id == SCIC_STOPPING) { sci_change_state(sm, SCIC_FAILED); isci_host_stop_complete(ihost); } else /* / @todo Now what do we want to do in this case? 
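* (the timer fired while the controller was neither starting nor stopping; the code below only logs the condition).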
*/ dev_err(&ihost->pdev->dev, "%s: Controller timer fired when controller was not " "in a state being timed.\n", __func__); done: spin_unlock_irqrestore(&ihost->scic_lock, flags); } static enum sci_status sci_controller_construct(struct isci_host *ihost, void __iomem *scu_base, void __iomem *smu_base) { u8 i; sci_init_sm(&ihost->sm, sci_controller_state_table, SCIC_INITIAL); ihost->scu_registers = scu_base; ihost->smu_registers = smu_base; sci_port_configuration_agent_construct(&ihost->port_agent); /* Construct the ports for this controller */ for (i = 0; i < SCI_MAX_PORTS; i++) sci_port_construct(&ihost->ports[i], i, ihost); sci_port_construct(&ihost->ports[i], SCIC_SDS_DUMMY_PORT, ihost); /* Construct the phys for this controller */ for (i = 0; i < SCI_MAX_PHYS; i++) { /* Add all the PHYs to the dummy port */ sci_phy_construct(&ihost->phys[i], &ihost->ports[SCI_MAX_PORTS], i); } ihost->invalid_phy_mask = 0; sci_init_timer(&ihost->timer, controller_timeout); return sci_controller_reset(ihost); } int sci_oem_parameters_validate(struct sci_oem_params *oem, u8 version) { int i; for (i = 0; i < SCI_MAX_PORTS; i++) if (oem->ports[i].phy_mask > SCIC_SDS_PARM_PHY_MASK_MAX) return -EINVAL; for (i = 0; i < SCI_MAX_PHYS; i++) if (oem->phys[i].sas_address.high == 0 && oem->phys[i].sas_address.low == 0) return -EINVAL; if (oem->controller.mode_type == SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE) { for (i = 0; i < SCI_MAX_PHYS; i++) if (oem->ports[i].phy_mask != 0) return -EINVAL; } else if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) { u8 phy_mask = 0; for (i = 0; i < SCI_MAX_PHYS; i++) phy_mask |= oem->ports[i].phy_mask; if (phy_mask == 0) return -EINVAL; } else return -EINVAL; if (oem->controller.max_concurr_spin_up > MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT || oem->controller.max_concurr_spin_up < 1) return -EINVAL; if (oem->controller.do_enable_ssc) { if (version < ISCI_ROM_VER_1_1 && oem->controller.do_enable_ssc != 1) return -EINVAL; if (version >= ISCI_ROM_VER_1_1) { u8 test = oem->controller.ssc_sata_tx_spread_level; switch (test) { case 0: case 2: case 3: case 6: case 7: break; default: return -EINVAL; } test = oem->controller.ssc_sas_tx_spread_level; if (oem->controller.ssc_sas_tx_type == 0) { switch (test) { case 0: case 2: case 3: break; default: return -EINVAL; } } else if (oem->controller.ssc_sas_tx_type == 1) { switch (test) { case 0: case 3: case 6: break; default: return -EINVAL; } } } } return 0; } static u8 max_spin_up(struct isci_host *ihost) { if (ihost->user_parameters.max_concurr_spinup) return min_t(u8, ihost->user_parameters.max_concurr_spinup, MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT); else return min_t(u8, ihost->oem_parameters.controller.max_concurr_spin_up, MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT); } static void power_control_timeout(struct timer_list *t) { struct sci_timer *tmr = from_timer(tmr, t, timer); struct isci_host *ihost = container_of(tmr, typeof(*ihost), power_control.timer); struct isci_phy *iphy; unsigned long flags; u8 i; spin_lock_irqsave(&ihost->scic_lock, flags); if (tmr->cancel) goto done; ihost->power_control.phys_granted_power = 0; if (ihost->power_control.phys_waiting == 0) { ihost->power_control.timer_started = false; goto done; } for (i = 0; i < SCI_MAX_PHYS; i++) { if (ihost->power_control.phys_waiting == 0) break; iphy = ihost->power_control.requesters[i]; if (iphy == NULL) continue; if (ihost->power_control.phys_granted_power >= max_spin_up(ihost)) break; ihost->power_control.requesters[i] = NULL; ihost->power_control.phys_waiting--; 
ihost->power_control.phys_granted_power++; sci_phy_consume_power_handler(iphy); if (iphy->protocol == SAS_PROTOCOL_SSP) { u8 j; for (j = 0; j < SCI_MAX_PHYS; j++) { struct isci_phy *requester = ihost->power_control.requesters[j]; /* * Search the power_control queue to see if there are other phys * attached to the same remote device. If found, take all of * them out of await_sas_power state. */ if (requester != NULL && requester != iphy) { u8 other = memcmp(requester->frame_rcvd.iaf.sas_addr, iphy->frame_rcvd.iaf.sas_addr, sizeof(requester->frame_rcvd.iaf.sas_addr)); if (other == 0) { ihost->power_control.requesters[j] = NULL; ihost->power_control.phys_waiting--; sci_phy_consume_power_handler(requester); } } } } } /* * It doesn't matter if the power list is empty, we need to start the * timer in case another phy becomes ready. */ sci_mod_timer(tmr, SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL); ihost->power_control.timer_started = true; done: spin_unlock_irqrestore(&ihost->scic_lock, flags); } void sci_controller_power_control_queue_insert(struct isci_host *ihost, struct isci_phy *iphy) { BUG_ON(iphy == NULL); if (ihost->power_control.phys_granted_power < max_spin_up(ihost)) { ihost->power_control.phys_granted_power++; sci_phy_consume_power_handler(iphy); /* * stop and start the power_control timer. When the timer fires, the * no_of_phys_granted_power will be set to 0 */ if (ihost->power_control.timer_started) sci_del_timer(&ihost->power_control.timer); sci_mod_timer(&ihost->power_control.timer, SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL); ihost->power_control.timer_started = true; } else { /* * There are phys, attached to the same sas address as this phy, are * already in READY state, this phy don't need wait. */ u8 i; struct isci_phy *current_phy; for (i = 0; i < SCI_MAX_PHYS; i++) { u8 other; current_phy = &ihost->phys[i]; other = memcmp(current_phy->frame_rcvd.iaf.sas_addr, iphy->frame_rcvd.iaf.sas_addr, sizeof(current_phy->frame_rcvd.iaf.sas_addr)); if (current_phy->sm.current_state_id == SCI_PHY_READY && current_phy->protocol == SAS_PROTOCOL_SSP && other == 0) { sci_phy_consume_power_handler(iphy); break; } } if (i == SCI_MAX_PHYS) { /* Add the phy in the waiting list */ ihost->power_control.requesters[iphy->phy_index] = iphy; ihost->power_control.phys_waiting++; } } } void sci_controller_power_control_queue_remove(struct isci_host *ihost, struct isci_phy *iphy) { BUG_ON(iphy == NULL); if (ihost->power_control.requesters[iphy->phy_index]) ihost->power_control.phys_waiting--; ihost->power_control.requesters[iphy->phy_index] = NULL; } static int is_long_cable(int phy, unsigned char selection_byte) { return !!(selection_byte & (1 << phy)); } static int is_medium_cable(int phy, unsigned char selection_byte) { return !!(selection_byte & (1 << (phy + 4))); } static enum cable_selections decode_selection_byte( int phy, unsigned char selection_byte) { return ((selection_byte & (1 << phy)) ? 1 : 0) + (selection_byte & (1 << (phy + 4)) ? 
2 : 0); } static unsigned char *to_cable_select(struct isci_host *ihost) { if (is_cable_select_overridden()) return ((unsigned char *)&cable_selection_override) + ihost->id; else return &ihost->oem_parameters.controller.cable_selection_mask; } enum cable_selections decode_cable_selection(struct isci_host *ihost, int phy) { return decode_selection_byte(phy, *to_cable_select(ihost)); } char *lookup_cable_names(enum cable_selections selection) { static char *cable_names[] = { [short_cable] = "short", [long_cable] = "long", [medium_cable] = "medium", [undefined_cable] = "<undefined, assumed long>" /* bit 0==1 */ }; return (selection <= undefined_cable) ? cable_names[selection] : cable_names[undefined_cable]; } #define AFE_REGISTER_WRITE_DELAY 10 static void sci_controller_afe_initialization(struct isci_host *ihost) { struct scu_afe_registers __iomem *afe = &ihost->scu_registers->afe; const struct sci_oem_params *oem = &ihost->oem_parameters; struct pci_dev *pdev = ihost->pdev; u32 afe_status; u32 phy_id; unsigned char cable_selection_mask = *to_cable_select(ihost); /* Clear DFX Status registers */ writel(0x0081000f, &afe->afe_dfx_master_control0); udelay(AFE_REGISTER_WRITE_DELAY); if (is_b0(pdev) || is_c0(pdev) || is_c1(pdev)) { /* PM Rx Equalization Save, PM SPhy Rx Acknowledgement * Timer, PM Stagger Timer */ writel(0x0007FFFF, &afe->afe_pmsn_master_control2); udelay(AFE_REGISTER_WRITE_DELAY); } /* Configure bias currents to normal */ if (is_a2(pdev)) writel(0x00005A00, &afe->afe_bias_control); else if (is_b0(pdev) || is_c0(pdev)) writel(0x00005F00, &afe->afe_bias_control); else if (is_c1(pdev)) writel(0x00005500, &afe->afe_bias_control); udelay(AFE_REGISTER_WRITE_DELAY); /* Enable PLL */ if (is_a2(pdev)) writel(0x80040908, &afe->afe_pll_control0); else if (is_b0(pdev) || is_c0(pdev)) writel(0x80040A08, &afe->afe_pll_control0); else if (is_c1(pdev)) { writel(0x80000B08, &afe->afe_pll_control0); udelay(AFE_REGISTER_WRITE_DELAY); writel(0x00000B08, &afe->afe_pll_control0); udelay(AFE_REGISTER_WRITE_DELAY); writel(0x80000B08, &afe->afe_pll_control0); } udelay(AFE_REGISTER_WRITE_DELAY); /* Wait for the PLL to lock */ do { afe_status = readl(&afe->afe_common_block_status); udelay(AFE_REGISTER_WRITE_DELAY); } while ((afe_status & 0x00001000) == 0); if (is_a2(pdev)) { /* Shorten SAS SNW lock time (RxLock timer value from 76 * us to 50 us) */ writel(0x7bcc96ad, &afe->afe_pmsn_master_control0); udelay(AFE_REGISTER_WRITE_DELAY); } for (phy_id = 0; phy_id < SCI_MAX_PHYS; phy_id++) { struct scu_afe_transceiver __iomem *xcvr = &afe->scu_afe_xcvr[phy_id]; const struct sci_phy_oem_params *oem_phy = &oem->phys[phy_id]; int cable_length_long = is_long_cable(phy_id, cable_selection_mask); int cable_length_medium = is_medium_cable(phy_id, cable_selection_mask); if (is_a2(pdev)) { /* All defaults, except the Receive Word * Alignament/Comma Detect Enable....(0xe800) */ writel(0x00004512, &xcvr->afe_xcvr_control0); udelay(AFE_REGISTER_WRITE_DELAY); writel(0x0050100F, &xcvr->afe_xcvr_control1); udelay(AFE_REGISTER_WRITE_DELAY); } else if (is_b0(pdev)) { /* Configure transmitter SSC parameters */ writel(0x00030000, &xcvr->afe_tx_ssc_control); udelay(AFE_REGISTER_WRITE_DELAY); } else if (is_c0(pdev)) { /* Configure transmitter SSC parameters */ writel(0x00010202, &xcvr->afe_tx_ssc_control); udelay(AFE_REGISTER_WRITE_DELAY); /* All defaults, except the Receive Word * Alignament/Comma Detect Enable....(0xe800) */ writel(0x00014500, &xcvr->afe_xcvr_control0); udelay(AFE_REGISTER_WRITE_DELAY); } else if (is_c1(pdev)) { 
/* Configure transmitter SSC parameters */ writel(0x00010202, &xcvr->afe_tx_ssc_control); udelay(AFE_REGISTER_WRITE_DELAY); /* All defaults, except the Receive Word * Alignament/Comma Detect Enable....(0xe800) */ writel(0x0001C500, &xcvr->afe_xcvr_control0); udelay(AFE_REGISTER_WRITE_DELAY); } /* Power up TX and RX out from power down (PWRDNTX and * PWRDNRX) & increase TX int & ext bias 20%....(0xe85c) */ if (is_a2(pdev)) writel(0x000003F0, &xcvr->afe_channel_control); else if (is_b0(pdev)) { writel(0x000003D7, &xcvr->afe_channel_control); udelay(AFE_REGISTER_WRITE_DELAY); writel(0x000003D4, &xcvr->afe_channel_control); } else if (is_c0(pdev)) { writel(0x000001E7, &xcvr->afe_channel_control); udelay(AFE_REGISTER_WRITE_DELAY); writel(0x000001E4, &xcvr->afe_channel_control); } else if (is_c1(pdev)) { writel(cable_length_long ? 0x000002F7 : 0x000001F7, &xcvr->afe_channel_control); udelay(AFE_REGISTER_WRITE_DELAY); writel(cable_length_long ? 0x000002F4 : 0x000001F4, &xcvr->afe_channel_control); } udelay(AFE_REGISTER_WRITE_DELAY); if (is_a2(pdev)) { /* Enable TX equalization (0xe824) */ writel(0x00040000, &xcvr->afe_tx_control); udelay(AFE_REGISTER_WRITE_DELAY); } if (is_a2(pdev) || is_b0(pdev)) /* RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0, * TPD=0x0(TX Power On), RDD=0x0(RX Detect * Enabled) ....(0xe800) */ writel(0x00004100, &xcvr->afe_xcvr_control0); else if (is_c0(pdev)) writel(0x00014100, &xcvr->afe_xcvr_control0); else if (is_c1(pdev)) writel(0x0001C100, &xcvr->afe_xcvr_control0); udelay(AFE_REGISTER_WRITE_DELAY); /* Leave DFE/FFE on */ if (is_a2(pdev)) writel(0x3F11103F, &xcvr->afe_rx_ssc_control0); else if (is_b0(pdev)) { writel(0x3F11103F, &xcvr->afe_rx_ssc_control0); udelay(AFE_REGISTER_WRITE_DELAY); /* Enable TX equalization (0xe824) */ writel(0x00040000, &xcvr->afe_tx_control); } else if (is_c0(pdev)) { writel(0x01400C0F, &xcvr->afe_rx_ssc_control1); udelay(AFE_REGISTER_WRITE_DELAY); writel(0x3F6F103F, &xcvr->afe_rx_ssc_control0); udelay(AFE_REGISTER_WRITE_DELAY); /* Enable TX equalization (0xe824) */ writel(0x00040000, &xcvr->afe_tx_control); } else if (is_c1(pdev)) { writel(cable_length_long ? 0x01500C0C : cable_length_medium ? 0x01400C0D : 0x02400C0D, &xcvr->afe_xcvr_control1); udelay(AFE_REGISTER_WRITE_DELAY); writel(0x000003E0, &xcvr->afe_dfx_rx_control1); udelay(AFE_REGISTER_WRITE_DELAY); writel(cable_length_long ? 0x33091C1F : cable_length_medium ? 
0x3315181F : 0x2B17161F, &xcvr->afe_rx_ssc_control0); udelay(AFE_REGISTER_WRITE_DELAY); /* Enable TX equalization (0xe824) */ writel(0x00040000, &xcvr->afe_tx_control); } udelay(AFE_REGISTER_WRITE_DELAY); writel(oem_phy->afe_tx_amp_control0, &xcvr->afe_tx_amp_control0); udelay(AFE_REGISTER_WRITE_DELAY); writel(oem_phy->afe_tx_amp_control1, &xcvr->afe_tx_amp_control1); udelay(AFE_REGISTER_WRITE_DELAY); writel(oem_phy->afe_tx_amp_control2, &xcvr->afe_tx_amp_control2); udelay(AFE_REGISTER_WRITE_DELAY); writel(oem_phy->afe_tx_amp_control3, &xcvr->afe_tx_amp_control3); udelay(AFE_REGISTER_WRITE_DELAY); } /* Transfer control to the PEs */ writel(0x00010f00, &afe->afe_dfx_master_control0); udelay(AFE_REGISTER_WRITE_DELAY); } static void sci_controller_initialize_power_control(struct isci_host *ihost) { sci_init_timer(&ihost->power_control.timer, power_control_timeout); memset(ihost->power_control.requesters, 0, sizeof(ihost->power_control.requesters)); ihost->power_control.phys_waiting = 0; ihost->power_control.phys_granted_power = 0; } static enum sci_status sci_controller_initialize(struct isci_host *ihost) { struct sci_base_state_machine *sm = &ihost->sm; enum sci_status result = SCI_FAILURE; unsigned long i, state, val; if (ihost->sm.current_state_id != SCIC_RESET) { dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n", __func__, ihost->sm.current_state_id); return SCI_FAILURE_INVALID_STATE; } sci_change_state(sm, SCIC_INITIALIZING); sci_init_timer(&ihost->phy_timer, phy_startup_timeout); ihost->next_phy_to_start = 0; ihost->phy_startup_timer_pending = false; sci_controller_initialize_power_control(ihost); /* * There is nothing to do here for B0 since we do not have to * program the AFE registers. * / @todo The AFE settings are supposed to be correct for the B0 but * / presently they seem to be wrong. 
*/ sci_controller_afe_initialization(ihost); /* Take the hardware out of reset */ writel(0, &ihost->smu_registers->soft_reset_control); /* * / @todo Provide meaningfull error code for hardware failure * result = SCI_FAILURE_CONTROLLER_HARDWARE; */ for (i = 100; i >= 1; i--) { u32 status; /* Loop until the hardware reports success */ udelay(SCU_CONTEXT_RAM_INIT_STALL_TIME); status = readl(&ihost->smu_registers->control_status); if ((status & SCU_RAM_INIT_COMPLETED) == SCU_RAM_INIT_COMPLETED) break; } if (i == 0) goto out; /* * Determine what are the actaul device capacities that the * hardware will support */ val = readl(&ihost->smu_registers->device_context_capacity); /* Record the smaller of the two capacity values */ ihost->logical_port_entries = min(smu_max_ports(val), SCI_MAX_PORTS); ihost->task_context_entries = min(smu_max_task_contexts(val), SCI_MAX_IO_REQUESTS); ihost->remote_node_entries = min(smu_max_rncs(val), SCI_MAX_REMOTE_DEVICES); /* * Make all PEs that are unassigned match up with the * logical ports */ for (i = 0; i < ihost->logical_port_entries; i++) { struct scu_port_task_scheduler_group_registers __iomem *ptsg = &ihost->scu_registers->peg0.ptsg; writel(i, &ptsg->protocol_engine[i]); } /* Initialize hardware PCI Relaxed ordering in DMA engines */ val = readl(&ihost->scu_registers->sdma.pdma_configuration); val |= SCU_PDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE); writel(val, &ihost->scu_registers->sdma.pdma_configuration); val = readl(&ihost->scu_registers->sdma.cdma_configuration); val |= SCU_CDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE); writel(val, &ihost->scu_registers->sdma.cdma_configuration); /* * Initialize the PHYs before the PORTs because the PHY registers * are accessed during the port initialization. */ for (i = 0; i < SCI_MAX_PHYS; i++) { result = sci_phy_initialize(&ihost->phys[i], &ihost->scu_registers->peg0.pe[i].tl, &ihost->scu_registers->peg0.pe[i].ll); if (result != SCI_SUCCESS) goto out; } for (i = 0; i < ihost->logical_port_entries; i++) { struct isci_port *iport = &ihost->ports[i]; iport->port_task_scheduler_registers = &ihost->scu_registers->peg0.ptsg.port[i]; iport->port_pe_configuration_register = &ihost->scu_registers->peg0.ptsg.protocol_engine[0]; iport->viit_registers = &ihost->scu_registers->peg0.viit[i]; } result = sci_port_configuration_agent_initialize(ihost, &ihost->port_agent); out: /* Advance the controller state machine */ if (result == SCI_SUCCESS) state = SCIC_INITIALIZED; else state = SCIC_FAILED; sci_change_state(sm, state); return result; } static int sci_controller_dma_alloc(struct isci_host *ihost) { struct device *dev = &ihost->pdev->dev; size_t size; int i; /* detect re-initialization */ if (ihost->completion_queue) return 0; size = SCU_MAX_COMPLETION_QUEUE_ENTRIES * sizeof(u32); ihost->completion_queue = dmam_alloc_coherent(dev, size, &ihost->cq_dma, GFP_KERNEL); if (!ihost->completion_queue) return -ENOMEM; size = ihost->remote_node_entries * sizeof(union scu_remote_node_context); ihost->remote_node_context_table = dmam_alloc_coherent(dev, size, &ihost->rnc_dma, GFP_KERNEL); if (!ihost->remote_node_context_table) return -ENOMEM; size = ihost->task_context_entries * sizeof(struct scu_task_context), ihost->task_context_table = dmam_alloc_coherent(dev, size, &ihost->tc_dma, GFP_KERNEL); if (!ihost->task_context_table) return -ENOMEM; size = SCI_UFI_TOTAL_SIZE; ihost->ufi_buf = dmam_alloc_coherent(dev, size, &ihost->ufi_dma, GFP_KERNEL); if (!ihost->ufi_buf) return -ENOMEM; for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) { struct 
isci_request *ireq; dma_addr_t dma; ireq = dmam_alloc_coherent(dev, sizeof(*ireq), &dma, GFP_KERNEL); if (!ireq) return -ENOMEM; ireq->tc = &ihost->task_context_table[i]; ireq->owning_controller = ihost; ireq->request_daddr = dma; ireq->isci_host = ihost; ihost->reqs[i] = ireq; } return 0; } static int sci_controller_mem_init(struct isci_host *ihost) { int err = sci_controller_dma_alloc(ihost); if (err) return err; writel(lower_32_bits(ihost->cq_dma), &ihost->smu_registers->completion_queue_lower); writel(upper_32_bits(ihost->cq_dma), &ihost->smu_registers->completion_queue_upper); writel(lower_32_bits(ihost->rnc_dma), &ihost->smu_registers->remote_node_context_lower); writel(upper_32_bits(ihost->rnc_dma), &ihost->smu_registers->remote_node_context_upper); writel(lower_32_bits(ihost->tc_dma), &ihost->smu_registers->host_task_table_lower); writel(upper_32_bits(ihost->tc_dma), &ihost->smu_registers->host_task_table_upper); sci_unsolicited_frame_control_construct(ihost); /* * Inform the silicon as to the location of the UF headers and * address table. */ writel(lower_32_bits(ihost->uf_control.headers.physical_address), &ihost->scu_registers->sdma.uf_header_base_address_lower); writel(upper_32_bits(ihost->uf_control.headers.physical_address), &ihost->scu_registers->sdma.uf_header_base_address_upper); writel(lower_32_bits(ihost->uf_control.address_table.physical_address), &ihost->scu_registers->sdma.uf_address_table_lower); writel(upper_32_bits(ihost->uf_control.address_table.physical_address), &ihost->scu_registers->sdma.uf_address_table_upper); return 0; } /** * isci_host_init - (re-)initialize hardware and internal (private) state * @ihost: host to init * * Any public facing objects (like asd_sas_port, and asd_sas_phys), or * one-time initialization objects like locks and waitqueues, are * not touched (they are initialized in isci_host_alloc) */ int isci_host_init(struct isci_host *ihost) { int i, err; enum sci_status status; spin_lock_irq(&ihost->scic_lock); status = sci_controller_construct(ihost, scu_base(ihost), smu_base(ihost)); spin_unlock_irq(&ihost->scic_lock); if (status != SCI_SUCCESS) { dev_err(&ihost->pdev->dev, "%s: sci_controller_construct failed - status = %x\n", __func__, status); return -ENODEV; } spin_lock_irq(&ihost->scic_lock); status = sci_controller_initialize(ihost); spin_unlock_irq(&ihost->scic_lock); if (status != SCI_SUCCESS) { dev_warn(&ihost->pdev->dev, "%s: sci_controller_initialize failed -" " status = 0x%x\n", __func__, status); return -ENODEV; } err = sci_controller_mem_init(ihost); if (err) return err; /* enable sgpio */ writel(1, &ihost->scu_registers->peg0.sgpio.interface_control); for (i = 0; i < isci_gpio_count(ihost); i++) writel(SGPIO_HW_CONTROL, &ihost->scu_registers->peg0.sgpio.output_data_select[i]); writel(0, &ihost->scu_registers->peg0.sgpio.vendor_specific_code); return 0; } void sci_controller_link_up(struct isci_host *ihost, struct isci_port *iport, struct isci_phy *iphy) { switch (ihost->sm.current_state_id) { case SCIC_STARTING: sci_del_timer(&ihost->phy_timer); ihost->phy_startup_timer_pending = false; ihost->port_agent.link_up_handler(ihost, &ihost->port_agent, iport, iphy); sci_controller_start_next_phy(ihost); break; case SCIC_READY: ihost->port_agent.link_up_handler(ihost, &ihost->port_agent, iport, iphy); break; default: dev_dbg(&ihost->pdev->dev, "%s: SCIC Controller linkup event from phy %d in " "unexpected state %d\n", __func__, iphy->phy_index, ihost->sm.current_state_id); } } void sci_controller_link_down(struct isci_host *ihost, 
struct isci_port *iport, struct isci_phy *iphy) { switch (ihost->sm.current_state_id) { case SCIC_STARTING: case SCIC_READY: ihost->port_agent.link_down_handler(ihost, &ihost->port_agent, iport, iphy); break; default: dev_dbg(&ihost->pdev->dev, "%s: SCIC Controller linkdown event from phy %d in " "unexpected state %d\n", __func__, iphy->phy_index, ihost->sm.current_state_id); } } bool sci_controller_has_remote_devices_stopping(struct isci_host *ihost) { u32 index; for (index = 0; index < ihost->remote_node_entries; index++) { if ((ihost->device_table[index] != NULL) && (ihost->device_table[index]->sm.current_state_id == SCI_DEV_STOPPING)) return true; } return false; } void sci_controller_remote_device_stopped(struct isci_host *ihost, struct isci_remote_device *idev) { if (ihost->sm.current_state_id != SCIC_STOPPING) { dev_dbg(&ihost->pdev->dev, "SCIC Controller 0x%p remote device stopped event " "from device 0x%p in unexpected state %d\n", ihost, idev, ihost->sm.current_state_id); return; } if (!sci_controller_has_remote_devices_stopping(ihost)) isci_host_stop_complete(ihost); } void sci_controller_post_request(struct isci_host *ihost, u32 request) { dev_dbg(&ihost->pdev->dev, "%s[%d]: %#x\n", __func__, ihost->id, request); writel(request, &ihost->smu_registers->post_context_port); } struct isci_request *sci_request_by_tag(struct isci_host *ihost, u16 io_tag) { u16 task_index; u16 task_sequence; task_index = ISCI_TAG_TCI(io_tag); if (task_index < ihost->task_context_entries) { struct isci_request *ireq = ihost->reqs[task_index]; if (test_bit(IREQ_ACTIVE, &ireq->flags)) { task_sequence = ISCI_TAG_SEQ(io_tag); if (task_sequence == ihost->io_request_sequence[task_index]) return ireq; } } return NULL; } /** * sci_controller_allocate_remote_node_context() * This method allocates remote node index and the reserves the remote node * context space for use. This method can fail if there are no more remote * node index available. * @ihost: This is the controller object which contains the set of * free remote node ids * @idev: This is the device object which is requesting the a remote node * id * @node_id: This is the remote node id that is assinged to the device if one * is available * * enum sci_status SCI_FAILURE_OUT_OF_RESOURCES if there are no available remote * node index available. */ enum sci_status sci_controller_allocate_remote_node_context(struct isci_host *ihost, struct isci_remote_device *idev, u16 *node_id) { u16 node_index; u32 remote_node_count = sci_remote_device_node_count(idev); node_index = sci_remote_node_table_allocate_remote_node( &ihost->available_remote_nodes, remote_node_count ); if (node_index != SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) { ihost->device_table[node_index] = idev; *node_id = node_index; return SCI_SUCCESS; } return SCI_FAILURE_INSUFFICIENT_RESOURCES; } void sci_controller_free_remote_node_context(struct isci_host *ihost, struct isci_remote_device *idev, u16 node_id) { u32 remote_node_count = sci_remote_device_node_count(idev); if (ihost->device_table[node_id] == idev) { ihost->device_table[node_id] = NULL; sci_remote_node_table_release_remote_node_index( &ihost->available_remote_nodes, remote_node_count, node_id ); } } void sci_controller_copy_sata_response(void *response_buffer, void *frame_header, void *frame_buffer) { /* XXX type safety? 
*/ memcpy(response_buffer, frame_header, sizeof(u32)); memcpy(response_buffer + sizeof(u32), frame_buffer, sizeof(struct dev_to_host_fis) - sizeof(u32)); } void sci_controller_release_frame(struct isci_host *ihost, u32 frame_index) { if (sci_unsolicited_frame_control_release_frame(&ihost->uf_control, frame_index)) writel(ihost->uf_control.get, &ihost->scu_registers->sdma.unsolicited_frame_get_pointer); } void isci_tci_free(struct isci_host *ihost, u16 tci) { u16 tail = ihost->tci_tail & (SCI_MAX_IO_REQUESTS-1); ihost->tci_pool[tail] = tci; ihost->tci_tail = tail + 1; } static u16 isci_tci_alloc(struct isci_host *ihost) { u16 head = ihost->tci_head & (SCI_MAX_IO_REQUESTS-1); u16 tci = ihost->tci_pool[head]; ihost->tci_head = head + 1; return tci; } static u16 isci_tci_space(struct isci_host *ihost) { return CIRC_SPACE(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS); } u16 isci_alloc_tag(struct isci_host *ihost) { if (isci_tci_space(ihost)) { u16 tci = isci_tci_alloc(ihost); u8 seq = ihost->io_request_sequence[tci]; return ISCI_TAG(seq, tci); } return SCI_CONTROLLER_INVALID_IO_TAG; } enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag) { u16 tci = ISCI_TAG_TCI(io_tag); u16 seq = ISCI_TAG_SEQ(io_tag); /* prevent tail from passing head */ if (isci_tci_active(ihost) == 0) return SCI_FAILURE_INVALID_IO_TAG; if (seq == ihost->io_request_sequence[tci]) { ihost->io_request_sequence[tci] = (seq+1) & (SCI_MAX_SEQ-1); isci_tci_free(ihost, tci); return SCI_SUCCESS; } return SCI_FAILURE_INVALID_IO_TAG; } enum sci_status sci_controller_start_io(struct isci_host *ihost, struct isci_remote_device *idev, struct isci_request *ireq) { enum sci_status status; if (ihost->sm.current_state_id != SCIC_READY) { dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n", __func__, ihost->sm.current_state_id); return SCI_FAILURE_INVALID_STATE; } status = sci_remote_device_start_io(ihost, idev, ireq); if (status != SCI_SUCCESS) return status; set_bit(IREQ_ACTIVE, &ireq->flags); sci_controller_post_request(ihost, ireq->post_context); return SCI_SUCCESS; } enum sci_status sci_controller_terminate_request(struct isci_host *ihost, struct isci_remote_device *idev, struct isci_request *ireq) { /* terminate an ongoing (i.e. started) core IO request. This does not * abort the IO request at the target, but rather removes the IO * request from the host controller. */ enum sci_status status; if (ihost->sm.current_state_id != SCIC_READY) { dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n", __func__, ihost->sm.current_state_id); return SCI_FAILURE_INVALID_STATE; } status = sci_io_request_terminate(ireq); dev_dbg(&ihost->pdev->dev, "%s: status=%d; ireq=%p; flags=%lx\n", __func__, status, ireq, ireq->flags); if ((status == SCI_SUCCESS) && !test_bit(IREQ_PENDING_ABORT, &ireq->flags) && !test_and_set_bit(IREQ_TC_ABORT_POSTED, &ireq->flags)) { /* Utilize the original post context command and or in the * POST_TC_ABORT request sub-type. */ sci_controller_post_request( ihost, ireq->post_context | SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT); } return status; } /** * sci_controller_complete_io() - This method will perform core specific * completion operations for an IO request. After this method is invoked, * the user should consider the IO request as invalid until it is properly * reused (i.e. re-constructed). * @ihost: The handle to the controller object for which to complete the * IO request. * @idev: The handle to the remote device object for which to complete * the IO request. 
* @ireq: the handle to the io request object to complete. */ enum sci_status sci_controller_complete_io(struct isci_host *ihost, struct isci_remote_device *idev, struct isci_request *ireq) { enum sci_status status; switch (ihost->sm.current_state_id) { case SCIC_STOPPING: /* XXX: Implement this function */ return SCI_FAILURE; case SCIC_READY: status = sci_remote_device_complete_io(ihost, idev, ireq); if (status != SCI_SUCCESS) return status; clear_bit(IREQ_ACTIVE, &ireq->flags); return SCI_SUCCESS; default: dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n", __func__, ihost->sm.current_state_id); return SCI_FAILURE_INVALID_STATE; } } enum sci_status sci_controller_continue_io(struct isci_request *ireq) { struct isci_host *ihost = ireq->owning_controller; if (ihost->sm.current_state_id != SCIC_READY) { dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n", __func__, ihost->sm.current_state_id); return SCI_FAILURE_INVALID_STATE; } set_bit(IREQ_ACTIVE, &ireq->flags); sci_controller_post_request(ihost, ireq->post_context); return SCI_SUCCESS; } /** * sci_controller_start_task() - This method is called by the SCIC user to * send/start a framework task management request. * @ihost: the handle to the controller object for which to start the task * management request. * @idev: the handle to the remote device object for which to start * the task management request. * @ireq: the handle to the task request object to start. */ enum sci_status sci_controller_start_task(struct isci_host *ihost, struct isci_remote_device *idev, struct isci_request *ireq) { enum sci_status status; if (ihost->sm.current_state_id != SCIC_READY) { dev_warn(&ihost->pdev->dev, "%s: SCIC Controller starting task from invalid " "state\n", __func__); return SCI_FAILURE_INVALID_STATE; } status = sci_remote_device_start_task(ihost, idev, ireq); switch (status) { case SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS: set_bit(IREQ_ACTIVE, &ireq->flags); /* * We will let framework know this task request started successfully, * although core is still woring on starting the request (to post tc when * RNC is resumed.) */ return SCI_SUCCESS; case SCI_SUCCESS: set_bit(IREQ_ACTIVE, &ireq->flags); sci_controller_post_request(ihost, ireq->post_context); break; default: break; } return status; } static int sci_write_gpio_tx_gp(struct isci_host *ihost, u8 reg_index, u8 reg_count, u8 *write_data) { int d; /* no support for TX_GP_CFG */ if (reg_index == 0) return -EINVAL; for (d = 0; d < isci_gpio_count(ihost); d++) { u32 val = 0x444; /* all ODx.n clear */ int i; for (i = 0; i < 3; i++) { int bit; bit = try_test_sas_gpio_gp_bit(to_sas_gpio_od(d, i), write_data, reg_index, reg_count); if (bit < 0) break; /* if od is set, clear the 'invert' bit */ val &= ~(bit << ((i << 2) + 2)); } if (i < 3) break; writel(val, &ihost->scu_registers->peg0.sgpio.output_data_select[d]); } /* unless reg_index is > 1, we should always be able to write at * least one register */ return d > 0; } int isci_gpio_write(struct sas_ha_struct *sas_ha, u8 reg_type, u8 reg_index, u8 reg_count, u8 *write_data) { struct isci_host *ihost = sas_ha->lldd_ha; int written; switch (reg_type) { case SAS_GPIO_REG_TX_GP: written = sci_write_gpio_tx_gp(ihost, reg_index, reg_count, write_data); break; default: written = -EINVAL; } return written; }
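/*
 * Illustrative sketch only -- not part of the isci driver code above.  The
 * staggered spin-up logic in power_control_timeout() and
 * sci_controller_power_control_queue_insert() amounts to "grant at most
 * max_spin_up() requests per timer interval and queue the rest".  The queue
 * layout and all ex_*/EX_* names below are assumptions made so that this
 * standalone example compiles on its own.
 */
#include <stdio.h>

#define EX_MAX_PHYS     8
#define EX_MAX_SPIN_UP  2                 /* grants allowed per interval */

static int ex_waiting[EX_MAX_PHYS];       /* 1 = phy is queued for power */

/* One timer tick: hand out power to at most EX_MAX_SPIN_UP queued phys. */
static void ex_interval_elapsed(void)
{
	int granted = 0;
	int i;

	for (i = 0; i < EX_MAX_PHYS && granted < EX_MAX_SPIN_UP; i++) {
		if (!ex_waiting[i])
			continue;
		ex_waiting[i] = 0;
		granted++;
		printf("interval: phy %d granted power\n", i);
	}
}

int main(void)
{
	int i;

	for (i = 0; i < 5; i++)           /* five phys request power at once */
		ex_waiting[i] = 1;

	ex_interval_elapsed();            /* grants phys 0 and 1 */
	ex_interval_elapsed();            /* grants phys 2 and 3 */
	ex_interval_elapsed();            /* grants phy 4 */
	return 0;
}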
linux-master
drivers/scsi/isci/host.c
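The tag handling in host.c above (isci_alloc_tag(), isci_free_tag(), sci_request_by_tag()) pairs a task-context index with a small rolling sequence number so that a completion carrying a stale tag can be rejected. The standalone sketch below illustrates only that sequence check; the circular TCI pool is left out for brevity, and the field widths and EX_*/ex_* names are assumptions made for this example, not the driver's actual constants.

/* Minimal sketch, assuming 8-bit TCI and 4-bit sequence fields. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EX_MAX_REQUESTS  16              /* power of two */
#define EX_MAX_SEQ       16              /* sequence rolls over mod this */

#define EX_TAG(seq, tci) ((uint16_t)(((seq) << 8) | (tci)))
#define EX_TAG_SEQ(tag)  (((tag) >> 8) & (EX_MAX_SEQ - 1))
#define EX_TAG_TCI(tag)  ((tag) & 0xff)

static uint8_t ex_sequence[EX_MAX_REQUESTS];

/* Build a tag for a task-context index using its current sequence. */
static uint16_t ex_alloc_tag(uint16_t tci)
{
	return EX_TAG(ex_sequence[tci], tci);
}

/* Accept a tag only if it carries the current sequence, then bump it. */
static bool ex_free_tag(uint16_t tag)
{
	uint16_t tci = EX_TAG_TCI(tag);
	uint8_t seq = EX_TAG_SEQ(tag);

	if (tci >= EX_MAX_REQUESTS)
		return false;            /* out-of-range index */
	if (seq != ex_sequence[tci])
		return false;            /* stale or duplicate free */

	ex_sequence[tci] = (seq + 1) & (EX_MAX_SEQ - 1);
	return true;
}

int main(void)
{
	uint16_t tag = ex_alloc_tag(3);

	printf("first free:  %s\n", ex_free_tag(tag) ? "ok" : "rejected");
	printf("second free: %s\n", ex_free_tag(tag) ? "ok" : "rejected");
	return 0;
}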
/* * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * The full GNU General Public License is included in this distribution * in the file called LICENSE.GPL. */ /* probe_roms - scan for oem parameters */ #include <linux/kernel.h> #include <linux/firmware.h> #include <linux/uaccess.h> #include <linux/efi.h> #include <asm/probe_roms.h> #include "isci.h" #include "task.h" #include "probe_roms.h" static efi_char16_t isci_efivar_name[] = { 'R', 's', 't', 'S', 'c', 'u', 'O' }; struct isci_orom *isci_request_oprom(struct pci_dev *pdev) { void __iomem *oprom = pci_map_biosrom(pdev); struct isci_orom *rom = NULL; size_t len, i; int j; char oem_sig[4]; struct isci_oem_hdr oem_hdr; u8 *tmp, sum; if (!oprom) return NULL; len = pci_biosrom_size(pdev); rom = devm_kzalloc(&pdev->dev, sizeof(*rom), GFP_KERNEL); if (!rom) { pci_unmap_biosrom(oprom); dev_warn(&pdev->dev, "Unable to allocate memory for orom\n"); return NULL; } for (i = 0; i < len && rom; i += ISCI_OEM_SIG_SIZE) { memcpy_fromio(oem_sig, oprom + i, ISCI_OEM_SIG_SIZE); /* we think we found the OEM table */ if (memcmp(oem_sig, ISCI_OEM_SIG, ISCI_OEM_SIG_SIZE) == 0) { size_t copy_len; memcpy_fromio(&oem_hdr, oprom + i, sizeof(oem_hdr)); copy_len = min(oem_hdr.len - sizeof(oem_hdr), sizeof(*rom)); memcpy_fromio(rom, oprom + i + sizeof(oem_hdr), copy_len); /* calculate checksum */ tmp = (u8 *)&oem_hdr; for (j = 0, sum = 0; j < sizeof(oem_hdr); j++, tmp++) sum += *tmp; tmp = (u8 *)rom; for (j = 0; j < sizeof(*rom); j++, tmp++) sum += *tmp; if (sum != 0) { dev_warn(&pdev->dev, "OEM table checksum failed\n"); continue; } /* keep going if that's not the oem param table */ if (memcmp(rom->hdr.signature, ISCI_ROM_SIG, ISCI_ROM_SIG_SIZE) != 0) continue; dev_info(&pdev->dev, "OEM parameter table found in OROM\n"); break; } } if (i >= len) { dev_err(&pdev->dev, "oprom parse error\n"); rom = NULL; } pci_unmap_biosrom(oprom); return rom; } struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmware *fw) { struct isci_orom *orom = NULL, *data; int i, j; if (request_firmware(&fw, ISCI_FW_NAME, &pdev->dev) != 0) return NULL; if (fw->size < sizeof(*orom)) goto out; data = (struct isci_orom *)fw->data; if (strncmp(ISCI_ROM_SIG, data->hdr.signature, strlen(ISCI_ROM_SIG)) != 0) goto out; orom = devm_kzalloc(&pdev->dev, fw->size, GFP_KERNEL); if (!orom) goto out; memcpy(orom, fw->data, fw->size); if (is_c0(pdev) || is_c1(pdev)) goto out; /* * deprecated: override default amp_control for pre-preproduction * silicon revisions */ for (i = 0; i < ARRAY_SIZE(orom->ctrl); i++) for (j = 0; j < ARRAY_SIZE(orom->ctrl[i].phys); j++) { orom->ctrl[i].phys[j].afe_tx_amp_control0 = 0xe7c03; orom->ctrl[i].phys[j].afe_tx_amp_control1 = 0xe7c03; 
orom->ctrl[i].phys[j].afe_tx_amp_control2 = 0xe7c03; orom->ctrl[i].phys[j].afe_tx_amp_control3 = 0xe7c03; } out: release_firmware(fw); return orom; } static struct efi *get_efi(void) { #ifdef CONFIG_EFI return &efi; #else return NULL; #endif } struct isci_orom *isci_get_efi_var(struct pci_dev *pdev) { efi_status_t status; struct isci_orom *rom; struct isci_oem_hdr *oem_hdr; u8 *tmp, sum; int j; unsigned long data_len; u8 *efi_data; u32 efi_attrib = 0; data_len = 1024; efi_data = devm_kzalloc(&pdev->dev, data_len, GFP_KERNEL); if (!efi_data) { dev_warn(&pdev->dev, "Unable to allocate memory for EFI data\n"); return NULL; } rom = (struct isci_orom *)(efi_data + sizeof(struct isci_oem_hdr)); if (get_efi()) status = get_efi()->get_variable(isci_efivar_name, &ISCI_EFI_VENDOR_GUID, &efi_attrib, &data_len, efi_data); else status = EFI_NOT_FOUND; if (status != EFI_SUCCESS) { dev_warn(&pdev->dev, "Unable to obtain EFI var data for OEM parms\n"); return NULL; } oem_hdr = (struct isci_oem_hdr *)efi_data; if (memcmp(oem_hdr->sig, ISCI_OEM_SIG, ISCI_OEM_SIG_SIZE) != 0) { dev_warn(&pdev->dev, "Invalid OEM header signature\n"); return NULL; } /* calculate checksum */ tmp = (u8 *)efi_data; for (j = 0, sum = 0; j < (sizeof(*oem_hdr) + sizeof(*rom)); j++, tmp++) sum += *tmp; if (sum != 0) { dev_warn(&pdev->dev, "OEM table checksum failed\n"); return NULL; } if (memcmp(rom->hdr.signature, ISCI_ROM_SIG, ISCI_ROM_SIG_SIZE) != 0) { dev_warn(&pdev->dev, "Invalid OEM table signature\n"); return NULL; } return rom; }
linux-master
drivers/scsi/isci/probe_roms.c
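Both isci_request_oprom() and isci_get_efi_var() above accept an OEM parameter blob only when the bytes of the header plus the table sum to zero modulo 256. The sketch below shows that additive-checksum convention in isolation; the ex_seal() writer side and the ex_* names are hypothetical, added only so the example is self-contained and runnable.

/* Minimal sketch of a zero-sum byte checksum, assuming a trailing pad byte. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Sum every byte of the buffer; a valid image sums to 0 (mod 256). */
static uint8_t ex_byte_sum(const void *buf, size_t len)
{
	const uint8_t *p = buf;
	uint8_t sum = 0;

	while (len--)
		sum += *p++;
	return sum;
}

/* Writer side (hypothetical): store the two's complement in the last byte. */
static void ex_seal(uint8_t *buf, size_t len)
{
	buf[len - 1] = 0;
	buf[len - 1] = (uint8_t)(0 - ex_byte_sum(buf, len));
}

int main(void)
{
	uint8_t image[32];

	memset(image, 0xa5, sizeof(image));
	ex_seal(image, sizeof(image));

	printf("checksum %s\n",
	       ex_byte_sum(image, sizeof(image)) == 0 ? "ok" : "bad");
	return 0;
}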
/* * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * The full GNU General Public License is included in this distribution * in the file called LICENSE.GPL. * * BSD LICENSE * * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include "isci.h" #include "port.h" #include "request.h" #define SCIC_SDS_PORT_HARD_RESET_TIMEOUT (1000) #define SCU_DUMMY_INDEX (0xFFFF) #undef C #define C(a) (#a) static const char *port_state_name(enum sci_port_states state) { static const char * const strings[] = PORT_STATES; return strings[state]; } #undef C static struct device *sciport_to_dev(struct isci_port *iport) { int i = iport->physical_port_index; struct isci_port *table; struct isci_host *ihost; if (i == SCIC_SDS_DUMMY_PORT) i = SCI_MAX_PORTS+1; table = iport - i; ihost = container_of(table, typeof(*ihost), ports[0]); return &ihost->pdev->dev; } static void sci_port_get_protocols(struct isci_port *iport, struct sci_phy_proto *proto) { u8 index; proto->all = 0; for (index = 0; index < SCI_MAX_PHYS; index++) { struct isci_phy *iphy = iport->phy_table[index]; if (!iphy) continue; sci_phy_get_protocols(iphy, proto); } } static u32 sci_port_get_phys(struct isci_port *iport) { u32 index; u32 mask; mask = 0; for (index = 0; index < SCI_MAX_PHYS; index++) if (iport->phy_table[index]) mask |= (1 << index); return mask; } /** * sci_port_get_properties() - This method simply returns the properties * regarding the port, such as: physical index, protocols, sas address, etc. * @iport: this parameter specifies the port for which to retrieve the physical * index. * @prop: This parameter specifies the properties structure into which to * copy the requested information. * * Indicate if the user specified a valid port. SCI_SUCCESS This value is * returned if the specified port was valid. SCI_FAILURE_INVALID_PORT This * value is returned if the specified port is not valid. When this value is * returned, no data is copied to the properties output parameter. */ enum sci_status sci_port_get_properties(struct isci_port *iport, struct sci_port_properties *prop) { if (!iport || iport->logical_port_index == SCIC_SDS_DUMMY_PORT) return SCI_FAILURE_INVALID_PORT; prop->index = iport->logical_port_index; prop->phy_mask = sci_port_get_phys(iport); sci_port_get_sas_address(iport, &prop->local.sas_address); sci_port_get_protocols(iport, &prop->local.protocols); sci_port_get_attached_sas_address(iport, &prop->remote.sas_address); return SCI_SUCCESS; } static void sci_port_bcn_enable(struct isci_port *iport) { struct isci_phy *iphy; u32 val; int i; for (i = 0; i < ARRAY_SIZE(iport->phy_table); i++) { iphy = iport->phy_table[i]; if (!iphy) continue; val = readl(&iphy->link_layer_registers->link_layer_control); /* clear the bit by writing 1. 
*/ writel(val, &iphy->link_layer_registers->link_layer_control); } } static void isci_port_bc_change_received(struct isci_host *ihost, struct isci_port *iport, struct isci_phy *iphy) { dev_dbg(&ihost->pdev->dev, "%s: isci_phy = %p, sas_phy = %p\n", __func__, iphy, &iphy->sas_phy); sas_notify_port_event(&iphy->sas_phy, PORTE_BROADCAST_RCVD, GFP_ATOMIC); sci_port_bcn_enable(iport); } static void isci_port_link_up(struct isci_host *isci_host, struct isci_port *iport, struct isci_phy *iphy) { unsigned long flags; struct sci_port_properties properties; unsigned long success = true; dev_dbg(&isci_host->pdev->dev, "%s: isci_port = %p\n", __func__, iport); spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags); sci_port_get_properties(iport, &properties); if (iphy->protocol == SAS_PROTOCOL_SATA) { u64 attached_sas_address; iphy->sas_phy.oob_mode = SATA_OOB_MODE; iphy->sas_phy.frame_rcvd_size = sizeof(struct dev_to_host_fis); /* * For direct-attached SATA devices, the SCI core will * automagically assign a SAS address to the end device * for the purpose of creating a port. This SAS address * will not be the same as assigned to the PHY and needs * to be obtained from struct sci_port_properties properties. */ attached_sas_address = properties.remote.sas_address.high; attached_sas_address <<= 32; attached_sas_address |= properties.remote.sas_address.low; swab64s(&attached_sas_address); memcpy(&iphy->sas_phy.attached_sas_addr, &attached_sas_address, sizeof(attached_sas_address)); } else if (iphy->protocol == SAS_PROTOCOL_SSP) { iphy->sas_phy.oob_mode = SAS_OOB_MODE; iphy->sas_phy.frame_rcvd_size = sizeof(struct sas_identify_frame); /* Copy the attached SAS address from the IAF */ memcpy(iphy->sas_phy.attached_sas_addr, iphy->frame_rcvd.iaf.sas_addr, SAS_ADDR_SIZE); } else { dev_err(&isci_host->pdev->dev, "%s: unknown target\n", __func__); success = false; } iphy->sas_phy.phy->negotiated_linkrate = sci_phy_linkrate(iphy); spin_unlock_irqrestore(&iphy->sas_phy.frame_rcvd_lock, flags); /* Notify libsas that we have an address frame, if indeed * we've found an SSP, SMP, or STP target */ if (success) sas_notify_port_event(&iphy->sas_phy, PORTE_BYTES_DMAED, GFP_ATOMIC); } /** * isci_port_link_down() - This function is called by the sci core when a link * becomes inactive. * @isci_host: This parameter specifies the isci host object. * @isci_phy: This parameter specifies the isci phy with the active link. * @isci_port: This parameter specifies the isci port with the active link. * */ static void isci_port_link_down(struct isci_host *isci_host, struct isci_phy *isci_phy, struct isci_port *isci_port) { struct isci_remote_device *isci_device; dev_dbg(&isci_host->pdev->dev, "%s: isci_port = %p\n", __func__, isci_port); if (isci_port) { /* check to see if this is the last phy on this port. */ if (isci_phy->sas_phy.port && isci_phy->sas_phy.port->num_phys == 1) { /* change the state for all devices on this port. The * next task sent to this device will be returned as * SAS_TASK_UNDELIVERED, and the scsi mid layer will * remove the target */ list_for_each_entry(isci_device, &isci_port->remote_dev_list, node) { dev_dbg(&isci_host->pdev->dev, "%s: isci_device = %p\n", __func__, isci_device); set_bit(IDEV_GONE, &isci_device->flags); } } } /* Notify libsas of the borken link, this will trigger calls to our * isci_port_deformed and isci_dev_gone functions. 
*/ sas_phy_disconnected(&isci_phy->sas_phy); sas_notify_phy_event(&isci_phy->sas_phy, PHYE_LOSS_OF_SIGNAL, GFP_ATOMIC); dev_dbg(&isci_host->pdev->dev, "%s: isci_port = %p - Done\n", __func__, isci_port); } static bool is_port_ready_state(enum sci_port_states state) { switch (state) { case SCI_PORT_READY: case SCI_PORT_SUB_WAITING: case SCI_PORT_SUB_OPERATIONAL: case SCI_PORT_SUB_CONFIGURING: return true; default: return false; } } /* flag dummy rnc hanling when exiting a ready state */ static void port_state_machine_change(struct isci_port *iport, enum sci_port_states state) { struct sci_base_state_machine *sm = &iport->sm; enum sci_port_states old_state = sm->current_state_id; if (is_port_ready_state(old_state) && !is_port_ready_state(state)) iport->ready_exit = true; sci_change_state(sm, state); iport->ready_exit = false; } /** * isci_port_hard_reset_complete() - This function is called by the sci core * when the hard reset complete notification has been received. * @isci_port: This parameter specifies the sci port with the active link. * @completion_status: This parameter specifies the core status for the reset * process. * */ static void isci_port_hard_reset_complete(struct isci_port *isci_port, enum sci_status completion_status) { struct isci_host *ihost = isci_port->owning_controller; dev_dbg(&ihost->pdev->dev, "%s: isci_port = %p, completion_status=%x\n", __func__, isci_port, completion_status); /* Save the status of the hard reset from the port. */ isci_port->hard_reset_status = completion_status; if (completion_status != SCI_SUCCESS) { /* The reset failed. The port state is now SCI_PORT_FAILED. */ if (isci_port->active_phy_mask == 0) { int phy_idx = isci_port->last_active_phy; struct isci_phy *iphy = &ihost->phys[phy_idx]; /* Generate the link down now to the host, since it * was intercepted by the hard reset state machine when * it really happened. */ isci_port_link_down(ihost, iphy, isci_port); } /* Advance the port state so that link state changes will be * noticed. */ port_state_machine_change(isci_port, SCI_PORT_SUB_WAITING); } clear_bit(IPORT_RESET_PENDING, &isci_port->state); wake_up(&ihost->eventq); } /* This method will return a true value if the specified phy can be assigned to * this port The following is a list of phys for each port that are allowed: - * Port 0 - 3 2 1 0 - Port 1 - 1 - Port 2 - 3 2 - Port 3 - 3 This method * doesn't preclude all configurations. It merely ensures that a phy is part * of the allowable set of phy identifiers for that port. For example, one * could assign phy 3 to port 0 and no other phys. Please refer to * sci_port_is_phy_mask_valid() for information regarding whether the * phy_mask for a port can be supported. bool true if this is a valid phy * assignment for the port false if this is not a valid phy assignment for the * port */ bool sci_port_is_valid_phy_assignment(struct isci_port *iport, u32 phy_index) { struct isci_host *ihost = iport->owning_controller; struct sci_user_parameters *user = &ihost->user_parameters; /* Initialize to invalid value. 
*/ u32 existing_phy_index = SCI_MAX_PHYS; u32 index; if ((iport->physical_port_index == 1) && (phy_index != 1)) return false; if (iport->physical_port_index == 3 && phy_index != 3) return false; if (iport->physical_port_index == 2 && (phy_index == 0 || phy_index == 1)) return false; for (index = 0; index < SCI_MAX_PHYS; index++) if (iport->phy_table[index] && index != phy_index) existing_phy_index = index; /* Ensure that all of the phys in the port are capable of * operating at the same maximum link rate. */ if (existing_phy_index < SCI_MAX_PHYS && user->phys[phy_index].max_speed_generation != user->phys[existing_phy_index].max_speed_generation) return false; return true; } /** * sci_port_is_phy_mask_valid() * @iport: This is the port object for which to determine if the phy mask * can be supported. * @phy_mask: Phy mask belonging to this port * * This method will return a true value if the port's phy mask can be supported * by the SCU. The following is a list of valid PHY mask configurations for * each port: - Port 0 - [[3 2] 1] 0 - Port 1 - [1] - Port 2 - [[3] 2] * - Port 3 - [3] This method returns a boolean indication specifying if the * phy mask can be supported. true if this is a valid phy assignment for the * port false if this is not a valid phy assignment for the port */ static bool sci_port_is_phy_mask_valid( struct isci_port *iport, u32 phy_mask) { if (iport->physical_port_index == 0) { if (((phy_mask & 0x0F) == 0x0F) || ((phy_mask & 0x03) == 0x03) || ((phy_mask & 0x01) == 0x01) || (phy_mask == 0)) return true; } else if (iport->physical_port_index == 1) { if (((phy_mask & 0x02) == 0x02) || (phy_mask == 0)) return true; } else if (iport->physical_port_index == 2) { if (((phy_mask & 0x0C) == 0x0C) || ((phy_mask & 0x04) == 0x04) || (phy_mask == 0)) return true; } else if (iport->physical_port_index == 3) { if (((phy_mask & 0x08) == 0x08) || (phy_mask == 0)) return true; } return false; } /* * This method retrieves a currently active (i.e. connected) phy contained in * the port. Currently, the lowest order phy that is connected is returned. * This method returns a pointer to a SCIS_SDS_PHY object. NULL This value is * returned if there are no currently active (i.e. connected to a remote end * point) phys contained in the port. All other values specify a struct sci_phy * object that is active in the port. */ static struct isci_phy *sci_port_get_a_connected_phy(struct isci_port *iport) { u32 index; struct isci_phy *iphy; for (index = 0; index < SCI_MAX_PHYS; index++) { /* Ensure that the phy is both part of the port and currently * connected to the remote end-point. */ iphy = iport->phy_table[index]; if (iphy && sci_port_active_phy(iport, iphy)) return iphy; } return NULL; } static enum sci_status sci_port_set_phy(struct isci_port *iport, struct isci_phy *iphy) { /* Check to see if we can add this phy to a port * that means that the phy is not part of a port and that the port does * not already have a phy assinged to the phy index. 
*/ if (!iport->phy_table[iphy->phy_index] && !phy_get_non_dummy_port(iphy) && sci_port_is_valid_phy_assignment(iport, iphy->phy_index)) { /* Phy is being added in the stopped state so we are in MPC mode * make logical port index = physical port index */ iport->logical_port_index = iport->physical_port_index; iport->phy_table[iphy->phy_index] = iphy; sci_phy_set_port(iphy, iport); return SCI_SUCCESS; } return SCI_FAILURE; } static enum sci_status sci_port_clear_phy(struct isci_port *iport, struct isci_phy *iphy) { /* Make sure that this phy is part of this port */ if (iport->phy_table[iphy->phy_index] == iphy && phy_get_non_dummy_port(iphy) == iport) { struct isci_host *ihost = iport->owning_controller; /* Yep it is assigned to this port so remove it */ sci_phy_set_port(iphy, &ihost->ports[SCI_MAX_PORTS]); iport->phy_table[iphy->phy_index] = NULL; return SCI_SUCCESS; } return SCI_FAILURE; } void sci_port_get_sas_address(struct isci_port *iport, struct sci_sas_address *sas) { u32 index; sas->high = 0; sas->low = 0; for (index = 0; index < SCI_MAX_PHYS; index++) if (iport->phy_table[index]) sci_phy_get_sas_address(iport->phy_table[index], sas); } void sci_port_get_attached_sas_address(struct isci_port *iport, struct sci_sas_address *sas) { struct isci_phy *iphy; /* * Ensure that the phy is both part of the port and currently * connected to the remote end-point. */ iphy = sci_port_get_a_connected_phy(iport); if (iphy) { if (iphy->protocol != SAS_PROTOCOL_SATA) { sci_phy_get_attached_sas_address(iphy, sas); } else { sci_phy_get_sas_address(iphy, sas); sas->low += iphy->phy_index; } } else { sas->high = 0; sas->low = 0; } } /** * sci_port_construct_dummy_rnc() - create dummy rnc for si workaround * * @iport: logical port on which we need to create the remote node context * @rni: remote node index for this remote node context. * * This routine will construct a dummy remote node context data structure * This structure will be posted to the hardware to work around a scheduler * error in the hardware. */ static void sci_port_construct_dummy_rnc(struct isci_port *iport, u16 rni) { union scu_remote_node_context *rnc; rnc = &iport->owning_controller->remote_node_context_table[rni]; memset(rnc, 0, sizeof(union scu_remote_node_context)); rnc->ssp.remote_sas_address_hi = 0; rnc->ssp.remote_sas_address_lo = 0; rnc->ssp.remote_node_index = rni; rnc->ssp.remote_node_port_width = 1; rnc->ssp.logical_port_index = iport->physical_port_index; rnc->ssp.nexus_loss_timer_enable = false; rnc->ssp.check_bit = false; rnc->ssp.is_valid = true; rnc->ssp.is_remote_node_context = true; rnc->ssp.function_number = 0; rnc->ssp.arbitration_wait_time = 0; } /* * construct a dummy task context data structure. This * structure will be posted to the hardwre to work around a scheduler error * in the hardware. 
*/ static void sci_port_construct_dummy_task(struct isci_port *iport, u16 tag) { struct isci_host *ihost = iport->owning_controller; struct scu_task_context *task_context; task_context = &ihost->task_context_table[ISCI_TAG_TCI(tag)]; memset(task_context, 0, sizeof(struct scu_task_context)); task_context->initiator_request = 1; task_context->connection_rate = 1; task_context->logical_port_index = iport->physical_port_index; task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP; task_context->task_index = ISCI_TAG_TCI(tag); task_context->valid = SCU_TASK_CONTEXT_VALID; task_context->context_type = SCU_TASK_CONTEXT_TYPE; task_context->remote_node_index = iport->reserved_rni; task_context->do_not_dma_ssp_good_response = 1; task_context->task_phase = 0x01; } static void sci_port_destroy_dummy_resources(struct isci_port *iport) { struct isci_host *ihost = iport->owning_controller; if (iport->reserved_tag != SCI_CONTROLLER_INVALID_IO_TAG) isci_free_tag(ihost, iport->reserved_tag); if (iport->reserved_rni != SCU_DUMMY_INDEX) sci_remote_node_table_release_remote_node_index(&ihost->available_remote_nodes, 1, iport->reserved_rni); iport->reserved_rni = SCU_DUMMY_INDEX; iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG; } void sci_port_setup_transports(struct isci_port *iport, u32 device_id) { u8 index; for (index = 0; index < SCI_MAX_PHYS; index++) { if (iport->active_phy_mask & (1 << index)) sci_phy_setup_transport(iport->phy_table[index], device_id); } } static void sci_port_resume_phy(struct isci_port *iport, struct isci_phy *iphy) { sci_phy_resume(iphy); iport->enabled_phy_mask |= 1 << iphy->phy_index; } static void sci_port_activate_phy(struct isci_port *iport, struct isci_phy *iphy, u8 flags) { struct isci_host *ihost = iport->owning_controller; if (iphy->protocol != SAS_PROTOCOL_SATA && (flags & PF_RESUME)) sci_phy_resume(iphy); iport->active_phy_mask |= 1 << iphy->phy_index; sci_controller_clear_invalid_phy(ihost, iphy); if (flags & PF_NOTIFY) isci_port_link_up(ihost, iport, iphy); } void sci_port_deactivate_phy(struct isci_port *iport, struct isci_phy *iphy, bool do_notify_user) { struct isci_host *ihost = iport->owning_controller; iport->active_phy_mask &= ~(1 << iphy->phy_index); iport->enabled_phy_mask &= ~(1 << iphy->phy_index); if (!iport->active_phy_mask) iport->last_active_phy = iphy->phy_index; iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN; /* Re-assign the phy back to the LP as if it were a narrow port for APC * mode. For MPC mode, the phy will remain in the port. */ if (iport->owning_controller->oem_parameters.controller.mode_type == SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE) writel(iphy->phy_index, &iport->port_pe_configuration_register[iphy->phy_index]); if (do_notify_user == true) isci_port_link_down(ihost, iphy, iport); } static void sci_port_invalid_link_up(struct isci_port *iport, struct isci_phy *iphy) { struct isci_host *ihost = iport->owning_controller; /* * Check to see if we have alreay reported this link as bad and if * not go ahead and tell the SCI_USER that we have discovered an * invalid link. */ if ((ihost->invalid_phy_mask & (1 << iphy->phy_index)) == 0) { ihost->invalid_phy_mask |= 1 << iphy->phy_index; dev_warn(&ihost->pdev->dev, "Invalid link up!\n"); } } /** * sci_port_general_link_up_handler - phy can be assigned to port? * @iport: sci_port object for which has a phy that has gone link up. * @iphy: This is the struct isci_phy object that has gone link up. 
* @flags: PF_RESUME, PF_NOTIFY to sci_port_activate_phy * * Determine if this phy can be assigned to this port . If the phy is * not a valid PHY for this port then the function will notify the user. * A PHY can only be part of a port if it's attached SAS ADDRESS is the * same as all other PHYs in the same port. */ static void sci_port_general_link_up_handler(struct isci_port *iport, struct isci_phy *iphy, u8 flags) { struct sci_sas_address port_sas_address; struct sci_sas_address phy_sas_address; sci_port_get_attached_sas_address(iport, &port_sas_address); sci_phy_get_attached_sas_address(iphy, &phy_sas_address); /* If the SAS address of the new phy matches the SAS address of * other phys in the port OR this is the first phy in the port, * then activate the phy and allow it to be used for operations * in this port. */ if ((phy_sas_address.high == port_sas_address.high && phy_sas_address.low == port_sas_address.low) || iport->active_phy_mask == 0) { struct sci_base_state_machine *sm = &iport->sm; sci_port_activate_phy(iport, iphy, flags); if (sm->current_state_id == SCI_PORT_RESETTING) port_state_machine_change(iport, SCI_PORT_READY); } else sci_port_invalid_link_up(iport, iphy); } /** * sci_port_is_wide() * This method returns false if the port only has a single phy object assigned. * If there are no phys or more than one phy then the method will return * true. * @iport: The port for which the wide port condition is to be checked. * * bool true Is returned if this is a wide ported port. false Is returned if * this is a narrow port. */ static bool sci_port_is_wide(struct isci_port *iport) { u32 index; u32 phy_count = 0; for (index = 0; index < SCI_MAX_PHYS; index++) { if (iport->phy_table[index] != NULL) { phy_count++; } } return phy_count != 1; } /** * sci_port_link_detected() * This method is called by the PHY object when the link is detected. if the * port wants the PHY to continue on to the link up state then the port * layer must return true. If the port object returns false the phy object * must halt its attempt to go link up. * @iport: The port associated with the phy object. * @iphy: The phy object that is trying to go link up. * * true if the phy object can continue to the link up condition. true Is * returned if this phy can continue to the ready state. false Is returned if * can not continue on to the ready state. This notification is in place for * wide ports and direct attached phys. Since there are no wide ported SATA * devices this could become an invalid port configuration. */ bool sci_port_link_detected(struct isci_port *iport, struct isci_phy *iphy) { if ((iport->logical_port_index != SCIC_SDS_DUMMY_PORT) && (iphy->protocol == SAS_PROTOCOL_SATA)) { if (sci_port_is_wide(iport)) { sci_port_invalid_link_up(iport, iphy); return false; } else { struct isci_host *ihost = iport->owning_controller; struct isci_port *dst_port = &(ihost->ports[iphy->phy_index]); writel(iphy->phy_index, &dst_port->port_pe_configuration_register[iphy->phy_index]); } } return true; } static void port_timeout(struct timer_list *t) { struct sci_timer *tmr = from_timer(tmr, t, timer); struct isci_port *iport = container_of(tmr, typeof(*iport), timer); struct isci_host *ihost = iport->owning_controller; unsigned long flags; u32 current_state; spin_lock_irqsave(&ihost->scic_lock, flags); if (tmr->cancel) goto done; current_state = iport->sm.current_state_id; if (current_state == SCI_PORT_RESETTING) { /* if the port is still in the resetting state then the timeout * fired before the reset completed. 
*/ port_state_machine_change(iport, SCI_PORT_FAILED); } else if (current_state == SCI_PORT_STOPPED) { /* if the port is stopped then the start request failed In this * case stay in the stopped state. */ dev_err(sciport_to_dev(iport), "%s: SCIC Port 0x%p failed to stop before timeout.\n", __func__, iport); } else if (current_state == SCI_PORT_STOPPING) { dev_dbg(sciport_to_dev(iport), "%s: port%d: stop complete timeout\n", __func__, iport->physical_port_index); } else { /* The port is in the ready state and we have a timer * reporting a timeout this should not happen. */ dev_err(sciport_to_dev(iport), "%s: SCIC Port 0x%p is processing a timeout operation " "in state %d.\n", __func__, iport, current_state); } done: spin_unlock_irqrestore(&ihost->scic_lock, flags); } /* --------------------------------------------------------------------------- */ /* * This function updates the hardwares VIIT entry for this port. */ static void sci_port_update_viit_entry(struct isci_port *iport) { struct sci_sas_address sas_address; sci_port_get_sas_address(iport, &sas_address); writel(sas_address.high, &iport->viit_registers->initiator_sas_address_hi); writel(sas_address.low, &iport->viit_registers->initiator_sas_address_lo); /* This value get cleared just in case its not already cleared */ writel(0, &iport->viit_registers->reserved); /* We are required to update the status register last */ writel(SCU_VIIT_ENTRY_ID_VIIT | SCU_VIIT_IPPT_INITIATOR | ((1 << iport->physical_port_index) << SCU_VIIT_ENTRY_LPVIE_SHIFT) | SCU_VIIT_STATUS_ALL_VALID, &iport->viit_registers->status); } enum sas_linkrate sci_port_get_max_allowed_speed(struct isci_port *iport) { u16 index; struct isci_phy *iphy; enum sas_linkrate max_allowed_speed = SAS_LINK_RATE_6_0_GBPS; /* * Loop through all of the phys in this port and find the phy with the * lowest maximum link rate. */ for (index = 0; index < SCI_MAX_PHYS; index++) { iphy = iport->phy_table[index]; if (iphy && sci_port_active_phy(iport, iphy) && iphy->max_negotiated_speed < max_allowed_speed) max_allowed_speed = iphy->max_negotiated_speed; } return max_allowed_speed; } static void sci_port_suspend_port_task_scheduler(struct isci_port *iport) { u32 pts_control_value; pts_control_value = readl(&iport->port_task_scheduler_registers->control); pts_control_value |= SCU_PTSxCR_GEN_BIT(SUSPEND); writel(pts_control_value, &iport->port_task_scheduler_registers->control); } /** * sci_port_post_dummy_request() - post dummy/workaround request * @iport: port to post task * * Prevent the hardware scheduler from posting new requests to the front * of the scheduler queue causing a starvation problem for currently * ongoing requests. * */ static void sci_port_post_dummy_request(struct isci_port *iport) { struct isci_host *ihost = iport->owning_controller; u16 tag = iport->reserved_tag; struct scu_task_context *tc; u32 command; tc = &ihost->task_context_table[ISCI_TAG_TCI(tag)]; tc->abort = 0; command = SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | ISCI_TAG_TCI(tag); sci_controller_post_request(ihost, command); } /** * sci_port_abort_dummy_request() * This routine will abort the dummy request. This will allow the hardware to * power down parts of the silicon to save power. * * @iport: The port on which the task must be aborted. 
* */ static void sci_port_abort_dummy_request(struct isci_port *iport) { struct isci_host *ihost = iport->owning_controller; u16 tag = iport->reserved_tag; struct scu_task_context *tc; u32 command; tc = &ihost->task_context_table[ISCI_TAG_TCI(tag)]; tc->abort = 1; command = SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT | iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | ISCI_TAG_TCI(tag); sci_controller_post_request(ihost, command); } /** * sci_port_resume_port_task_scheduler() * @iport: This is the struct isci_port object to resume. * * This method will resume the port task scheduler for this port object. none */ static void sci_port_resume_port_task_scheduler(struct isci_port *iport) { u32 pts_control_value; pts_control_value = readl(&iport->port_task_scheduler_registers->control); pts_control_value &= ~SCU_PTSxCR_GEN_BIT(SUSPEND); writel(pts_control_value, &iport->port_task_scheduler_registers->control); } static void sci_port_ready_substate_waiting_enter(struct sci_base_state_machine *sm) { struct isci_port *iport = container_of(sm, typeof(*iport), sm); sci_port_suspend_port_task_scheduler(iport); iport->not_ready_reason = SCIC_PORT_NOT_READY_NO_ACTIVE_PHYS; if (iport->active_phy_mask != 0) { /* At least one of the phys on the port is ready */ port_state_machine_change(iport, SCI_PORT_SUB_OPERATIONAL); } } static void scic_sds_port_ready_substate_waiting_exit( struct sci_base_state_machine *sm) { struct isci_port *iport = container_of(sm, typeof(*iport), sm); sci_port_resume_port_task_scheduler(iport); } static void sci_port_ready_substate_operational_enter(struct sci_base_state_machine *sm) { u32 index; struct isci_port *iport = container_of(sm, typeof(*iport), sm); struct isci_host *ihost = iport->owning_controller; dev_dbg(&ihost->pdev->dev, "%s: port%d ready\n", __func__, iport->physical_port_index); for (index = 0; index < SCI_MAX_PHYS; index++) { if (iport->phy_table[index]) { writel(iport->physical_port_index, &iport->port_pe_configuration_register[ iport->phy_table[index]->phy_index]); if (((iport->active_phy_mask^iport->enabled_phy_mask) & (1 << index)) != 0) sci_port_resume_phy(iport, iport->phy_table[index]); } } sci_port_update_viit_entry(iport); /* * Post the dummy task for the port so the hardware can schedule * io correctly */ sci_port_post_dummy_request(iport); } static void sci_port_invalidate_dummy_remote_node(struct isci_port *iport) { struct isci_host *ihost = iport->owning_controller; u8 phys_index = iport->physical_port_index; union scu_remote_node_context *rnc; u16 rni = iport->reserved_rni; u32 command; rnc = &ihost->remote_node_context_table[rni]; rnc->ssp.is_valid = false; /* ensure the preceding tc abort request has reached the * controller and give it ample time to act before posting the rnc * invalidate */ readl(&ihost->smu_registers->interrupt_status); /* flush */ udelay(10); command = SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE | phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni; sci_controller_post_request(ihost, command); } /** * sci_port_ready_substate_operational_exit() * @sm: This is the object which is cast to a struct isci_port object. * * This method will perform the actions required by the struct isci_port on * exiting the SCI_PORT_SUB_OPERATIONAL. This function reports * the port not ready and suspends the port task scheduler. 
none */ static void sci_port_ready_substate_operational_exit(struct sci_base_state_machine *sm) { struct isci_port *iport = container_of(sm, typeof(*iport), sm); struct isci_host *ihost = iport->owning_controller; /* * Kill the dummy task for this port if it has not yet posted * the hardware will treat this as a NOP and just return abort * complete. */ sci_port_abort_dummy_request(iport); dev_dbg(&ihost->pdev->dev, "%s: port%d !ready\n", __func__, iport->physical_port_index); if (iport->ready_exit) sci_port_invalidate_dummy_remote_node(iport); } static void sci_port_ready_substate_configuring_enter(struct sci_base_state_machine *sm) { struct isci_port *iport = container_of(sm, typeof(*iport), sm); struct isci_host *ihost = iport->owning_controller; if (iport->active_phy_mask == 0) { dev_dbg(&ihost->pdev->dev, "%s: port%d !ready\n", __func__, iport->physical_port_index); port_state_machine_change(iport, SCI_PORT_SUB_WAITING); } else port_state_machine_change(iport, SCI_PORT_SUB_OPERATIONAL); } enum sci_status sci_port_start(struct isci_port *iport) { struct isci_host *ihost = iport->owning_controller; enum sci_status status = SCI_SUCCESS; enum sci_port_states state; u32 phy_mask; state = iport->sm.current_state_id; if (state != SCI_PORT_STOPPED) { dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n", __func__, port_state_name(state)); return SCI_FAILURE_INVALID_STATE; } if (iport->assigned_device_count > 0) { /* TODO This is a start failure operation because * there are still devices assigned to this port. * There must be no devices assigned to a port on a * start operation. */ return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION; } if (iport->reserved_rni == SCU_DUMMY_INDEX) { u16 rni = sci_remote_node_table_allocate_remote_node( &ihost->available_remote_nodes, 1); if (rni != SCU_DUMMY_INDEX) sci_port_construct_dummy_rnc(iport, rni); else status = SCI_FAILURE_INSUFFICIENT_RESOURCES; iport->reserved_rni = rni; } if (iport->reserved_tag == SCI_CONTROLLER_INVALID_IO_TAG) { u16 tag; tag = isci_alloc_tag(ihost); if (tag == SCI_CONTROLLER_INVALID_IO_TAG) status = SCI_FAILURE_INSUFFICIENT_RESOURCES; else sci_port_construct_dummy_task(iport, tag); iport->reserved_tag = tag; } if (status == SCI_SUCCESS) { phy_mask = sci_port_get_phys(iport); /* * There are one or more phys assigned to this port. Make sure * the port's phy mask is in fact legal and supported by the * silicon. 
*/ if (sci_port_is_phy_mask_valid(iport, phy_mask) == true) { port_state_machine_change(iport, SCI_PORT_READY); return SCI_SUCCESS; } status = SCI_FAILURE; } if (status != SCI_SUCCESS) sci_port_destroy_dummy_resources(iport); return status; } enum sci_status sci_port_stop(struct isci_port *iport) { enum sci_port_states state; state = iport->sm.current_state_id; switch (state) { case SCI_PORT_STOPPED: return SCI_SUCCESS; case SCI_PORT_SUB_WAITING: case SCI_PORT_SUB_OPERATIONAL: case SCI_PORT_SUB_CONFIGURING: case SCI_PORT_RESETTING: port_state_machine_change(iport, SCI_PORT_STOPPING); return SCI_SUCCESS; default: dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n", __func__, port_state_name(state)); return SCI_FAILURE_INVALID_STATE; } } static enum sci_status sci_port_hard_reset(struct isci_port *iport, u32 timeout) { enum sci_status status = SCI_FAILURE_INVALID_PHY; struct isci_phy *iphy = NULL; enum sci_port_states state; u32 phy_index; state = iport->sm.current_state_id; if (state != SCI_PORT_SUB_OPERATIONAL) { dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n", __func__, port_state_name(state)); return SCI_FAILURE_INVALID_STATE; } /* Select a phy on which we can send the hard reset request. */ for (phy_index = 0; phy_index < SCI_MAX_PHYS && !iphy; phy_index++) { iphy = iport->phy_table[phy_index]; if (iphy && !sci_port_active_phy(iport, iphy)) { /* * We found a phy but it is not ready select * different phy */ iphy = NULL; } } /* If we have a phy then go ahead and start the reset procedure */ if (!iphy) return status; status = sci_phy_reset(iphy); if (status != SCI_SUCCESS) return status; sci_mod_timer(&iport->timer, timeout); iport->not_ready_reason = SCIC_PORT_NOT_READY_HARD_RESET_REQUESTED; port_state_machine_change(iport, SCI_PORT_RESETTING); return SCI_SUCCESS; } /** * sci_port_add_phy() * @iport: This parameter specifies the port in which the phy will be added. * @iphy: This parameter is the phy which is to be added to the port. * * This method will add a PHY to the selected port. This method returns an * enum sci_status. SCI_SUCCESS the phy has been added to the port. Any other * status is a failure to add the phy to the port. 
*/ enum sci_status sci_port_add_phy(struct isci_port *iport, struct isci_phy *iphy) { enum sci_status status; enum sci_port_states state; sci_port_bcn_enable(iport); state = iport->sm.current_state_id; switch (state) { case SCI_PORT_STOPPED: { struct sci_sas_address port_sas_address; /* Read the port assigned SAS Address if there is one */ sci_port_get_sas_address(iport, &port_sas_address); if (port_sas_address.high != 0 && port_sas_address.low != 0) { struct sci_sas_address phy_sas_address; /* Make sure that the PHY SAS Address matches the SAS Address * for this port */ sci_phy_get_sas_address(iphy, &phy_sas_address); if (port_sas_address.high != phy_sas_address.high || port_sas_address.low != phy_sas_address.low) return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION; } return sci_port_set_phy(iport, iphy); } case SCI_PORT_SUB_WAITING: case SCI_PORT_SUB_OPERATIONAL: status = sci_port_set_phy(iport, iphy); if (status != SCI_SUCCESS) return status; sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY|PF_RESUME); iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING; port_state_machine_change(iport, SCI_PORT_SUB_CONFIGURING); return status; case SCI_PORT_SUB_CONFIGURING: status = sci_port_set_phy(iport, iphy); if (status != SCI_SUCCESS) return status; sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY); /* Re-enter the configuring state since this may be the last phy in * the port. */ port_state_machine_change(iport, SCI_PORT_SUB_CONFIGURING); return SCI_SUCCESS; default: dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n", __func__, port_state_name(state)); return SCI_FAILURE_INVALID_STATE; } } /** * sci_port_remove_phy() * @iport: This parameter specifies the port in which the phy will be added. * @iphy: This parameter is the phy which is to be added to the port. * * This method will remove the PHY from the selected PORT. This method returns * an enum sci_status. SCI_SUCCESS the phy has been removed from the port. Any * other status is a failure to add the phy to the port. 
*/ enum sci_status sci_port_remove_phy(struct isci_port *iport, struct isci_phy *iphy) { enum sci_status status; enum sci_port_states state; state = iport->sm.current_state_id; switch (state) { case SCI_PORT_STOPPED: return sci_port_clear_phy(iport, iphy); case SCI_PORT_SUB_OPERATIONAL: status = sci_port_clear_phy(iport, iphy); if (status != SCI_SUCCESS) return status; sci_port_deactivate_phy(iport, iphy, true); iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING; port_state_machine_change(iport, SCI_PORT_SUB_CONFIGURING); return SCI_SUCCESS; case SCI_PORT_SUB_CONFIGURING: status = sci_port_clear_phy(iport, iphy); if (status != SCI_SUCCESS) return status; sci_port_deactivate_phy(iport, iphy, true); /* Re-enter the configuring state since this may be the last phy in * the port */ port_state_machine_change(iport, SCI_PORT_SUB_CONFIGURING); return SCI_SUCCESS; default: dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n", __func__, port_state_name(state)); return SCI_FAILURE_INVALID_STATE; } } enum sci_status sci_port_link_up(struct isci_port *iport, struct isci_phy *iphy) { enum sci_port_states state; state = iport->sm.current_state_id; switch (state) { case SCI_PORT_SUB_WAITING: /* Since this is the first phy going link up for the port we * can just enable it and continue */ sci_port_activate_phy(iport, iphy, PF_NOTIFY|PF_RESUME); port_state_machine_change(iport, SCI_PORT_SUB_OPERATIONAL); return SCI_SUCCESS; case SCI_PORT_SUB_OPERATIONAL: sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY|PF_RESUME); return SCI_SUCCESS; case SCI_PORT_RESETTING: /* TODO We should make sure that the phy that has gone * link up is the same one on which we sent the reset. It is * possible that the phy on which we sent the reset is not the * one that has gone link up and we want to make sure that * phy being reset comes back. Consider the case where a * reset is sent but before the hardware processes the reset it * get a link up on the port because of a hot plug event. * because of the reset request this phy will go link down * almost immediately. */ /* In the resetting state we don't notify the user regarding * link up and link down notifications. */ sci_port_general_link_up_handler(iport, iphy, PF_RESUME); return SCI_SUCCESS; default: dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n", __func__, port_state_name(state)); return SCI_FAILURE_INVALID_STATE; } } enum sci_status sci_port_link_down(struct isci_port *iport, struct isci_phy *iphy) { enum sci_port_states state; state = iport->sm.current_state_id; switch (state) { case SCI_PORT_SUB_OPERATIONAL: sci_port_deactivate_phy(iport, iphy, true); /* If there are no active phys left in the port, then * transition the port to the WAITING state until such time * as a phy goes link up */ if (iport->active_phy_mask == 0) port_state_machine_change(iport, SCI_PORT_SUB_WAITING); return SCI_SUCCESS; case SCI_PORT_RESETTING: /* In the resetting state we don't notify the user regarding * link up and link down notifications. 
*/ sci_port_deactivate_phy(iport, iphy, false); return SCI_SUCCESS; default: dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n", __func__, port_state_name(state)); return SCI_FAILURE_INVALID_STATE; } } enum sci_status sci_port_start_io(struct isci_port *iport, struct isci_remote_device *idev, struct isci_request *ireq) { enum sci_port_states state; state = iport->sm.current_state_id; switch (state) { case SCI_PORT_SUB_WAITING: return SCI_FAILURE_INVALID_STATE; case SCI_PORT_SUB_OPERATIONAL: iport->started_request_count++; return SCI_SUCCESS; default: dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n", __func__, port_state_name(state)); return SCI_FAILURE_INVALID_STATE; } } enum sci_status sci_port_complete_io(struct isci_port *iport, struct isci_remote_device *idev, struct isci_request *ireq) { enum sci_port_states state; state = iport->sm.current_state_id; switch (state) { case SCI_PORT_STOPPED: dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n", __func__, port_state_name(state)); return SCI_FAILURE_INVALID_STATE; case SCI_PORT_STOPPING: sci_port_decrement_request_count(iport); if (iport->started_request_count == 0) port_state_machine_change(iport, SCI_PORT_STOPPED); break; case SCI_PORT_READY: case SCI_PORT_RESETTING: case SCI_PORT_FAILED: case SCI_PORT_SUB_WAITING: case SCI_PORT_SUB_OPERATIONAL: sci_port_decrement_request_count(iport); break; case SCI_PORT_SUB_CONFIGURING: sci_port_decrement_request_count(iport); if (iport->started_request_count == 0) { port_state_machine_change(iport, SCI_PORT_SUB_OPERATIONAL); } break; } return SCI_SUCCESS; } static void sci_port_enable_port_task_scheduler(struct isci_port *iport) { u32 pts_control_value; /* enable the port task scheduler in a suspended state */ pts_control_value = readl(&iport->port_task_scheduler_registers->control); pts_control_value |= SCU_PTSxCR_GEN_BIT(ENABLE) | SCU_PTSxCR_GEN_BIT(SUSPEND); writel(pts_control_value, &iport->port_task_scheduler_registers->control); } static void sci_port_disable_port_task_scheduler(struct isci_port *iport) { u32 pts_control_value; pts_control_value = readl(&iport->port_task_scheduler_registers->control); pts_control_value &= ~(SCU_PTSxCR_GEN_BIT(ENABLE) | SCU_PTSxCR_GEN_BIT(SUSPEND)); writel(pts_control_value, &iport->port_task_scheduler_registers->control); } static void sci_port_post_dummy_remote_node(struct isci_port *iport) { struct isci_host *ihost = iport->owning_controller; u8 phys_index = iport->physical_port_index; union scu_remote_node_context *rnc; u16 rni = iport->reserved_rni; u32 command; rnc = &ihost->remote_node_context_table[rni]; rnc->ssp.is_valid = true; command = SCU_CONTEXT_COMMAND_POST_RNC_32 | phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni; sci_controller_post_request(ihost, command); /* ensure hardware has seen the post rnc command and give it * ample time to act before sending the suspend */ readl(&ihost->smu_registers->interrupt_status); /* flush */ udelay(10); command = SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX_RX | phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni; sci_controller_post_request(ihost, command); } static void sci_port_stopped_state_enter(struct sci_base_state_machine *sm) { struct isci_port *iport = container_of(sm, typeof(*iport), sm); if (iport->sm.previous_state_id == SCI_PORT_STOPPING) { /* * If we enter this state becasuse of a request to stop * the port then we want to disable the hardwares port * task scheduler. 
*/ sci_port_disable_port_task_scheduler(iport); } } static void sci_port_stopped_state_exit(struct sci_base_state_machine *sm) { struct isci_port *iport = container_of(sm, typeof(*iport), sm); /* Enable and suspend the port task scheduler */ sci_port_enable_port_task_scheduler(iport); } static void sci_port_ready_state_enter(struct sci_base_state_machine *sm) { struct isci_port *iport = container_of(sm, typeof(*iport), sm); struct isci_host *ihost = iport->owning_controller; u32 prev_state; prev_state = iport->sm.previous_state_id; if (prev_state == SCI_PORT_RESETTING) isci_port_hard_reset_complete(iport, SCI_SUCCESS); else dev_dbg(&ihost->pdev->dev, "%s: port%d !ready\n", __func__, iport->physical_port_index); /* Post and suspend the dummy remote node context for this port. */ sci_port_post_dummy_remote_node(iport); /* Start the ready substate machine */ port_state_machine_change(iport, SCI_PORT_SUB_WAITING); } static void sci_port_resetting_state_exit(struct sci_base_state_machine *sm) { struct isci_port *iport = container_of(sm, typeof(*iport), sm); sci_del_timer(&iport->timer); } static void sci_port_stopping_state_exit(struct sci_base_state_machine *sm) { struct isci_port *iport = container_of(sm, typeof(*iport), sm); sci_del_timer(&iport->timer); sci_port_destroy_dummy_resources(iport); } static void sci_port_failed_state_enter(struct sci_base_state_machine *sm) { struct isci_port *iport = container_of(sm, typeof(*iport), sm); isci_port_hard_reset_complete(iport, SCI_FAILURE_TIMEOUT); } void sci_port_set_hang_detection_timeout(struct isci_port *iport, u32 timeout) { int phy_index; u32 phy_mask = iport->active_phy_mask; if (timeout) ++iport->hang_detect_users; else if (iport->hang_detect_users > 1) --iport->hang_detect_users; else iport->hang_detect_users = 0; if (timeout || (iport->hang_detect_users == 0)) { for (phy_index = 0; phy_index < SCI_MAX_PHYS; phy_index++) { if ((phy_mask >> phy_index) & 1) { writel(timeout, &iport->phy_table[phy_index] ->link_layer_registers ->link_layer_hang_detection_timeout); } } } } /* --------------------------------------------------------------------------- */ static const struct sci_base_state sci_port_state_table[] = { [SCI_PORT_STOPPED] = { .enter_state = sci_port_stopped_state_enter, .exit_state = sci_port_stopped_state_exit }, [SCI_PORT_STOPPING] = { .exit_state = sci_port_stopping_state_exit }, [SCI_PORT_READY] = { .enter_state = sci_port_ready_state_enter, }, [SCI_PORT_SUB_WAITING] = { .enter_state = sci_port_ready_substate_waiting_enter, .exit_state = scic_sds_port_ready_substate_waiting_exit, }, [SCI_PORT_SUB_OPERATIONAL] = { .enter_state = sci_port_ready_substate_operational_enter, .exit_state = sci_port_ready_substate_operational_exit }, [SCI_PORT_SUB_CONFIGURING] = { .enter_state = sci_port_ready_substate_configuring_enter }, [SCI_PORT_RESETTING] = { .exit_state = sci_port_resetting_state_exit }, [SCI_PORT_FAILED] = { .enter_state = sci_port_failed_state_enter, } }; void sci_port_construct(struct isci_port *iport, u8 index, struct isci_host *ihost) { sci_init_sm(&iport->sm, sci_port_state_table, SCI_PORT_STOPPED); iport->logical_port_index = SCIC_SDS_DUMMY_PORT; iport->physical_port_index = index; iport->active_phy_mask = 0; iport->enabled_phy_mask = 0; iport->last_active_phy = 0; iport->ready_exit = false; iport->owning_controller = ihost; iport->started_request_count = 0; iport->assigned_device_count = 0; iport->hang_detect_users = 0; iport->reserved_rni = SCU_DUMMY_INDEX; iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG; 
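	/*
	 * Initialize the per-port timer with port_timeout() as its handler; the
	 * timer itself is only started later (e.g. via sci_mod_timer() in
	 * sci_port_hard_reset()) to bound reset and stop operations.
	 */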
sci_init_timer(&iport->timer, port_timeout); iport->port_task_scheduler_registers = NULL; for (index = 0; index < SCI_MAX_PHYS; index++) iport->phy_table[index] = NULL; } void sci_port_broadcast_change_received(struct isci_port *iport, struct isci_phy *iphy) { struct isci_host *ihost = iport->owning_controller; /* notify the user. */ isci_port_bc_change_received(ihost, iport, iphy); } static void wait_port_reset(struct isci_host *ihost, struct isci_port *iport) { wait_event(ihost->eventq, !test_bit(IPORT_RESET_PENDING, &iport->state)); } int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport, struct isci_phy *iphy) { unsigned long flags; enum sci_status status; int ret = TMF_RESP_FUNC_COMPLETE; dev_dbg(&ihost->pdev->dev, "%s: iport = %p\n", __func__, iport); spin_lock_irqsave(&ihost->scic_lock, flags); set_bit(IPORT_RESET_PENDING, &iport->state); #define ISCI_PORT_RESET_TIMEOUT SCIC_SDS_SIGNATURE_FIS_TIMEOUT status = sci_port_hard_reset(iport, ISCI_PORT_RESET_TIMEOUT); spin_unlock_irqrestore(&ihost->scic_lock, flags); if (status == SCI_SUCCESS) { wait_port_reset(ihost, iport); dev_dbg(&ihost->pdev->dev, "%s: iport = %p; hard reset completion\n", __func__, iport); if (iport->hard_reset_status != SCI_SUCCESS) { ret = TMF_RESP_FUNC_FAILED; dev_err(&ihost->pdev->dev, "%s: iport = %p; hard reset failed (0x%x)\n", __func__, iport, iport->hard_reset_status); } } else { clear_bit(IPORT_RESET_PENDING, &iport->state); wake_up(&ihost->eventq); ret = TMF_RESP_FUNC_FAILED; dev_err(&ihost->pdev->dev, "%s: iport = %p; sci_port_hard_reset call" " failed 0x%x\n", __func__, iport, status); } return ret; } int isci_ata_check_ready(struct domain_device *dev) { struct isci_port *iport = dev->port->lldd_port; struct isci_host *ihost = dev_to_ihost(dev); struct isci_remote_device *idev; unsigned long flags; int rc = 0; spin_lock_irqsave(&ihost->scic_lock, flags); idev = isci_lookup_device(dev); spin_unlock_irqrestore(&ihost->scic_lock, flags); if (!idev) goto out; if (test_bit(IPORT_RESET_PENDING, &iport->state)) goto out; rc = !!iport->active_phy_mask; out: isci_put_device(idev); return rc; } void isci_port_deformed(struct asd_sas_phy *phy) { struct isci_host *ihost = phy->ha->lldd_ha; struct isci_port *iport = phy->port->lldd_port; unsigned long flags; int i; /* we got a port notification on a port that was subsequently * torn down and libsas is just now catching up */ if (!iport) return; spin_lock_irqsave(&ihost->scic_lock, flags); for (i = 0; i < SCI_MAX_PHYS; i++) { if (iport->active_phy_mask & 1 << i) break; } spin_unlock_irqrestore(&ihost->scic_lock, flags); if (i >= SCI_MAX_PHYS) dev_dbg(&ihost->pdev->dev, "%s: port: %ld\n", __func__, (long) (iport - &ihost->ports[0])); } void isci_port_formed(struct asd_sas_phy *phy) { struct isci_host *ihost = phy->ha->lldd_ha; struct isci_phy *iphy = to_iphy(phy); struct asd_sas_port *port = phy->port; struct isci_port *iport = NULL; unsigned long flags; int i; /* initial ports are formed as the driver is still initializing, * wait for that process to complete */ wait_for_start(ihost); spin_lock_irqsave(&ihost->scic_lock, flags); for (i = 0; i < SCI_MAX_PORTS; i++) { iport = &ihost->ports[i]; if (iport->active_phy_mask & 1 << iphy->phy_index) break; } spin_unlock_irqrestore(&ihost->scic_lock, flags); if (i >= SCI_MAX_PORTS) iport = NULL; port->lldd_port = iport; }
linux-master
drivers/scsi/isci/port.c
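The port code above drives all port behavior through a table of per-state enter/exit callbacks (sci_port_state_table, installed by sci_init_sm()) and a single transition routine (port_state_machine_change()). What follows is a minimal, self-contained C sketch of that table-driven pattern only; it is not the driver's sci_base_state_machine implementation, and every demo_* name is invented for illustration.

#include <stdio.h>

/* Illustrative states, loosely mirroring SCI_PORT_STOPPED / SCI_PORT_READY. */
enum demo_state { DEMO_STOPPED, DEMO_READY, DEMO_STATE_COUNT };

struct demo_sm;

/* One enter/exit pair per state, like struct sci_base_state. */
struct demo_state_ops {
	void (*enter_state)(struct demo_sm *sm);
	void (*exit_state)(struct demo_sm *sm);
};

struct demo_sm {
	enum demo_state current_state_id;
	enum demo_state previous_state_id;
	const struct demo_state_ops *table;
};

/* Analogue of port_state_machine_change(): run old exit, then new enter. */
static void demo_sm_change(struct demo_sm *sm, enum demo_state next)
{
	const struct demo_state_ops *ops = &sm->table[sm->current_state_id];

	if (ops->exit_state)
		ops->exit_state(sm);

	sm->previous_state_id = sm->current_state_id;
	sm->current_state_id = next;

	ops = &sm->table[next];
	if (ops->enter_state)
		ops->enter_state(sm);
}

static void demo_stopped_exit(struct demo_sm *sm)
{
	printf("leaving stopped\n");
}

static void demo_ready_enter(struct demo_sm *sm)
{
	printf("ready (previous state %d)\n", (int)sm->previous_state_id);
}

static const struct demo_state_ops demo_table[DEMO_STATE_COUNT] = {
	[DEMO_STOPPED] = { .exit_state  = demo_stopped_exit },
	[DEMO_READY]   = { .enter_state = demo_ready_enter },
};

int main(void)
{
	struct demo_sm sm = {
		.current_state_id  = DEMO_STOPPED,
		.previous_state_id = DEMO_STOPPED,
		.table = demo_table,
	};

	demo_sm_change(&sm, DEMO_READY);	/* stopped -> ready */
	return 0;
}

The design choice this illustrates is why handlers such as sci_port_ready_state_enter() and sci_port_stopped_state_exit() contain no dispatch logic of their own: the table owns the wiring, and callers only request a target state.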
/* * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * The full GNU General Public License is included in this distribution * in the file called LICENSE.GPL. * * BSD LICENSE * * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * This file contains the implementation of the SCIC_SDS_REMOTE_NODE_TABLE * public, protected, and private methods. */ #include "remote_node_table.h" #include "remote_node_context.h" /** * sci_remote_node_table_get_group_index() * @remote_node_table: This is the remote node index table from which the * selection will be made. * @group_table_index: This is the index to the group table from which to * search for an available selection. * * This routine will find the bit position in absolute bit terms of the next 32 * + bit position. If there are available bits in the first u32 then it is * just bit position. u32 This is the absolute bit position for an available * group. 
*/ static u32 sci_remote_node_table_get_group_index( struct sci_remote_node_table *remote_node_table, u32 group_table_index) { u32 dword_index; u32 *group_table; u32 bit_index; group_table = remote_node_table->remote_node_groups[group_table_index]; for (dword_index = 0; dword_index < remote_node_table->group_array_size; dword_index++) { if (group_table[dword_index] != 0) { for (bit_index = 0; bit_index < 32; bit_index++) { if ((group_table[dword_index] & (1 << bit_index)) != 0) { return (dword_index * 32) + bit_index; } } } } return SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX; } /** * sci_remote_node_table_clear_group_index() * @remote_node_table: This the remote node table in which to clear the * selector. * @group_table_index: This is the remote node selector in which the change will be * made. * @group_index: This is the bit index in the table to be modified. * * This method will clear the group index entry in the specified group index * table. none */ static void sci_remote_node_table_clear_group_index( struct sci_remote_node_table *remote_node_table, u32 group_table_index, u32 group_index) { u32 dword_index; u32 bit_index; u32 *group_table; BUG_ON(group_table_index >= SCU_STP_REMOTE_NODE_COUNT); BUG_ON(group_index >= (u32)(remote_node_table->group_array_size * 32)); dword_index = group_index / 32; bit_index = group_index % 32; group_table = remote_node_table->remote_node_groups[group_table_index]; group_table[dword_index] = group_table[dword_index] & ~(1 << bit_index); } /** * sci_remote_node_table_set_group_index() * @remote_node_table: This the remote node table in which to set the * selector. * @group_table_index: This is the remote node selector in which the change * will be made. * @group_index: This is the bit position in the table to be modified. * * This method will set the group index bit entry in the specified gropu index * table. none */ static void sci_remote_node_table_set_group_index( struct sci_remote_node_table *remote_node_table, u32 group_table_index, u32 group_index) { u32 dword_index; u32 bit_index; u32 *group_table; BUG_ON(group_table_index >= SCU_STP_REMOTE_NODE_COUNT); BUG_ON(group_index >= (u32)(remote_node_table->group_array_size * 32)); dword_index = group_index / 32; bit_index = group_index % 32; group_table = remote_node_table->remote_node_groups[group_table_index]; group_table[dword_index] = group_table[dword_index] | (1 << bit_index); } /** * sci_remote_node_table_set_node_index() * @remote_node_table: This is the remote node table in which to modify * the remote node availability. * @remote_node_index: This is the remote node index that is being returned to * the table. * * This method will set the remote to available in the remote node allocation * table. 
none */ static void sci_remote_node_table_set_node_index( struct sci_remote_node_table *remote_node_table, u32 remote_node_index) { u32 dword_location; u32 dword_remainder; u32 slot_normalized; u32 slot_position; BUG_ON( (remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD) <= (remote_node_index / SCU_STP_REMOTE_NODE_COUNT) ); dword_location = remote_node_index / SCIC_SDS_REMOTE_NODES_PER_DWORD; dword_remainder = remote_node_index % SCIC_SDS_REMOTE_NODES_PER_DWORD; slot_normalized = (dword_remainder / SCU_STP_REMOTE_NODE_COUNT) * sizeof(u32); slot_position = remote_node_index % SCU_STP_REMOTE_NODE_COUNT; remote_node_table->available_remote_nodes[dword_location] |= 1 << (slot_normalized + slot_position); } /** * sci_remote_node_table_clear_node_index() * @remote_node_table: This is the remote node table from which to clear * the available remote node bit. * @remote_node_index: This is the remote node index which is to be cleared * from the table. * * This method clears the remote node index from the table of available remote * nodes. none */ static void sci_remote_node_table_clear_node_index( struct sci_remote_node_table *remote_node_table, u32 remote_node_index) { u32 dword_location; u32 dword_remainder; u32 slot_position; u32 slot_normalized; BUG_ON( (remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD) <= (remote_node_index / SCU_STP_REMOTE_NODE_COUNT) ); dword_location = remote_node_index / SCIC_SDS_REMOTE_NODES_PER_DWORD; dword_remainder = remote_node_index % SCIC_SDS_REMOTE_NODES_PER_DWORD; slot_normalized = (dword_remainder / SCU_STP_REMOTE_NODE_COUNT) * sizeof(u32); slot_position = remote_node_index % SCU_STP_REMOTE_NODE_COUNT; remote_node_table->available_remote_nodes[dword_location] &= ~(1 << (slot_normalized + slot_position)); } /** * sci_remote_node_table_clear_group() * @remote_node_table: The remote node table from which the slot will be * cleared. * @group_index: The index for the slot that is to be cleared. * * This method clears the entire table slot at the specified slot index. none */ static void sci_remote_node_table_clear_group( struct sci_remote_node_table *remote_node_table, u32 group_index) { u32 dword_location; u32 dword_remainder; u32 dword_value; BUG_ON( (remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD) <= (group_index / SCU_STP_REMOTE_NODE_COUNT) ); dword_location = group_index / SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD; dword_remainder = group_index % SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD; dword_value = remote_node_table->available_remote_nodes[dword_location]; dword_value &= ~(SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE << (dword_remainder * 4)); remote_node_table->available_remote_nodes[dword_location] = dword_value; } /* * sci_remote_node_table_set_group() * * THis method sets an entire remote node group in the remote node table. 
*/ static void sci_remote_node_table_set_group( struct sci_remote_node_table *remote_node_table, u32 group_index) { u32 dword_location; u32 dword_remainder; u32 dword_value; BUG_ON( (remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD) <= (group_index / SCU_STP_REMOTE_NODE_COUNT) ); dword_location = group_index / SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD; dword_remainder = group_index % SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD; dword_value = remote_node_table->available_remote_nodes[dword_location]; dword_value |= (SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE << (dword_remainder * 4)); remote_node_table->available_remote_nodes[dword_location] = dword_value; } /** * sci_remote_node_table_get_group_value() * @remote_node_table: This is the remote node table that for which the group * value is to be returned. * @group_index: This is the group index to use to find the group value. * * This method will return the group value for the specified group index. The * bit values at the specified remote node group index. */ static u8 sci_remote_node_table_get_group_value( struct sci_remote_node_table *remote_node_table, u32 group_index) { u32 dword_location; u32 dword_remainder; u32 dword_value; dword_location = group_index / SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD; dword_remainder = group_index % SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD; dword_value = remote_node_table->available_remote_nodes[dword_location]; dword_value &= (SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE << (dword_remainder * 4)); dword_value = dword_value >> (dword_remainder * 4); return (u8)dword_value; } /** * sci_remote_node_table_initialize() * @remote_node_table: The remote that which is to be initialized. * @remote_node_entries: The number of entries to put in the table. * * This method will initialize the remote node table for use. 
none */ void sci_remote_node_table_initialize( struct sci_remote_node_table *remote_node_table, u32 remote_node_entries) { u32 index; /* * Initialize the raw data we could improve the speed by only initializing * those entries that we are actually going to be used */ memset( remote_node_table->available_remote_nodes, 0x00, sizeof(remote_node_table->available_remote_nodes) ); memset( remote_node_table->remote_node_groups, 0x00, sizeof(remote_node_table->remote_node_groups) ); /* Initialize the available remote node sets */ remote_node_table->available_nodes_array_size = (u16) (remote_node_entries / SCIC_SDS_REMOTE_NODES_PER_DWORD) + ((remote_node_entries % SCIC_SDS_REMOTE_NODES_PER_DWORD) != 0); /* Initialize each full DWORD to a FULL SET of remote nodes */ for (index = 0; index < remote_node_entries; index++) { sci_remote_node_table_set_node_index(remote_node_table, index); } remote_node_table->group_array_size = (u16) (remote_node_entries / (SCU_STP_REMOTE_NODE_COUNT * 32)) + ((remote_node_entries % (SCU_STP_REMOTE_NODE_COUNT * 32)) != 0); for (index = 0; index < (remote_node_entries / SCU_STP_REMOTE_NODE_COUNT); index++) { /* * These are all guaranteed to be full slot values so fill them in the * available sets of 3 remote nodes */ sci_remote_node_table_set_group_index(remote_node_table, 2, index); } /* Now fill in any remainders that we may find */ if ((remote_node_entries % SCU_STP_REMOTE_NODE_COUNT) == 2) { sci_remote_node_table_set_group_index(remote_node_table, 1, index); } else if ((remote_node_entries % SCU_STP_REMOTE_NODE_COUNT) == 1) { sci_remote_node_table_set_group_index(remote_node_table, 0, index); } } /** * sci_remote_node_table_allocate_single_remote_node() * @remote_node_table: The remote node table from which to allocate a * remote node. * @group_table_index: The group index that is to be used for the search. * * This method will allocate a single RNi from the remote node table. The * table index will determine from which remote node group table to search. * This search may fail and another group node table can be specified. The * function is designed to allow a serach of the available single remote node * group up to the triple remote node group. If an entry is found in the * specified table the remote node is removed and the remote node groups are * updated. The RNi value or an invalid remote node context if an RNi can not * be found. 
*/ static u16 sci_remote_node_table_allocate_single_remote_node( struct sci_remote_node_table *remote_node_table, u32 group_table_index) { u8 index; u8 group_value; u32 group_index; u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX; group_index = sci_remote_node_table_get_group_index( remote_node_table, group_table_index); /* We could not find an available slot in the table selector 0 */ if (group_index != SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX) { group_value = sci_remote_node_table_get_group_value( remote_node_table, group_index); for (index = 0; index < SCU_STP_REMOTE_NODE_COUNT; index++) { if (((1 << index) & group_value) != 0) { /* We have selected a bit now clear it */ remote_node_index = (u16)(group_index * SCU_STP_REMOTE_NODE_COUNT + index); sci_remote_node_table_clear_group_index( remote_node_table, group_table_index, group_index ); sci_remote_node_table_clear_node_index( remote_node_table, remote_node_index ); if (group_table_index > 0) { sci_remote_node_table_set_group_index( remote_node_table, group_table_index - 1, group_index ); } break; } } } return remote_node_index; } /** * sci_remote_node_table_allocate_triple_remote_node() * @remote_node_table: This is the remote node table from which to allocate the * remote node entries. * @group_table_index: This is the group table index which must equal two (2) * for this operation. * * This method will allocate three consecutive remote node context entries. If * there are no remaining triple entries the function will return a failure. * The remote node index that represents three consecutive remote node entries * or an invalid remote node context if none can be found. */ static u16 sci_remote_node_table_allocate_triple_remote_node( struct sci_remote_node_table *remote_node_table, u32 group_table_index) { u32 group_index; u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX; group_index = sci_remote_node_table_get_group_index( remote_node_table, group_table_index); if (group_index != SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX) { remote_node_index = (u16)group_index * SCU_STP_REMOTE_NODE_COUNT; sci_remote_node_table_clear_group_index( remote_node_table, group_table_index, group_index ); sci_remote_node_table_clear_group( remote_node_table, group_index ); } return remote_node_index; } /** * sci_remote_node_table_allocate_remote_node() * @remote_node_table: This is the remote node table from which the remote node * allocation is to take place. * @remote_node_count: This is ther remote node count which is one of * SCU_SSP_REMOTE_NODE_COUNT(1) or SCU_STP_REMOTE_NODE_COUNT(3). * * This method will allocate a remote node that mataches the remote node count * specified by the caller. Valid values for remote node count is * SCU_SSP_REMOTE_NODE_COUNT(1) or SCU_STP_REMOTE_NODE_COUNT(3). u16 This is * the remote node index that is returned or an invalid remote node context. 
*/ u16 sci_remote_node_table_allocate_remote_node( struct sci_remote_node_table *remote_node_table, u32 remote_node_count) { u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX; if (remote_node_count == SCU_SSP_REMOTE_NODE_COUNT) { remote_node_index = sci_remote_node_table_allocate_single_remote_node( remote_node_table, 0); if (remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) { remote_node_index = sci_remote_node_table_allocate_single_remote_node( remote_node_table, 1); } if (remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) { remote_node_index = sci_remote_node_table_allocate_single_remote_node( remote_node_table, 2); } } else if (remote_node_count == SCU_STP_REMOTE_NODE_COUNT) { remote_node_index = sci_remote_node_table_allocate_triple_remote_node( remote_node_table, 2); } return remote_node_index; } /** * sci_remote_node_table_release_single_remote_node() * @remote_node_table: This is the remote node table from which the remote node * release is to take place. * @remote_node_index: This is the remote node index that is being released. * This method will free a single remote node index back to the remote node * table. This routine will update the remote node groups */ static void sci_remote_node_table_release_single_remote_node( struct sci_remote_node_table *remote_node_table, u16 remote_node_index) { u32 group_index; u8 group_value; group_index = remote_node_index / SCU_STP_REMOTE_NODE_COUNT; group_value = sci_remote_node_table_get_group_value(remote_node_table, group_index); /* * Assert that we are not trying to add an entry to a slot that is already * full. */ BUG_ON(group_value == SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE); if (group_value == 0x00) { /* * There are no entries in this slot so it must be added to the single * slot table. */ sci_remote_node_table_set_group_index(remote_node_table, 0, group_index); } else if ((group_value & (group_value - 1)) == 0) { /* * There is only one entry in this slot so it must be moved from the * single slot table to the dual slot table */ sci_remote_node_table_clear_group_index(remote_node_table, 0, group_index); sci_remote_node_table_set_group_index(remote_node_table, 1, group_index); } else { /* * There are two entries in the slot so it must be moved from the dual * slot table to the tripple slot table. */ sci_remote_node_table_clear_group_index(remote_node_table, 1, group_index); sci_remote_node_table_set_group_index(remote_node_table, 2, group_index); } sci_remote_node_table_set_node_index(remote_node_table, remote_node_index); } /** * sci_remote_node_table_release_triple_remote_node() * @remote_node_table: This is the remote node table to which the remote node * index is to be freed. * @remote_node_index: This is the remote node index that is being released. * * This method will release a group of three consecutive remote nodes back to * the free remote nodes. */ static void sci_remote_node_table_release_triple_remote_node( struct sci_remote_node_table *remote_node_table, u16 remote_node_index) { u32 group_index; group_index = remote_node_index / SCU_STP_REMOTE_NODE_COUNT; sci_remote_node_table_set_group_index( remote_node_table, 2, group_index ); sci_remote_node_table_set_group(remote_node_table, group_index); } /** * sci_remote_node_table_release_remote_node_index() * @remote_node_table: The remote node table to which the remote node index is * to be freed. * @remote_node_count: This is the count of consecutive remote nodes that are * to be freed. 
 * @remote_node_index: This is the remote node index that is being released.
 *
 * This method will release the remote node index back into the remote node
 * table free pool.
 */
void sci_remote_node_table_release_remote_node_index(
	struct sci_remote_node_table *remote_node_table,
	u32 remote_node_count,
	u16 remote_node_index)
{
	if (remote_node_count == SCU_SSP_REMOTE_NODE_COUNT) {
		sci_remote_node_table_release_single_remote_node(
			remote_node_table, remote_node_index);
	} else if (remote_node_count == SCU_STP_REMOTE_NODE_COUNT) {
		sci_remote_node_table_release_triple_remote_node(
			remote_node_table, remote_node_index);
	}
}
linux-master
drivers/scsi/isci/remote_node_table.c
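The remote node allocator above is, at bottom, bitmap bookkeeping: sci_remote_node_table_get_group_index() scans an array of u32 words for the first set bit and converts it to an absolute group index, while the set/clear helpers flip individual bits or whole 4-bit group slots. Below is a minimal stand-alone sketch of that first-set-bit search under the same dword/bit layout; the demo_* names and DEMO_INVALID_INDEX are illustrative only and are not part of the driver.

#include <stdint.h>
#include <stdio.h>

#define DEMO_INVALID_INDEX 0xFFFFFFFFu

/*
 * Return the absolute bit position of the first set bit in an array of
 * 32-bit words, or DEMO_INVALID_INDEX if every word is zero.  This mirrors
 * the dword-then-bit scan done by sci_remote_node_table_get_group_index().
 */
static uint32_t demo_first_set_bit(const uint32_t *words, uint32_t nwords)
{
	uint32_t dword_index, bit_index;

	for (dword_index = 0; dword_index < nwords; dword_index++) {
		if (words[dword_index] == 0)
			continue;
		for (bit_index = 0; bit_index < 32; bit_index++) {
			if (words[dword_index] & (1u << bit_index))
				return dword_index * 32 + bit_index;
		}
	}
	return DEMO_INVALID_INDEX;
}

int main(void)
{
	/* only bit 0 of word 1 is set => absolute index 32 */
	uint32_t groups[2] = { 0x00000000u, 0x00000001u };

	printf("first available group: %u\n",
	       (unsigned)demo_first_set_bit(groups, 2));	/* prints 32 */
	return 0;
}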